/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

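/*
 * Emulate READ CAPACITY (10): report the last LBA (capped at 0xffffffff for
 * larger devices) and the logical block size in an 8-byte parameter block
 * copied into the command's data-in buffer.
 */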
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

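/*
 * Emulate READ CAPACITY (16): report the last LBA, the logical block size,
 * the P_TYPE/PROT_EN protection bits, the logical-blocks-per-physical-block
 * exponent, the lowest aligned LBA, and the LBPME (thin provisioning) bit.
 */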
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

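/*
 * Return the number of blocks covered by a WRITE_SAME (10/16/32) CDB,
 * treating a zero NUMBER OF BLOCKS field as "to the end of the device".
 */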
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

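/*
 * Validate the WRITE_SAME flag byte and hook up the backend handler,
 * using the UNMAP-capable path when the UNMAP bit is set.
 */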
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

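/*
 * Completion callback for the READ phase of COMPARE_AND_WRITE: compare the
 * read payload against the verify payload, and on a match rebuild the data
 * SGL and resubmit the command as a normal WRITE of the second buffer.
 */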
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

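/*
 * Entry point for COMPARE_AND_WRITE: take ->caw_sem to serialize against
 * other CAW commands on this device and submit the initial READ, leaving
 * the compare + WRITE to compare_and_write_callback().
 */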
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

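/*
 * Translate the RDPROTECT/WRPROTECT field into the protection operation
 * (PASS vs. INSERT/STRIP) and the set of DIF checks to perform.
 */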
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

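/*
 * Set up protection information handling for a READ/WRITE CDB based on the
 * backend pi_prot_type; returns false when the PROTECT field cannot be
 * honoured.
 */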
static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * If protection information is transferred over the wire, adjust
	 * cmd->data_length to describe the data payload only; the actual
	 * transfer length is the data length plus the protection length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return true;
}

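/*
 * Main SBC CDB parser: decode the opcode, LBA and transfer length, validate
 * them against device limits, and set up the execution callbacks supplied
 * in *ops before handing the command to the backend.
 */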
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

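/*
 * Generate DIF (T10 PI) tuples for a WRITE: compute the CRC guard tag per
 * logical block and, for Type 1, the reference tag from the starting LBA.
 */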
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

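/*
 * Verify a single DIF v1 tuple against a logical block: check the guard tag
 * CRC and, for Type 1/2, the reference tag.
 */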
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

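/*
 * Copy protection tuples between cmd->t_prot_sg and the caller-provided
 * scatterlist: from @sg into cmd->t_prot_sg when @read is true, and the
 * other way round otherwise.
 */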
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

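/*
 * Verify the DIF tuples in cmd->t_prot_sg against the WRITE data payload,
 * then copy them into the backend protection scatterlist @sg on success.
 */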
sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

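/*
 * Verify PI for a READ using the tuples in cmd->t_prot_sg without copying
 * them to a fabric scatterlist (the "strip" case).
 */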
sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);
