/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include "iscsi_target_stat.h"

static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_SPINLOCK(np_lock);

static struct idr tiqn_idr;
struct idr sess_idr;
struct mutex auth_id_lock;
spinlock_t sess_idr_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsi_cmd *,
			unsigned char *buf, u32);
static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);

struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		spin_unlock(&tiqn->tiqn_state_lock);
		return 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return -1;
}

void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

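/*
 * Illustrative usage sketch (not taken from an actual caller): the get/put
 * pair above pins an active TIQN via tiqn_access_count so that
 * iscsit_del_tiqn() below cannot tear it down while a login still
 * references it.  Assuming a NUL-terminated TargetName string in "name":
 *
 *	struct iscsi_tiqn *tiqn = iscsit_get_tiqn_for_login(name);
 *	if (!tiqn)
 *		return -1;	(target unknown or shutting down)
 *	... use tiqn; the access count keeps it alive ...
 *	iscsit_put_tiqn_for_login(tiqn);
 */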
114/* 115 * Note that IQN formatting is expected to be done in userspace, and 116 * no explict IQN format checks are done here. 117 */ 118struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) 119{ 120 struct iscsi_tiqn *tiqn = NULL; 121 int ret; 122 123 if (strlen(buf) >= ISCSI_IQN_LEN) { 124 pr_err("Target IQN exceeds %d bytes\n", 125 ISCSI_IQN_LEN); 126 return ERR_PTR(-EINVAL); 127 } 128 129 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL); 130 if (!tiqn) { 131 pr_err("Unable to allocate struct iscsi_tiqn\n"); 132 return ERR_PTR(-ENOMEM); 133 } 134 135 sprintf(tiqn->tiqn, "%s", buf); 136 INIT_LIST_HEAD(&tiqn->tiqn_list); 137 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list); 138 spin_lock_init(&tiqn->tiqn_state_lock); 139 spin_lock_init(&tiqn->tiqn_tpg_lock); 140 spin_lock_init(&tiqn->sess_err_stats.lock); 141 spin_lock_init(&tiqn->login_stats.lock); 142 spin_lock_init(&tiqn->logout_stats.lock); 143 144 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) { 145 pr_err("idr_pre_get() for tiqn_idr failed\n"); 146 kfree(tiqn); 147 return ERR_PTR(-ENOMEM); 148 } 149 tiqn->tiqn_state = TIQN_STATE_ACTIVE; 150 151 spin_lock(&tiqn_lock); 152 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index); 153 if (ret < 0) { 154 pr_err("idr_get_new() failed for tiqn->tiqn_index\n"); 155 spin_unlock(&tiqn_lock); 156 kfree(tiqn); 157 return ERR_PTR(ret); 158 } 159 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list); 160 spin_unlock(&tiqn_lock); 161 162 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn); 163 164 return tiqn; 165 166} 167 168static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn) 169{ 170 /* 171 * Wait for accesses to said struct iscsi_tiqn to end. 172 */ 173 spin_lock(&tiqn->tiqn_state_lock); 174 while (tiqn->tiqn_access_count != 0) { 175 spin_unlock(&tiqn->tiqn_state_lock); 176 msleep(10); 177 spin_lock(&tiqn->tiqn_state_lock); 178 } 179 spin_unlock(&tiqn->tiqn_state_lock); 180} 181 182void iscsit_del_tiqn(struct iscsi_tiqn *tiqn) 183{ 184 /* 185 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN 186 * while holding tiqn->tiqn_state_lock. This means that all subsequent 187 * attempts to access this struct iscsi_tiqn will fail from both transport 188 * fabric and control code paths. 189 */ 190 if (iscsit_set_tiqn_shutdown(tiqn) < 0) { 191 pr_err("iscsit_set_tiqn_shutdown() failed\n"); 192 return; 193 } 194 195 iscsit_wait_for_tiqn(tiqn); 196 197 spin_lock(&tiqn_lock); 198 list_del(&tiqn->tiqn_list); 199 idr_remove(&tiqn_idr, tiqn->tiqn_index); 200 spin_unlock(&tiqn_lock); 201 202 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n", 203 tiqn->tiqn); 204 kfree(tiqn); 205} 206 207int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 208{ 209 int ret; 210 /* 211 * Determine if the network portal is accepting storage traffic. 212 */ 213 spin_lock_bh(&np->np_thread_lock); 214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 215 spin_unlock_bh(&np->np_thread_lock); 216 return -1; 217 } 218 if (np->np_login_tpg) { 219 pr_err("np->np_login_tpg() is not NULL!\n"); 220 spin_unlock_bh(&np->np_thread_lock); 221 return -1; 222 } 223 spin_unlock_bh(&np->np_thread_lock); 224 /* 225 * Determine if the portal group is accepting storage traffic. 226 */ 227 spin_lock_bh(&tpg->tpg_state_lock); 228 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 229 spin_unlock_bh(&tpg->tpg_state_lock); 230 return -1; 231 } 232 spin_unlock_bh(&tpg->tpg_state_lock); 233 234 /* 235 * Here we serialize access across the TIQN+TPG Tuple. 
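	 * The lock below is taken with mutex_lock_interruptible() so that a
	 * signal (for example the SIGINT sent to the np login thread during a
	 * reset or shutdown) aborts the wait; in that case, or if a signal is
	 * already pending, we bail out with -1 and np->np_login_tpg is left
	 * untouched.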
236 */ 237 ret = mutex_lock_interruptible(&tpg->np_login_lock); 238 if ((ret != 0) || signal_pending(current)) 239 return -1; 240 241 spin_lock_bh(&np->np_thread_lock); 242 np->np_login_tpg = tpg; 243 spin_unlock_bh(&np->np_thread_lock); 244 245 return 0; 246} 247 248int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 249{ 250 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 251 252 spin_lock_bh(&np->np_thread_lock); 253 np->np_login_tpg = NULL; 254 spin_unlock_bh(&np->np_thread_lock); 255 256 mutex_unlock(&tpg->np_login_lock); 257 258 if (tiqn) 259 iscsit_put_tiqn_for_login(tiqn); 260 261 return 0; 262} 263 264static struct iscsi_np *iscsit_get_np( 265 struct __kernel_sockaddr_storage *sockaddr, 266 int network_transport) 267{ 268 struct sockaddr_in *sock_in, *sock_in_e; 269 struct sockaddr_in6 *sock_in6, *sock_in6_e; 270 struct iscsi_np *np; 271 int ip_match = 0; 272 u16 port; 273 274 spin_lock_bh(&np_lock); 275 list_for_each_entry(np, &g_np_list, np_list) { 276 spin_lock(&np->np_thread_lock); 277 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 278 spin_unlock(&np->np_thread_lock); 279 continue; 280 } 281 282 if (sockaddr->ss_family == AF_INET6) { 283 sock_in6 = (struct sockaddr_in6 *)sockaddr; 284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 285 286 if (!memcmp(&sock_in6->sin6_addr.in6_u, 287 &sock_in6_e->sin6_addr.in6_u, 288 sizeof(struct in6_addr))) 289 ip_match = 1; 290 291 port = ntohs(sock_in6->sin6_port); 292 } else { 293 sock_in = (struct sockaddr_in *)sockaddr; 294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 295 296 if (sock_in->sin_addr.s_addr == 297 sock_in_e->sin_addr.s_addr) 298 ip_match = 1; 299 300 port = ntohs(sock_in->sin_port); 301 } 302 303 if ((ip_match == 1) && (np->np_port == port) && 304 (np->np_network_transport == network_transport)) { 305 /* 306 * Increment the np_exports reference count now to 307 * prevent iscsit_del_np() below from being called 308 * while iscsi_tpg_add_network_portal() is called. 309 */ 310 np->np_exports++; 311 spin_unlock(&np->np_thread_lock); 312 spin_unlock_bh(&np_lock); 313 return np; 314 } 315 spin_unlock(&np->np_thread_lock); 316 } 317 spin_unlock_bh(&np_lock); 318 319 return NULL; 320} 321 322struct iscsi_np *iscsit_add_np( 323 struct __kernel_sockaddr_storage *sockaddr, 324 char *ip_str, 325 int network_transport) 326{ 327 struct sockaddr_in *sock_in; 328 struct sockaddr_in6 *sock_in6; 329 struct iscsi_np *np; 330 int ret; 331 /* 332 * Locate the existing struct iscsi_np if already active.. 
333 */ 334 np = iscsit_get_np(sockaddr, network_transport); 335 if (np) 336 return np; 337 338 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 339 if (!np) { 340 pr_err("Unable to allocate memory for struct iscsi_np\n"); 341 return ERR_PTR(-ENOMEM); 342 } 343 344 np->np_flags |= NPF_IP_NETWORK; 345 if (sockaddr->ss_family == AF_INET6) { 346 sock_in6 = (struct sockaddr_in6 *)sockaddr; 347 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str); 348 np->np_port = ntohs(sock_in6->sin6_port); 349 } else { 350 sock_in = (struct sockaddr_in *)sockaddr; 351 sprintf(np->np_ip, "%s", ip_str); 352 np->np_port = ntohs(sock_in->sin_port); 353 } 354 355 np->np_network_transport = network_transport; 356 spin_lock_init(&np->np_thread_lock); 357 init_completion(&np->np_restart_comp); 358 INIT_LIST_HEAD(&np->np_list); 359 360 ret = iscsi_target_setup_login_socket(np, sockaddr); 361 if (ret != 0) { 362 kfree(np); 363 return ERR_PTR(ret); 364 } 365 366 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); 367 if (IS_ERR(np->np_thread)) { 368 pr_err("Unable to create kthread: iscsi_np\n"); 369 ret = PTR_ERR(np->np_thread); 370 kfree(np); 371 return ERR_PTR(ret); 372 } 373 /* 374 * Increment the np_exports reference count now to prevent 375 * iscsit_del_np() below from being run while a new call to 376 * iscsi_tpg_add_network_portal() for a matching iscsi_np is 377 * active. We don't need to hold np->np_thread_lock at this 378 * point because iscsi_np has not been added to g_np_list yet. 379 */ 380 np->np_exports = 1; 381 382 spin_lock_bh(&np_lock); 383 list_add_tail(&np->np_list, &g_np_list); 384 spin_unlock_bh(&np_lock); 385 386 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 387 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 388 "TCP" : "SCTP"); 389 390 return np; 391} 392 393int iscsit_reset_np_thread( 394 struct iscsi_np *np, 395 struct iscsi_tpg_np *tpg_np, 396 struct iscsi_portal_group *tpg) 397{ 398 spin_lock_bh(&np->np_thread_lock); 399 if (tpg && tpg_np) { 400 /* 401 * The reset operation need only be performed when the 402 * passed struct iscsi_portal_group has a login in progress 403 * to one of the network portals. 404 */ 405 if (tpg_np->tpg_np->np_login_tpg != tpg) { 406 spin_unlock_bh(&np->np_thread_lock); 407 return 0; 408 } 409 } 410 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 411 spin_unlock_bh(&np->np_thread_lock); 412 return 0; 413 } 414 np->np_thread_state = ISCSI_NP_THREAD_RESET; 415 416 if (np->np_thread) { 417 spin_unlock_bh(&np->np_thread_lock); 418 send_sig(SIGINT, np->np_thread, 1); 419 wait_for_completion(&np->np_restart_comp); 420 spin_lock_bh(&np->np_thread_lock); 421 } 422 spin_unlock_bh(&np->np_thread_lock); 423 424 return 0; 425} 426 427int iscsit_del_np_comm(struct iscsi_np *np) 428{ 429 if (!np->np_socket) 430 return 0; 431 432 /* 433 * Some network transports allocate their own struct sock->file, 434 * see if we need to free any additional allocated resources. 
435 */ 436 if (np->np_flags & NPF_SCTP_STRUCT_FILE) { 437 kfree(np->np_socket->file); 438 np->np_socket->file = NULL; 439 } 440 441 sock_release(np->np_socket); 442 return 0; 443} 444 445int iscsit_del_np(struct iscsi_np *np) 446{ 447 spin_lock_bh(&np->np_thread_lock); 448 np->np_exports--; 449 if (np->np_exports) { 450 spin_unlock_bh(&np->np_thread_lock); 451 return 0; 452 } 453 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN; 454 spin_unlock_bh(&np->np_thread_lock); 455 456 if (np->np_thread) { 457 /* 458 * We need to send the signal to wakeup Linux/Net 459 * which may be sleeping in sock_accept().. 460 */ 461 send_sig(SIGINT, np->np_thread, 1); 462 kthread_stop(np->np_thread); 463 } 464 iscsit_del_np_comm(np); 465 466 spin_lock_bh(&np_lock); 467 list_del(&np->np_list); 468 spin_unlock_bh(&np_lock); 469 470 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 471 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 472 "TCP" : "SCTP"); 473 474 kfree(np); 475 return 0; 476} 477 478static int __init iscsi_target_init_module(void) 479{ 480 int ret = 0; 481 482 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); 483 484 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL); 485 if (!iscsit_global) { 486 pr_err("Unable to allocate memory for iscsit_global\n"); 487 return -1; 488 } 489 mutex_init(&auth_id_lock); 490 spin_lock_init(&sess_idr_lock); 491 idr_init(&tiqn_idr); 492 idr_init(&sess_idr); 493 494 ret = iscsi_target_register_configfs(); 495 if (ret < 0) 496 goto out; 497 498 ret = iscsi_thread_set_init(); 499 if (ret < 0) 500 goto configfs_out; 501 502 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != 503 TARGET_THREAD_SET_COUNT) { 504 pr_err("iscsi_allocate_thread_sets() returned" 505 " unexpected value!\n"); 506 goto ts_out1; 507 } 508 509 lio_cmd_cache = kmem_cache_create("lio_cmd_cache", 510 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd), 511 0, NULL); 512 if (!lio_cmd_cache) { 513 pr_err("Unable to kmem_cache_create() for" 514 " lio_cmd_cache\n"); 515 goto ts_out2; 516 } 517 518 lio_qr_cache = kmem_cache_create("lio_qr_cache", 519 sizeof(struct iscsi_queue_req), 520 __alignof__(struct iscsi_queue_req), 0, NULL); 521 if (!lio_qr_cache) { 522 pr_err("nable to kmem_cache_create() for" 523 " lio_qr_cache\n"); 524 goto cmd_out; 525 } 526 527 lio_dr_cache = kmem_cache_create("lio_dr_cache", 528 sizeof(struct iscsi_datain_req), 529 __alignof__(struct iscsi_datain_req), 0, NULL); 530 if (!lio_dr_cache) { 531 pr_err("Unable to kmem_cache_create() for" 532 " lio_dr_cache\n"); 533 goto qr_out; 534 } 535 536 lio_ooo_cache = kmem_cache_create("lio_ooo_cache", 537 sizeof(struct iscsi_ooo_cmdsn), 538 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL); 539 if (!lio_ooo_cache) { 540 pr_err("Unable to kmem_cache_create() for" 541 " lio_ooo_cache\n"); 542 goto dr_out; 543 } 544 545 lio_r2t_cache = kmem_cache_create("lio_r2t_cache", 546 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t), 547 0, NULL); 548 if (!lio_r2t_cache) { 549 pr_err("Unable to kmem_cache_create() for" 550 " lio_r2t_cache\n"); 551 goto ooo_out; 552 } 553 554 if (iscsit_load_discovery_tpg() < 0) 555 goto r2t_out; 556 557 return ret; 558r2t_out: 559 kmem_cache_destroy(lio_r2t_cache); 560ooo_out: 561 kmem_cache_destroy(lio_ooo_cache); 562dr_out: 563 kmem_cache_destroy(lio_dr_cache); 564qr_out: 565 kmem_cache_destroy(lio_qr_cache); 566cmd_out: 567 kmem_cache_destroy(lio_cmd_cache); 568ts_out2: 569 iscsi_deallocate_thread_sets(); 570ts_out1: 571 iscsi_thread_set_free(); 572configfs_out: 573 
iscsi_target_deregister_configfs(); 574out: 575 kfree(iscsit_global); 576 return -ENOMEM; 577} 578 579static void __exit iscsi_target_cleanup_module(void) 580{ 581 iscsi_deallocate_thread_sets(); 582 iscsi_thread_set_free(); 583 iscsit_release_discovery_tpg(); 584 kmem_cache_destroy(lio_cmd_cache); 585 kmem_cache_destroy(lio_qr_cache); 586 kmem_cache_destroy(lio_dr_cache); 587 kmem_cache_destroy(lio_ooo_cache); 588 kmem_cache_destroy(lio_r2t_cache); 589 590 iscsi_target_deregister_configfs(); 591 592 kfree(iscsit_global); 593} 594 595int iscsit_add_reject( 596 u8 reason, 597 int fail_conn, 598 unsigned char *buf, 599 struct iscsi_conn *conn) 600{ 601 struct iscsi_cmd *cmd; 602 struct iscsi_reject *hdr; 603 int ret; 604 605 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 606 if (!cmd) 607 return -1; 608 609 cmd->iscsi_opcode = ISCSI_OP_REJECT; 610 if (fail_conn) 611 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 612 613 hdr = (struct iscsi_reject *) cmd->pdu; 614 hdr->reason = reason; 615 616 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 617 if (!cmd->buf_ptr) { 618 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 619 iscsit_release_cmd(cmd); 620 return -1; 621 } 622 623 spin_lock_bh(&conn->cmd_lock); 624 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 625 spin_unlock_bh(&conn->cmd_lock); 626 627 cmd->i_state = ISTATE_SEND_REJECT; 628 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 629 630 ret = wait_for_completion_interruptible(&cmd->reject_comp); 631 if (ret != 0) 632 return -1; 633 634 return (!fail_conn) ? 0 : -1; 635} 636 637int iscsit_add_reject_from_cmd( 638 u8 reason, 639 int fail_conn, 640 int add_to_conn, 641 unsigned char *buf, 642 struct iscsi_cmd *cmd) 643{ 644 struct iscsi_conn *conn; 645 struct iscsi_reject *hdr; 646 int ret; 647 648 if (!cmd->conn) { 649 pr_err("cmd->conn is NULL for ITT: 0x%08x\n", 650 cmd->init_task_tag); 651 return -1; 652 } 653 conn = cmd->conn; 654 655 cmd->iscsi_opcode = ISCSI_OP_REJECT; 656 if (fail_conn) 657 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 658 659 hdr = (struct iscsi_reject *) cmd->pdu; 660 hdr->reason = reason; 661 662 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 663 if (!cmd->buf_ptr) { 664 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 665 iscsit_release_cmd(cmd); 666 return -1; 667 } 668 669 if (add_to_conn) { 670 spin_lock_bh(&conn->cmd_lock); 671 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 672 spin_unlock_bh(&conn->cmd_lock); 673 } 674 675 cmd->i_state = ISTATE_SEND_REJECT; 676 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 677 678 ret = wait_for_completion_interruptible(&cmd->reject_comp); 679 if (ret != 0) 680 return -1; 681 682 return (!fail_conn) ? 0 : -1; 683} 684 685/* 686 * Map some portion of the allocated scatterlist to an iovec, suitable for 687 * kernel sockets to copy data in/out. This handles both pages and slab-allocated 688 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in 689 * either case (see iscsit_alloc_buffs) 690 */ 691static int iscsit_map_iovec( 692 struct iscsi_cmd *cmd, 693 struct kvec *iov, 694 u32 data_offset, 695 u32 data_length) 696{ 697 u32 i = 0; 698 struct scatterlist *sg; 699 unsigned int page_off; 700 701 /* 702 * We have a private mapping of the allocated pages in t_mem_sg. 703 * At this point, we also know each contains a page. 
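	 * The divide/modulo below turns the byte offset into an sg index plus
	 * an offset within that entry; for example (illustrative figures,
	 * assuming 4k pages) data_offset == 9000 selects t_mem_sg[2] with
	 * page_off == 808.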
704 */ 705 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE]; 706 page_off = (data_offset % PAGE_SIZE); 707 708 cmd->first_data_sg = sg; 709 cmd->first_data_sg_off = page_off; 710 711 while (data_length) { 712 u32 cur_len = min_t(u32, data_length, sg->length - page_off); 713 714 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; 715 iov[i].iov_len = cur_len; 716 717 data_length -= cur_len; 718 page_off = 0; 719 sg = sg_next(sg); 720 i++; 721 } 722 723 cmd->kmapped_nents = i; 724 725 return i; 726} 727 728static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) 729{ 730 u32 i; 731 struct scatterlist *sg; 732 733 sg = cmd->first_data_sg; 734 735 for (i = 0; i < cmd->kmapped_nents; i++) 736 kunmap(sg_page(&sg[i])); 737} 738 739static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 740{ 741 struct iscsi_cmd *cmd; 742 743 conn->exp_statsn = exp_statsn; 744 745 spin_lock_bh(&conn->cmd_lock); 746 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 747 spin_lock(&cmd->istate_lock); 748 if ((cmd->i_state == ISTATE_SENT_STATUS) && 749 (cmd->stat_sn < exp_statsn)) { 750 cmd->i_state = ISTATE_REMOVE; 751 spin_unlock(&cmd->istate_lock); 752 iscsit_add_cmd_to_immediate_queue(cmd, conn, 753 cmd->i_state); 754 continue; 755 } 756 spin_unlock(&cmd->istate_lock); 757 } 758 spin_unlock_bh(&conn->cmd_lock); 759} 760 761static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 762{ 763 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : 764 cmd->se_cmd.t_data_nents; 765 766 iov_count += ISCSI_IOV_DATA_BUFFER; 767 768 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); 769 if (!cmd->iov_data) { 770 pr_err("Unable to allocate cmd->iov_data\n"); 771 return -ENOMEM; 772 } 773 774 cmd->orig_iov_data_count = iov_count; 775 return 0; 776} 777 778static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) 779{ 780 struct scatterlist *sgl; 781 u32 length = cmd->se_cmd.data_length; 782 int nents = DIV_ROUND_UP(length, PAGE_SIZE); 783 int i = 0, ret; 784 /* 785 * If no SCSI payload is present, allocate the default iovecs used for 786 * iSCSI PDU Header 787 */ 788 if (!length) 789 return iscsit_allocate_iovecs(cmd); 790 791 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL); 792 if (!sgl) 793 return -ENOMEM; 794 795 sg_init_table(sgl, nents); 796 797 while (length) { 798 int buf_size = min_t(int, length, PAGE_SIZE); 799 struct page *page; 800 801 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 802 if (!page) 803 goto page_alloc_failed; 804 805 sg_set_page(&sgl[i], page, buf_size, 0); 806 807 length -= buf_size; 808 i++; 809 } 810 811 cmd->t_mem_sg = sgl; 812 cmd->t_mem_sg_nents = nents; 813 814 /* BIDI ops not supported */ 815 816 /* Tell the core about our preallocated memory */ 817 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0); 818 /* 819 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd 820 * so that cmd->se_cmd.t_tasks_se_num has been set. 
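	 * As a rough illustration (figures assumed, 4k pages): a 10k WRITE
	 * allocates nents == 3 pages above, and iscsit_allocate_iovecs() then
	 * sizes cmd->iov_data as t_data_nents + ISCSI_IOV_DATA_BUFFER kvecs.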
821 */ 822 ret = iscsit_allocate_iovecs(cmd); 823 if (ret < 0) 824 goto page_alloc_failed; 825 826 return 0; 827 828page_alloc_failed: 829 while (i >= 0) { 830 __free_page(sg_page(&sgl[i])); 831 i--; 832 } 833 kfree(cmd->t_mem_sg); 834 cmd->t_mem_sg = NULL; 835 return -ENOMEM; 836} 837 838static int iscsit_handle_scsi_cmd( 839 struct iscsi_conn *conn, 840 unsigned char *buf) 841{ 842 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret; 843 int dump_immediate_data = 0, send_check_condition = 0, payload_length; 844 struct iscsi_cmd *cmd = NULL; 845 struct iscsi_scsi_req *hdr; 846 847 spin_lock_bh(&conn->sess->session_stats_lock); 848 conn->sess->cmd_pdus++; 849 if (conn->sess->se_sess->se_node_acl) { 850 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 851 conn->sess->se_sess->se_node_acl->num_cmds++; 852 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 853 } 854 spin_unlock_bh(&conn->sess->session_stats_lock); 855 856 hdr = (struct iscsi_scsi_req *) buf; 857 payload_length = ntoh24(hdr->dlength); 858 hdr->itt = be32_to_cpu(hdr->itt); 859 hdr->data_length = be32_to_cpu(hdr->data_length); 860 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 861 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 862 863 /* FIXME; Add checks for AdditionalHeaderSegment */ 864 865 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && 866 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 867 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 868 " not set. Bad iSCSI Initiator.\n"); 869 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 870 buf, conn); 871 } 872 873 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 874 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 875 /* 876 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 877 * that adds support for RESERVE/RELEASE. There is a bug 878 * add with this new functionality that sets R/W bits when 879 * neither CDB carries any READ or WRITE datapayloads. 880 */ 881 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 882 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 883 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; 884 goto done; 885 } 886 887 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 888 " set when Expected Data Transfer Length is 0 for" 889 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 890 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 891 buf, conn); 892 } 893done: 894 895 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 896 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 897 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" 898 " MUST be set if Expected Data Transfer Length is not 0." 
899 " Bad iSCSI Initiator\n"); 900 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 901 buf, conn); 902 } 903 904 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 905 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 906 pr_err("Bidirectional operations not supported!\n"); 907 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 908 buf, conn); 909 } 910 911 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 912 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 913 " Scsi Command PDU.\n"); 914 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 915 buf, conn); 916 } 917 918 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 919 pr_err("ImmediateData=No but DataSegmentLength=%u," 920 " protocol error.\n", payload_length); 921 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 922 buf, conn); 923 } 924 925 if ((hdr->data_length == payload_length) && 926 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { 927 pr_err("Expected Data Transfer Length and Length of" 928 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 929 " bit is not set protocol error\n"); 930 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 931 buf, conn); 932 } 933 934 if (payload_length > hdr->data_length) { 935 pr_err("DataSegmentLength: %u is greater than" 936 " EDTL: %u, protocol error.\n", payload_length, 937 hdr->data_length); 938 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 939 buf, conn); 940 } 941 942 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 943 pr_err("DataSegmentLength: %u is greater than" 944 " MaxRecvDataSegmentLength: %u, protocol error.\n", 945 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 946 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 947 buf, conn); 948 } 949 950 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 951 pr_err("DataSegmentLength: %u is greater than" 952 " FirstBurstLength: %u, protocol error.\n", 953 payload_length, conn->sess->sess_ops->FirstBurstLength); 954 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 955 buf, conn); 956 } 957 958 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 959 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE : 960 DMA_NONE; 961 962 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction, 963 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK)); 964 if (!cmd) 965 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 966 buf, conn); 967 968 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 969 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 970 hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 971 972 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 973 cmd->i_state = ISTATE_NEW_CMD; 974 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 975 cmd->immediate_data = (payload_length) ? 1 : 0; 976 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && 977 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 
1 : 0); 978 if (cmd->unsolicited_data) 979 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 980 981 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 982 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 983 spin_lock_bh(&conn->sess->ttt_lock); 984 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 985 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 986 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 987 spin_unlock_bh(&conn->sess->ttt_lock); 988 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 989 cmd->targ_xfer_tag = 0xFFFFFFFF; 990 cmd->cmd_sn = hdr->cmdsn; 991 cmd->exp_stat_sn = hdr->exp_statsn; 992 cmd->first_burst_len = payload_length; 993 994 if (cmd->data_direction == DMA_FROM_DEVICE) { 995 struct iscsi_datain_req *dr; 996 997 dr = iscsit_allocate_datain_req(); 998 if (!dr) 999 return iscsit_add_reject_from_cmd( 1000 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1001 1, 1, buf, cmd); 1002 1003 iscsit_attach_datain_req(cmd, dr); 1004 } 1005 1006 /* 1007 * The CDB is going to an se_device_t. 1008 */ 1009 ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb, 1010 get_unaligned_le64(&hdr->lun)); 1011 if (ret < 0) { 1012 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) { 1013 pr_debug("Responding to non-acl'ed," 1014 " non-existent or non-exported iSCSI LUN:" 1015 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); 1016 } 1017 send_check_condition = 1; 1018 goto attach_cmd; 1019 } 1020 /* 1021 * The Initiator Node has access to the LUN (the addressing method 1022 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to 1023 * allocate 1->N transport tasks (depending on sector count and 1024 * maximum request size the physical HBA(s) can handle. 1025 */ 1026 transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb); 1027 if (transport_ret == -ENOMEM) { 1028 return iscsit_add_reject_from_cmd( 1029 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1030 1, 1, buf, cmd); 1031 } else if (transport_ret == -EINVAL) { 1032 /* 1033 * Unsupported SAM Opcode. CHECK_CONDITION will be sent 1034 * in iscsit_execute_cmd() during the CmdSN OOO Execution 1035 * Mechinism. 1036 */ 1037 send_check_condition = 1; 1038 } else { 1039 cmd->data_length = cmd->se_cmd.data_length; 1040 1041 if (iscsit_decide_list_to_build(cmd, payload_length) < 0) 1042 return iscsit_add_reject_from_cmd( 1043 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1044 1, 1, buf, cmd); 1045 } 1046 1047attach_cmd: 1048 spin_lock_bh(&conn->cmd_lock); 1049 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1050 spin_unlock_bh(&conn->cmd_lock); 1051 /* 1052 * Check if we need to delay processing because of ALUA 1053 * Active/NonOptimized primary access state.. 1054 */ 1055 core_alua_check_nonop_delay(&cmd->se_cmd); 1056 /* 1057 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd(). 1058 * also call iscsit_allocate_iovecs() 1059 */ 1060 ret = iscsit_alloc_buffs(cmd); 1061 if (ret < 0) 1062 return iscsit_add_reject_from_cmd( 1063 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1064 1, 0, buf, cmd); 1065 /* 1066 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1067 * the Immediate Bit is not set, and no Immediate 1068 * Data is attached. 1069 * 1070 * A PDU/CmdSN carrying Immediate Data can only 1071 * be processed after the DataCRC has passed. 1072 * If the DataCRC fails, the CmdSN MUST NOT 1073 * be acknowledged. 
(See below) 1074 */ 1075 if (!cmd->immediate_data) { 1076 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1077 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1078 return 0; 1079 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1080 return iscsit_add_reject_from_cmd( 1081 ISCSI_REASON_PROTOCOL_ERROR, 1082 1, 0, buf, cmd); 1083 } 1084 1085 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1086 1087 /* 1088 * If no Immediate Data is attached, it's OK to return now. 1089 */ 1090 if (!cmd->immediate_data) { 1091 if (send_check_condition) 1092 return 0; 1093 1094 if (cmd->unsolicited_data) { 1095 iscsit_set_dataout_sequence_values(cmd); 1096 1097 spin_lock_bh(&cmd->dataout_timeout_lock); 1098 iscsit_start_dataout_timer(cmd, cmd->conn); 1099 spin_unlock_bh(&cmd->dataout_timeout_lock); 1100 } 1101 1102 return 0; 1103 } 1104 1105 /* 1106 * Early CHECK_CONDITIONs never make it to the transport processing 1107 * thread. They are processed in CmdSN order by 1108 * iscsit_check_received_cmdsn() below. 1109 */ 1110 if (send_check_condition) { 1111 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1112 dump_immediate_data = 1; 1113 goto after_immediate_data; 1114 } 1115 /* 1116 * Call directly into transport_generic_new_cmd() to perform 1117 * the backend memory allocation. 1118 */ 1119 ret = transport_generic_new_cmd(&cmd->se_cmd); 1120 if (ret < 0) { 1121 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1122 dump_immediate_data = 1; 1123 goto after_immediate_data; 1124 } 1125 1126 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1127after_immediate_data: 1128 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1129 /* 1130 * A PDU/CmdSN carrying Immediate Data passed 1131 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1132 * Immediate Bit is not set. 1133 */ 1134 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1135 /* 1136 * Special case for Unsupported SAM WRITE Opcodes 1137 * and ImmediateData=Yes. 1138 */ 1139 if (dump_immediate_data) { 1140 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1141 return -1; 1142 } else if (cmd->unsolicited_data) { 1143 iscsit_set_dataout_sequence_values(cmd); 1144 1145 spin_lock_bh(&cmd->dataout_timeout_lock); 1146 iscsit_start_dataout_timer(cmd, cmd->conn); 1147 spin_unlock_bh(&cmd->dataout_timeout_lock); 1148 } 1149 1150 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1151 return iscsit_add_reject_from_cmd( 1152 ISCSI_REASON_PROTOCOL_ERROR, 1153 1, 0, buf, cmd); 1154 1155 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1156 /* 1157 * Immediate Data failed DataCRC and ERL>=1, 1158 * silently drop this PDU and let the initiator 1159 * plug the CmdSN gap. 1160 * 1161 * FIXME: Send Unsolicited NOPIN with reserved 1162 * TTT here to help the initiator figure out 1163 * the missing CmdSN, although they should be 1164 * intelligent enough to determine the missing 1165 * CmdSN and issue a retry to plug the sequence. 
1166 */ 1167 cmd->i_state = ISTATE_REMOVE; 1168 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1169 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1170 return -1; 1171 1172 return 0; 1173} 1174 1175static u32 iscsit_do_crypto_hash_sg( 1176 struct hash_desc *hash, 1177 struct iscsi_cmd *cmd, 1178 u32 data_offset, 1179 u32 data_length, 1180 u32 padding, 1181 u8 *pad_bytes) 1182{ 1183 u32 data_crc; 1184 u32 i; 1185 struct scatterlist *sg; 1186 unsigned int page_off; 1187 1188 crypto_hash_init(hash); 1189 1190 sg = cmd->first_data_sg; 1191 page_off = cmd->first_data_sg_off; 1192 1193 i = 0; 1194 while (data_length) { 1195 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off)); 1196 1197 crypto_hash_update(hash, &sg[i], cur_len); 1198 1199 data_length -= cur_len; 1200 page_off = 0; 1201 i++; 1202 } 1203 1204 if (padding) { 1205 struct scatterlist pad_sg; 1206 1207 sg_init_one(&pad_sg, pad_bytes, padding); 1208 crypto_hash_update(hash, &pad_sg, padding); 1209 } 1210 crypto_hash_final(hash, (u8 *) &data_crc); 1211 1212 return data_crc; 1213} 1214 1215static void iscsit_do_crypto_hash_buf( 1216 struct hash_desc *hash, 1217 unsigned char *buf, 1218 u32 payload_length, 1219 u32 padding, 1220 u8 *pad_bytes, 1221 u8 *data_crc) 1222{ 1223 struct scatterlist sg; 1224 1225 crypto_hash_init(hash); 1226 1227 sg_init_one(&sg, buf, payload_length); 1228 crypto_hash_update(hash, &sg, payload_length); 1229 1230 if (padding) { 1231 sg_init_one(&sg, pad_bytes, padding); 1232 crypto_hash_update(hash, &sg, padding); 1233 } 1234 crypto_hash_final(hash, data_crc); 1235} 1236 1237static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1238{ 1239 int iov_ret, ooo_cmdsn = 0, ret; 1240 u8 data_crc_failed = 0; 1241 u32 checksum, iov_count = 0, padding = 0, rx_got = 0; 1242 u32 rx_size = 0, payload_length; 1243 struct iscsi_cmd *cmd = NULL; 1244 struct se_cmd *se_cmd; 1245 struct iscsi_data *hdr; 1246 struct kvec *iov; 1247 unsigned long flags; 1248 1249 hdr = (struct iscsi_data *) buf; 1250 payload_length = ntoh24(hdr->dlength); 1251 hdr->itt = be32_to_cpu(hdr->itt); 1252 hdr->ttt = be32_to_cpu(hdr->ttt); 1253 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1254 hdr->datasn = be32_to_cpu(hdr->datasn); 1255 hdr->offset = be32_to_cpu(hdr->offset); 1256 1257 if (!payload_length) { 1258 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1259 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1260 buf, conn); 1261 } 1262 1263 /* iSCSI write */ 1264 spin_lock_bh(&conn->sess->session_stats_lock); 1265 conn->sess->rx_data_octets += payload_length; 1266 if (conn->sess->se_sess->se_node_acl) { 1267 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 1268 conn->sess->se_sess->se_node_acl->write_bytes += payload_length; 1269 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 1270 } 1271 spin_unlock_bh(&conn->sess->session_stats_lock); 1272 1273 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1274 pr_err("DataSegmentLength: %u is greater than" 1275 " MaxRecvDataSegmentLength: %u\n", payload_length, 1276 conn->conn_ops->MaxRecvDataSegmentLength); 1277 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1278 buf, conn); 1279 } 1280 1281 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 1282 payload_length); 1283 if (!cmd) 1284 return 0; 1285 1286 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1287 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1288 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, 1289 payload_length, 
conn->cid); 1290 1291 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 1292 pr_err("Command ITT: 0x%08x received DataOUT after" 1293 " last DataOUT received, dumping payload\n", 1294 cmd->init_task_tag); 1295 return iscsit_dump_data_payload(conn, payload_length, 1); 1296 } 1297 1298 if (cmd->data_direction != DMA_TO_DEVICE) { 1299 pr_err("Command ITT: 0x%08x received DataOUT for a" 1300 " NON-WRITE command.\n", cmd->init_task_tag); 1301 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 1302 1, 0, buf, cmd); 1303 } 1304 se_cmd = &cmd->se_cmd; 1305 iscsit_mod_dataout_timer(cmd); 1306 1307 if ((hdr->offset + payload_length) > cmd->data_length) { 1308 pr_err("DataOut Offset: %u, Length %u greater than" 1309 " iSCSI Command EDTL %u, protocol error.\n", 1310 hdr->offset, payload_length, cmd->data_length); 1311 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 1312 1, 0, buf, cmd); 1313 } 1314 1315 if (cmd->unsolicited_data) { 1316 int dump_unsolicited_data = 0; 1317 1318 if (conn->sess->sess_ops->InitialR2T) { 1319 pr_err("Received unexpected unsolicited data" 1320 " while InitialR2T=Yes, protocol error.\n"); 1321 transport_send_check_condition_and_sense(&cmd->se_cmd, 1322 TCM_UNEXPECTED_UNSOLICITED_DATA, 0); 1323 return -1; 1324 } 1325 /* 1326 * Special case for dealing with Unsolicited DataOUT 1327 * and Unsupported SAM WRITE Opcodes and SE resource allocation 1328 * failures; 1329 */ 1330 1331 /* Something's amiss if we're not in WRITE_PENDING state... */ 1332 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1333 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING); 1334 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1335 1336 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1337 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) || 1338 (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)) 1339 dump_unsolicited_data = 1; 1340 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1341 1342 if (dump_unsolicited_data) { 1343 /* 1344 * Check if a delayed TASK_ABORTED status needs to 1345 * be sent now if the ISCSI_FLAG_CMD_FINAL has been 1346 * received with the unsolicitied data out. 1347 */ 1348 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1349 iscsit_stop_dataout_timer(cmd); 1350 1351 transport_check_aborted_status(se_cmd, 1352 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 1353 return iscsit_dump_data_payload(conn, payload_length, 1); 1354 } 1355 } else { 1356 /* 1357 * For the normal solicited data path: 1358 * 1359 * Check for a delayed TASK_ABORTED status and dump any 1360 * incoming data out payload if one exists. Also, when the 1361 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current 1362 * data out sequence, we decrement outstanding_r2ts. Once 1363 * outstanding_r2ts reaches zero, go ahead and send the delayed 1364 * TASK_ABORTED status. 1365 */ 1366 if (atomic_read(&se_cmd->t_transport_aborted) != 0) { 1367 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1368 if (--cmd->outstanding_r2ts < 1) { 1369 iscsit_stop_dataout_timer(cmd); 1370 transport_check_aborted_status( 1371 se_cmd, 1); 1372 } 1373 1374 return iscsit_dump_data_payload(conn, payload_length, 1); 1375 } 1376 } 1377 /* 1378 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1379 * within-command recovery checks before receiving the payload. 
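	 * Note that the receive below is padded out to a 4-byte boundary via
	 * ((-payload_length) & 3); for example (illustrative) a 1027-byte
	 * Data-Out segment is read together with 1 pad byte, plus an extra
	 * ISCSI_CRC_LEN kvec when DataDigest has been negotiated.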
1380 */ 1381 ret = iscsit_check_pre_dataout(cmd, buf); 1382 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1383 return 0; 1384 else if (ret == DATAOUT_CANNOT_RECOVER) 1385 return -1; 1386 1387 rx_size += payload_length; 1388 iov = &cmd->iov_data[0]; 1389 1390 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length); 1391 if (iov_ret < 0) 1392 return -1; 1393 1394 iov_count += iov_ret; 1395 1396 padding = ((-payload_length) & 3); 1397 if (padding != 0) { 1398 iov[iov_count].iov_base = cmd->pad_bytes; 1399 iov[iov_count++].iov_len = padding; 1400 rx_size += padding; 1401 pr_debug("Receiving %u padding bytes.\n", padding); 1402 } 1403 1404 if (conn->conn_ops->DataDigest) { 1405 iov[iov_count].iov_base = &checksum; 1406 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 1407 rx_size += ISCSI_CRC_LEN; 1408 } 1409 1410 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 1411 1412 iscsit_unmap_iovec(cmd); 1413 1414 if (rx_got != rx_size) 1415 return -1; 1416 1417 if (conn->conn_ops->DataDigest) { 1418 u32 data_crc; 1419 1420 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1421 hdr->offset, payload_length, padding, 1422 cmd->pad_bytes); 1423 1424 if (checksum != data_crc) { 1425 pr_err("ITT: 0x%08x, Offset: %u, Length: %u," 1426 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" 1427 " does not match computed 0x%08x\n", 1428 hdr->itt, hdr->offset, payload_length, 1429 hdr->datasn, checksum, data_crc); 1430 data_crc_failed = 1; 1431 } else { 1432 pr_debug("Got CRC32C DataDigest 0x%08x for" 1433 " %u bytes of Data Out\n", checksum, 1434 payload_length); 1435 } 1436 } 1437 /* 1438 * Increment post receive data and CRC values or perform 1439 * within-command recovery. 1440 */ 1441 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1442 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1443 return 0; 1444 else if (ret == DATAOUT_SEND_R2T) { 1445 iscsit_set_dataout_sequence_values(cmd); 1446 iscsit_build_r2ts_for_cmd(cmd, conn, 0); 1447 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1448 /* 1449 * Handle extra special case for out of order 1450 * Unsolicited Data Out. 1451 */ 1452 spin_lock_bh(&cmd->istate_lock); 1453 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); 1454 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1455 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1456 spin_unlock_bh(&cmd->istate_lock); 1457 1458 iscsit_stop_dataout_timer(cmd); 1459 return (!ooo_cmdsn) ? 
				transport_generic_handle_data(
					&cmd->se_cmd) : 0;
	} else /* DATAOUT_CANNOT_RECOVER */
		return -1;

	return 0;
}

static int iscsit_handle_nop_out(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	unsigned char *ping_data = NULL;
	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
	u32 checksum, data_crc, padding = 0, payload_length;
	u64 lun;
	struct iscsi_cmd *cmd = NULL;
	struct kvec *iov = NULL;
	struct iscsi_nopout *hdr;

	hdr = (struct iscsi_nopout *) buf;
	payload_length = ntoh24(hdr->dlength);
	lun = get_unaligned_le64(&hdr->lun);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
			" not set, protocol error.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
			" greater than MaxRecvDataSegmentLength: %u, protocol"
			" error.\n", payload_length,
			conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		(hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
		payload_length);
	/*
	 * This is not a response to an Unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate a struct iscsi_cmd, as both
	 * can contain ping data.
	 */
	if (hdr->ttt == 0xFFFFFFFF) {
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			return iscsit_add_reject(
					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

		cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
		cmd->i_state = ISTATE_SEND_NOPIN;
		cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1525 1 : 0); 1526 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1527 cmd->targ_xfer_tag = 0xFFFFFFFF; 1528 cmd->cmd_sn = hdr->cmdsn; 1529 cmd->exp_stat_sn = hdr->exp_statsn; 1530 cmd->data_direction = DMA_NONE; 1531 } 1532 1533 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) { 1534 rx_size = payload_length; 1535 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1536 if (!ping_data) { 1537 pr_err("Unable to allocate memory for" 1538 " NOPOUT ping data.\n"); 1539 ret = -1; 1540 goto out; 1541 } 1542 1543 iov = &cmd->iov_misc[0]; 1544 iov[niov].iov_base = ping_data; 1545 iov[niov++].iov_len = payload_length; 1546 1547 padding = ((-payload_length) & 3); 1548 if (padding != 0) { 1549 pr_debug("Receiving %u additional bytes" 1550 " for padding.\n", padding); 1551 iov[niov].iov_base = &cmd->pad_bytes; 1552 iov[niov++].iov_len = padding; 1553 rx_size += padding; 1554 } 1555 if (conn->conn_ops->DataDigest) { 1556 iov[niov].iov_base = &checksum; 1557 iov[niov++].iov_len = ISCSI_CRC_LEN; 1558 rx_size += ISCSI_CRC_LEN; 1559 } 1560 1561 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); 1562 if (rx_got != rx_size) { 1563 ret = -1; 1564 goto out; 1565 } 1566 1567 if (conn->conn_ops->DataDigest) { 1568 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1569 ping_data, payload_length, 1570 padding, cmd->pad_bytes, 1571 (u8 *)&data_crc); 1572 1573 if (checksum != data_crc) { 1574 pr_err("Ping data CRC32C DataDigest" 1575 " 0x%08x does not match computed 0x%08x\n", 1576 checksum, data_crc); 1577 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1578 pr_err("Unable to recover from" 1579 " NOPOUT Ping DataCRC failure while in" 1580 " ERL=0.\n"); 1581 ret = -1; 1582 goto out; 1583 } else { 1584 /* 1585 * Silently drop this PDU and let the 1586 * initiator plug the CmdSN gap. 1587 */ 1588 pr_debug("Dropping NOPOUT" 1589 " Command CmdSN: 0x%08x due to" 1590 " DataCRC error.\n", hdr->cmdsn); 1591 ret = 0; 1592 goto out; 1593 } 1594 } else { 1595 pr_debug("Got CRC32C DataDigest" 1596 " 0x%08x for %u bytes of ping data.\n", 1597 checksum, payload_length); 1598 } 1599 } 1600 1601 ping_data[payload_length] = '\0'; 1602 /* 1603 * Attach ping data to struct iscsi_cmd->buf_ptr. 1604 */ 1605 cmd->buf_ptr = ping_data; 1606 cmd->buf_ptr_size = payload_length; 1607 1608 pr_debug("Got %u bytes of NOPOUT ping" 1609 " data.\n", payload_length); 1610 pr_debug("Ping Data: \"%s\"\n", ping_data); 1611 } 1612 1613 if (hdr->itt != 0xFFFFFFFF) { 1614 if (!cmd) { 1615 pr_err("Checking CmdSN for NOPOUT," 1616 " but cmd is NULL!\n"); 1617 return -1; 1618 } 1619 /* 1620 * Initiator is expecting a NopIN ping reply, 1621 */ 1622 spin_lock_bh(&conn->cmd_lock); 1623 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1624 spin_unlock_bh(&conn->cmd_lock); 1625 1626 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1627 1628 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 1629 iscsit_add_cmd_to_response_queue(cmd, conn, 1630 cmd->i_state); 1631 return 0; 1632 } 1633 1634 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1635 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1636 ret = 0; 1637 goto ping_out; 1638 } 1639 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1640 return iscsit_add_reject_from_cmd( 1641 ISCSI_REASON_PROTOCOL_ERROR, 1642 1, 0, buf, cmd); 1643 1644 return 0; 1645 } 1646 1647 if (hdr->ttt != 0xFFFFFFFF) { 1648 /* 1649 * This was a response to a unsolicited NOPIN ping. 
		 */
		cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
		if (!cmd)
			return -1;

		iscsit_stop_nopin_response_timer(conn);

		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		iscsit_start_nopin_timer(conn);
	} else {
		/*
		 * Initiator is not expecting a NOPIN in response.
		 * Just ignore for now.
		 *
		 * iSCSI v19-91 10.18
		 * "A NOP-OUT may also be used to confirm a changed
		 *  ExpStatSN if another PDU will not be available
		 *  for a long time."
		 */
		ret = 0;
		goto out;
	}

	return 0;
out:
	if (cmd)
		iscsit_release_cmd(cmd);
ping_out:
	kfree(ping_data);
	return ret;
}

static int iscsit_handle_task_mgt_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;
	struct se_tmr_req *se_tmr;
	struct iscsi_tmr_req *tmr_req;
	struct iscsi_tm *hdr;
	u32 payload_length;
	int out_of_order_cmdsn = 0;
	int ret;
	u8 function;

	hdr = (struct iscsi_tm *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->rtt = be32_to_cpu(hdr->rtt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
	hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
	function = hdr->flags;

	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
		hdr->rtt, hdr->refcmdsn, conn->cid);

	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
	     (hdr->rtt != ISCSI_RESERVED_TAG))) {
		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
		hdr->rtt = ISCSI_RESERVED_TAG;
	}

	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
	    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("Task Management Request TASK_REASSIGN not"
			" issued as immediate command, bad iSCSI Initiator"
			" implementation\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}
	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
		hdr->refcmdsn = ISCSI_RESERVED_TAG;

	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
	if (!cmd)
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

	cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
	cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1 : 0); 1739 cmd->init_task_tag = hdr->itt; 1740 cmd->targ_xfer_tag = 0xFFFFFFFF; 1741 cmd->cmd_sn = hdr->cmdsn; 1742 cmd->exp_stat_sn = hdr->exp_statsn; 1743 se_tmr = cmd->se_cmd.se_tmr_req; 1744 tmr_req = cmd->tmr_req; 1745 /* 1746 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 1747 */ 1748 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1749 ret = iscsit_get_lun_for_tmr(cmd, 1750 get_unaligned_le64(&hdr->lun)); 1751 if (ret < 0) { 1752 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1753 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 1754 goto attach; 1755 } 1756 } 1757 1758 switch (function) { 1759 case ISCSI_TM_FUNC_ABORT_TASK: 1760 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 1761 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) { 1762 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1763 goto attach; 1764 } 1765 break; 1766 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1767 case ISCSI_TM_FUNC_CLEAR_ACA: 1768 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1769 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1770 break; 1771 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1772 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 1773 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1774 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1775 goto attach; 1776 } 1777 break; 1778 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1779 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 1780 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1781 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1782 goto attach; 1783 } 1784 break; 1785 case ISCSI_TM_FUNC_TASK_REASSIGN: 1786 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 1787 /* 1788 * Perform sanity checks on the ExpDataSN only if the 1789 * TASK_REASSIGN was successful. 1790 */ 1791 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) 1792 break; 1793 1794 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 1795 return iscsit_add_reject_from_cmd( 1796 ISCSI_REASON_BOOKMARK_INVALID, 1, 1, 1797 buf, cmd); 1798 break; 1799 default: 1800 pr_err("Unknown TMR function: 0x%02x, protocol" 1801 " error.\n", function); 1802 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1803 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 1804 goto attach; 1805 } 1806 1807 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1808 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 1809 se_tmr->call_transport = 1; 1810attach: 1811 spin_lock_bh(&conn->cmd_lock); 1812 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1813 spin_unlock_bh(&conn->cmd_lock); 1814 1815 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1816 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1817 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1818 out_of_order_cmdsn = 1; 1819 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1820 return 0; 1821 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1822 return iscsit_add_reject_from_cmd( 1823 ISCSI_REASON_PROTOCOL_ERROR, 1824 1, 0, buf, cmd); 1825 } 1826 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1827 1828 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1829 return 0; 1830 /* 1831 * Found the referenced task, send to transport for processing. 1832 */ 1833 if (se_tmr->call_transport) 1834 return transport_generic_handle_tmr(&cmd->se_cmd); 1835 1836 /* 1837 * Could not find the referenced LUN, task, or Task Management 1838 * command not authorized or supported. Change state and 1839 * let the tx_thread send the response. 1840 * 1841 * For connection recovery, this is also the default action for 1842 * TMR TASK_REASSIGN. 
	 */
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	return 0;
}

/* #warning FIXME: Support Text Command parameters besides SendTargets */
static int iscsit_handle_text_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	char *text_ptr, *text_in;
	int cmdsn_ret, niov = 0, rx_got, rx_size;
	u32 checksum = 0, data_crc = 0, payload_length;
	u32 padding = 0, pad_bytes = 0, text_length = 0;
	struct iscsi_cmd *cmd;
	struct kvec iov[3];
	struct iscsi_text *hdr;

	hdr = (struct iscsi_text *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("Unable to accept text parameter length: %u"
			" greater than MaxRecvDataSegmentLength %u.\n",
			payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
		hdr->exp_statsn, payload_length);

	rx_size = text_length = payload_length;
	if (text_length) {
		text_in = kzalloc(text_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate memory for"
				" incoming text parameters\n");
			return -1;
		}

		memset(iov, 0, 3 * sizeof(struct kvec));
		iov[niov].iov_base = text_in;
		iov[niov++].iov_len = text_length;

		padding = ((-payload_length) & 3);
		if (padding != 0) {
			iov[niov].iov_base = &pad_bytes;
			iov[niov++].iov_len = padding;
			rx_size += padding;
			pr_debug("Receiving %u additional bytes"
					" for padding.\n", padding);
		}
		if (conn->conn_ops->DataDigest) {
			iov[niov].iov_base = &checksum;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		rx_got = rx_data(conn, &iov[0], niov, rx_size);
		if (rx_got != rx_size) {
			kfree(text_in);
			return -1;
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
					text_in, text_length,
					padding, (u8 *)&pad_bytes,
					(u8 *)&data_crc);

			if (checksum != data_crc) {
				pr_err("Text data CRC32C DataDigest"
					" 0x%08x does not match computed"
					" 0x%08x\n", checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
						" Text Data digest failure while in"
						" ERL=0.\n");
					kfree(text_in);
					return -1;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
1933 */ 1934 pr_debug("Dropping Text" 1935 " Command CmdSN: 0x%08x due to" 1936 " DataCRC error.\n", hdr->cmdsn); 1937 kfree(text_in); 1938 return 0; 1939 } 1940 } else { 1941 pr_debug("Got CRC32C DataDigest" 1942 " 0x%08x for %u bytes of text data.\n", 1943 checksum, text_length); 1944 } 1945 } 1946 text_in[text_length - 1] = '\0'; 1947 pr_debug("Successfully read %d bytes of text" 1948 " data.\n", text_length); 1949 1950 if (strncmp("SendTargets", text_in, 11) != 0) { 1951 pr_err("Received Text Data that is not" 1952 " SendTargets, cannot continue.\n"); 1953 kfree(text_in); 1954 return -1; 1955 } 1956 text_ptr = strchr(text_in, '='); 1957 if (!text_ptr) { 1958 pr_err("No \"=\" separator found in Text Data," 1959 " cannot continue.\n"); 1960 kfree(text_in); 1961 return -1; 1962 } 1963 if (strncmp("=All", text_ptr, 4) != 0) { 1964 pr_err("Unable to locate All value for" 1965 " SendTargets key, cannot continue.\n"); 1966 kfree(text_in); 1967 return -1; 1968 } 1969/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */ 1970 kfree(text_in); 1971 } 1972 1973 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1974 if (!cmd) 1975 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1976 1, buf, conn); 1977 1978 cmd->iscsi_opcode = ISCSI_OP_TEXT; 1979 cmd->i_state = ISTATE_SEND_TEXTRSP; 1980 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1981 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1982 cmd->targ_xfer_tag = 0xFFFFFFFF; 1983 cmd->cmd_sn = hdr->cmdsn; 1984 cmd->exp_stat_sn = hdr->exp_statsn; 1985 cmd->data_direction = DMA_NONE; 1986 1987 spin_lock_bh(&conn->cmd_lock); 1988 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1989 spin_unlock_bh(&conn->cmd_lock); 1990 1991 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1992 1993 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1994 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1995 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1996 return iscsit_add_reject_from_cmd( 1997 ISCSI_REASON_PROTOCOL_ERROR, 1998 1, 0, buf, cmd); 1999 2000 return 0; 2001 } 2002 2003 return iscsit_execute_cmd(cmd, 0); 2004} 2005 2006int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2007{ 2008 struct iscsi_conn *conn_p; 2009 struct iscsi_session *sess = conn->sess; 2010 2011 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2012 " for SID: %u.\n", conn->cid, conn->sess->sid); 2013 2014 atomic_set(&sess->session_logout, 1); 2015 atomic_set(&conn->conn_logout_remove, 1); 2016 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2017 2018 iscsit_inc_conn_usage_count(conn); 2019 iscsit_inc_session_usage_count(sess); 2020 2021 spin_lock_bh(&sess->conn_lock); 2022 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2023 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2024 continue; 2025 2026 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2027 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2028 } 2029 spin_unlock_bh(&sess->conn_lock); 2030 2031 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2032 2033 return 0; 2034} 2035 2036int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2037{ 2038 struct iscsi_conn *l_conn; 2039 struct iscsi_session *sess = conn->sess; 2040 2041 pr_debug("Received logout request CLOSECONNECTION for CID:" 2042 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2043 2044 /* 2045 * A Logout Request with a CLOSECONNECTION reason code for a CID 2046 * can arrive on a connection with a differing 
CID. 2047 */ 2048 if (conn->cid == cmd->logout_cid) { 2049 spin_lock_bh(&conn->state_lock); 2050 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2051 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2052 2053 atomic_set(&conn->conn_logout_remove, 1); 2054 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2055 iscsit_inc_conn_usage_count(conn); 2056 2057 spin_unlock_bh(&conn->state_lock); 2058 } else { 2059 /* 2060 * Handle all different cid CLOSECONNECTION requests in 2061 * iscsit_logout_post_handler_diffcid() as to give enough 2062 * time for any non immediate command's CmdSN to be 2063 * acknowledged on the connection in question. 2064 * 2065 * Here we simply make sure the CID is still around. 2066 */ 2067 l_conn = iscsit_get_conn_from_cid(sess, 2068 cmd->logout_cid); 2069 if (!l_conn) { 2070 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2071 iscsit_add_cmd_to_response_queue(cmd, conn, 2072 cmd->i_state); 2073 return 0; 2074 } 2075 2076 iscsit_dec_conn_usage_count(l_conn); 2077 } 2078 2079 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2080 2081 return 0; 2082} 2083 2084int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2085{ 2086 struct iscsi_session *sess = conn->sess; 2087 2088 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2089 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2090 2091 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2092 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2093 " while ERL!=2.\n"); 2094 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2095 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2096 return 0; 2097 } 2098 2099 if (conn->cid == cmd->logout_cid) { 2100 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2101 " with CID: %hu on CID: %hu, implementation error.\n", 2102 cmd->logout_cid, conn->cid); 2103 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2104 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2105 return 0; 2106 } 2107 2108 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2109 2110 return 0; 2111} 2112 2113static int iscsit_handle_logout_cmd( 2114 struct iscsi_conn *conn, 2115 unsigned char *buf) 2116{ 2117 int cmdsn_ret, logout_remove = 0; 2118 u8 reason_code = 0; 2119 struct iscsi_cmd *cmd; 2120 struct iscsi_logout *hdr; 2121 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2122 2123 hdr = (struct iscsi_logout *) buf; 2124 reason_code = (hdr->flags & 0x7f); 2125 hdr->itt = be32_to_cpu(hdr->itt); 2126 hdr->cid = be16_to_cpu(hdr->cid); 2127 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 2128 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2129 2130 if (tiqn) { 2131 spin_lock(&tiqn->logout_stats.lock); 2132 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2133 tiqn->logout_stats.normal_logouts++; 2134 else 2135 tiqn->logout_stats.abnormal_logouts++; 2136 spin_unlock(&tiqn->logout_stats.lock); 2137 } 2138 2139 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2140 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2141 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2142 hdr->cid, conn->cid); 2143 2144 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2145 pr_err("Received logout request on connection that" 2146 " is not in logged in state, ignoring request.\n"); 2147 return 0; 2148 } 2149 2150 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 2151 if (!cmd) 2152 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 2153 buf, conn); 2154 2155 cmd->iscsi_opcode = 
ISCSI_OP_LOGOUT; 2156 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2157 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2158 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2159 cmd->targ_xfer_tag = 0xFFFFFFFF; 2160 cmd->cmd_sn = hdr->cmdsn; 2161 cmd->exp_stat_sn = hdr->exp_statsn; 2162 cmd->logout_cid = hdr->cid; 2163 cmd->logout_reason = reason_code; 2164 cmd->data_direction = DMA_NONE; 2165 2166 /* 2167 * We need to sleep in these cases (by returning 1) until the Logout 2168 * Response gets sent in the tx thread. 2169 */ 2170 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2171 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2172 (hdr->cid == conn->cid))) 2173 logout_remove = 1; 2174 2175 spin_lock_bh(&conn->cmd_lock); 2176 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 2177 spin_unlock_bh(&conn->cmd_lock); 2178 2179 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2180 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2181 2182 /* 2183 * Immediate commands are executed, well, immediately. 2184 * Non-Immediate Logout Commands are executed in CmdSN order. 2185 */ 2186 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 2187 int ret = iscsit_execute_cmd(cmd, 0); 2188 2189 if (ret < 0) 2190 return ret; 2191 } else { 2192 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2193 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 2194 logout_remove = 0; 2195 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 2196 return iscsit_add_reject_from_cmd( 2197 ISCSI_REASON_PROTOCOL_ERROR, 2198 1, 0, buf, cmd); 2199 } 2200 } 2201 2202 return logout_remove; 2203} 2204 2205static int iscsit_handle_snack( 2206 struct iscsi_conn *conn, 2207 unsigned char *buf) 2208{ 2209 u32 unpacked_lun; 2210 u64 lun; 2211 struct iscsi_snack *hdr; 2212 2213 hdr = (struct iscsi_snack *) buf; 2214 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2215 lun = get_unaligned_le64(&hdr->lun); 2216 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2217 hdr->itt = be32_to_cpu(hdr->itt); 2218 hdr->ttt = be32_to_cpu(hdr->ttt); 2219 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2220 hdr->begrun = be32_to_cpu(hdr->begrun); 2221 hdr->runlength = be32_to_cpu(hdr->runlength); 2222 2223 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2224 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2225 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, 2226 hdr->begrun, hdr->runlength, conn->cid); 2227 2228 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2229 pr_err("Initiator sent SNACK request while in" 2230 " ErrorRecoveryLevel=0.\n"); 2231 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2232 buf, conn); 2233 } 2234 /* 2235 * SNACK_DATA and SNACK_R2T are both 0, so check which function to 2236 * call from inside iscsi_send_recovery_datain_or_r2t(). 
2237 */ 2238 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2239 case 0: 2240 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2241 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2242 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2243 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2244 hdr->begrun, hdr->runlength); 2245 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2246 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun, 2247 hdr->runlength); 2248 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2249 /* FIXME: Support R-Data SNACK */ 2250 pr_err("R-Data SNACK Not Supported.\n"); 2251 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2252 buf, conn); 2253 default: 2254 pr_err("Unknown SNACK type 0x%02x, protocol" 2255 " error.\n", hdr->flags & 0x0f); 2256 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2257 buf, conn); 2258 } 2259 2260 return 0; 2261} 2262 2263static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) 2264{ 2265 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2266 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2267 wait_for_completion_interruptible_timeout( 2268 &conn->rx_half_close_comp, 2269 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); 2270 } 2271} 2272 2273static int iscsit_handle_immediate_data( 2274 struct iscsi_cmd *cmd, 2275 unsigned char *buf, 2276 u32 length) 2277{ 2278 int iov_ret, rx_got = 0, rx_size = 0; 2279 u32 checksum, iov_count = 0, padding = 0; 2280 struct iscsi_conn *conn = cmd->conn; 2281 struct kvec *iov; 2282 2283 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length); 2284 if (iov_ret < 0) 2285 return IMMEDIATE_DATA_CANNOT_RECOVER; 2286 2287 rx_size = length; 2288 iov_count = iov_ret; 2289 iov = &cmd->iov_data[0]; 2290 2291 padding = ((-length) & 3); 2292 if (padding != 0) { 2293 iov[iov_count].iov_base = cmd->pad_bytes; 2294 iov[iov_count++].iov_len = padding; 2295 rx_size += padding; 2296 } 2297 2298 if (conn->conn_ops->DataDigest) { 2299 iov[iov_count].iov_base = &checksum; 2300 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2301 rx_size += ISCSI_CRC_LEN; 2302 } 2303 2304 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 2305 2306 iscsit_unmap_iovec(cmd); 2307 2308 if (rx_got != rx_size) { 2309 iscsit_rx_thread_wait_for_tcp(conn); 2310 return IMMEDIATE_DATA_CANNOT_RECOVER; 2311 } 2312 2313 if (conn->conn_ops->DataDigest) { 2314 u32 data_crc; 2315 2316 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 2317 cmd->write_data_done, length, padding, 2318 cmd->pad_bytes); 2319 2320 if (checksum != data_crc) { 2321 pr_err("ImmediateData CRC32C DataDigest 0x%08x" 2322 " does not match computed 0x%08x\n", checksum, 2323 data_crc); 2324 2325 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2326 pr_err("Unable to recover from" 2327 " Immediate Data digest failure while" 2328 " in ERL=0.\n"); 2329 iscsit_add_reject_from_cmd( 2330 ISCSI_REASON_DATA_DIGEST_ERROR, 2331 1, 0, buf, cmd); 2332 return IMMEDIATE_DATA_CANNOT_RECOVER; 2333 } else { 2334 iscsit_add_reject_from_cmd( 2335 ISCSI_REASON_DATA_DIGEST_ERROR, 2336 0, 0, buf, cmd); 2337 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2338 } 2339 } else { 2340 pr_debug("Got CRC32C DataDigest 0x%08x for" 2341 " %u bytes of Immediate Data\n", checksum, 2342 length); 2343 } 2344 } 2345 2346 cmd->write_data_done += length; 2347 2348 if (cmd->write_data_done == cmd->data_length) { 2349 spin_lock_bh(&cmd->istate_lock); 2350 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 2351 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 2352 spin_unlock_bh(&cmd->istate_lock); 2353 } 2354 2355 
return IMMEDIATE_DATA_NORMAL_OPERATION; 2356} 2357 2358/* 2359 * Called with sess->conn_lock held. 2360 */ 2361/* #warning iscsi_build_conn_drop_async_message() only sends out on connections 2362 with active network interface */ 2363static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) 2364{ 2365 struct iscsi_cmd *cmd; 2366 struct iscsi_conn *conn_p; 2367 2368 /* 2369 * Only send a Asynchronous Message on connections whos network 2370 * interface is still functional. 2371 */ 2372 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2373 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2374 iscsit_inc_conn_usage_count(conn_p); 2375 break; 2376 } 2377 } 2378 2379 if (!conn_p) 2380 return; 2381 2382 cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL); 2383 if (!cmd) { 2384 iscsit_dec_conn_usage_count(conn_p); 2385 return; 2386 } 2387 2388 cmd->logout_cid = conn->cid; 2389 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2390 cmd->i_state = ISTATE_SEND_ASYNCMSG; 2391 2392 spin_lock_bh(&conn_p->cmd_lock); 2393 list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list); 2394 spin_unlock_bh(&conn_p->cmd_lock); 2395 2396 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); 2397 iscsit_dec_conn_usage_count(conn_p); 2398} 2399 2400static int iscsit_send_conn_drop_async_message( 2401 struct iscsi_cmd *cmd, 2402 struct iscsi_conn *conn) 2403{ 2404 struct iscsi_async *hdr; 2405 2406 cmd->tx_size = ISCSI_HDR_LEN; 2407 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2408 2409 hdr = (struct iscsi_async *) cmd->pdu; 2410 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2411 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2412 cmd->init_task_tag = 0xFFFFFFFF; 2413 cmd->targ_xfer_tag = 0xFFFFFFFF; 2414 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2415 cmd->stat_sn = conn->stat_sn++; 2416 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2417 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2418 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2419 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2420 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2421 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2422 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2423 2424 if (conn->conn_ops->HeaderDigest) { 2425 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2426 2427 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2428 (unsigned char *)hdr, ISCSI_HDR_LEN, 2429 0, NULL, (u8 *)header_digest); 2430 2431 cmd->tx_size += ISCSI_CRC_LEN; 2432 pr_debug("Attaching CRC32C HeaderDigest to" 2433 " Async Message 0x%08x\n", *header_digest); 2434 } 2435 2436 cmd->iov_misc[0].iov_base = cmd->pdu; 2437 cmd->iov_misc[0].iov_len = cmd->tx_size; 2438 cmd->iov_misc_count = 1; 2439 2440 pr_debug("Sending Connection Dropped Async Message StatSN:" 2441 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2442 cmd->logout_cid, conn->cid); 2443 return 0; 2444} 2445 2446static int iscsit_send_data_in( 2447 struct iscsi_cmd *cmd, 2448 struct iscsi_conn *conn, 2449 int *eodr) 2450{ 2451 int iov_ret = 0, set_statsn = 0; 2452 u32 iov_count = 0, tx_size = 0; 2453 struct iscsi_datain datain; 2454 struct iscsi_datain_req *dr; 2455 struct iscsi_data_rsp *hdr; 2456 struct kvec *iov; 2457 2458 memset(&datain, 0, sizeof(struct iscsi_datain)); 2459 dr = iscsit_get_datain_values(cmd, &datain); 2460 if (!dr) { 2461 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", 2462 cmd->init_task_tag); 2463 return -1; 2464 } 2465 2466 /* 2467 * Be paranoid and double check the logic for now. 
2468 */ 2469 if ((datain.offset + datain.length) > cmd->data_length) { 2470 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2471 " datain.length: %u exceeds cmd->data_length: %u\n", 2472 cmd->init_task_tag, datain.offset, datain.length, 2473 cmd->data_length); 2474 return -1; 2475 } 2476 2477 spin_lock_bh(&conn->sess->session_stats_lock); 2478 conn->sess->tx_data_octets += datain.length; 2479 if (conn->sess->se_sess->se_node_acl) { 2480 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 2481 conn->sess->se_sess->se_node_acl->read_bytes += datain.length; 2482 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 2483 } 2484 spin_unlock_bh(&conn->sess->session_stats_lock); 2485 /* 2486 * Special case for successfully execution w/ both DATAIN 2487 * and Sense Data. 2488 */ 2489 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && 2490 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) 2491 datain.flags &= ~ISCSI_FLAG_DATA_STATUS; 2492 else { 2493 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || 2494 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2495 iscsit_increment_maxcmdsn(cmd, conn->sess); 2496 cmd->stat_sn = conn->stat_sn++; 2497 set_statsn = 1; 2498 } else if (dr->dr_complete == 2499 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2500 set_statsn = 1; 2501 } 2502 2503 hdr = (struct iscsi_data_rsp *) cmd->pdu; 2504 memset(hdr, 0, ISCSI_HDR_LEN); 2505 hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2506 hdr->flags = datain.flags; 2507 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2508 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2509 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2510 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2511 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2512 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2513 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2514 } 2515 } 2516 hton24(hdr->dlength, datain.length); 2517 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2518 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2519 (struct scsi_lun *)&hdr->lun); 2520 else 2521 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2522 2523 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2524 hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ? 2525 cpu_to_be32(cmd->targ_xfer_tag) : 2526 0xFFFFFFFF; 2527 hdr->statsn = (set_statsn) ? 
cpu_to_be32(cmd->stat_sn) : 2528 0xFFFFFFFF; 2529 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2530 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2531 hdr->datasn = cpu_to_be32(datain.data_sn); 2532 hdr->offset = cpu_to_be32(datain.offset); 2533 2534 iov = &cmd->iov_data[0]; 2535 iov[iov_count].iov_base = cmd->pdu; 2536 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 2537 tx_size += ISCSI_HDR_LEN; 2538 2539 if (conn->conn_ops->HeaderDigest) { 2540 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2541 2542 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2543 (unsigned char *)hdr, ISCSI_HDR_LEN, 2544 0, NULL, (u8 *)header_digest); 2545 2546 iov[0].iov_len += ISCSI_CRC_LEN; 2547 tx_size += ISCSI_CRC_LEN; 2548 2549 pr_debug("Attaching CRC32 HeaderDigest" 2550 " for DataIN PDU 0x%08x\n", *header_digest); 2551 } 2552 2553 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); 2554 if (iov_ret < 0) 2555 return -1; 2556 2557 iov_count += iov_ret; 2558 tx_size += datain.length; 2559 2560 cmd->padding = ((-datain.length) & 3); 2561 if (cmd->padding) { 2562 iov[iov_count].iov_base = cmd->pad_bytes; 2563 iov[iov_count++].iov_len = cmd->padding; 2564 tx_size += cmd->padding; 2565 2566 pr_debug("Attaching %u padding bytes\n", 2567 cmd->padding); 2568 } 2569 if (conn->conn_ops->DataDigest) { 2570 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd, 2571 datain.offset, datain.length, cmd->padding, cmd->pad_bytes); 2572 2573 iov[iov_count].iov_base = &cmd->data_crc; 2574 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2575 tx_size += ISCSI_CRC_LEN; 2576 2577 pr_debug("Attached CRC32C DataDigest %d bytes, crc" 2578 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); 2579 } 2580 2581 cmd->iov_data_count = iov_count; 2582 cmd->tx_size = tx_size; 2583 2584 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2585 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2586 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2587 ntohl(hdr->offset), datain.length, conn->cid); 2588 2589 if (dr->dr_complete) { 2590 *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2591 2 : 1; 2592 iscsit_free_datain_req(cmd, dr); 2593 } 2594 2595 return 0; 2596} 2597 2598static int iscsit_send_logout_response( 2599 struct iscsi_cmd *cmd, 2600 struct iscsi_conn *conn) 2601{ 2602 int niov = 0, tx_size; 2603 struct iscsi_conn *logout_conn = NULL; 2604 struct iscsi_conn_recovery *cr = NULL; 2605 struct iscsi_session *sess = conn->sess; 2606 struct kvec *iov; 2607 struct iscsi_logout_rsp *hdr; 2608 /* 2609 * The actual shutting down of Sessions and/or Connections 2610 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2611 * is done in scsi_logout_post_handler(). 2612 */ 2613 switch (cmd->logout_reason) { 2614 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 2615 pr_debug("iSCSI session logout successful, setting" 2616 " logout response to ISCSI_LOGOUT_SUCCESS.\n"); 2617 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2618 break; 2619 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 2620 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) 2621 break; 2622 /* 2623 * For CLOSECONNECTION logout requests carrying 2624 * a matching logout CID -> local CID, the reference 2625 * for the local CID will have been incremented in 2626 * iscsi_logout_closeconnection(). 
2627 * 2628 * For CLOSECONNECTION logout requests carrying 2629 * a different CID than the connection it arrived 2630 * on, the connection responding to cmd->logout_cid 2631 * is stopped in iscsit_logout_post_handler_diffcid(). 2632 */ 2633 2634 pr_debug("iSCSI CID: %hu logout on CID: %hu" 2635 " successful.\n", cmd->logout_cid, conn->cid); 2636 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2637 break; 2638 case ISCSI_LOGOUT_REASON_RECOVERY: 2639 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || 2640 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) 2641 break; 2642 /* 2643 * If the connection is still active from our point of view 2644 * force connection recovery to occur. 2645 */ 2646 logout_conn = iscsit_get_conn_from_cid_rcfr(sess, 2647 cmd->logout_cid); 2648 if ((logout_conn)) { 2649 iscsit_connection_reinstatement_rcfr(logout_conn); 2650 iscsit_dec_conn_usage_count(logout_conn); 2651 } 2652 2653 cr = iscsit_get_inactive_connection_recovery_entry( 2654 conn->sess, cmd->logout_cid); 2655 if (!cr) { 2656 pr_err("Unable to locate CID: %hu for" 2657 " REMOVECONNFORRECOVERY Logout Request.\n", 2658 cmd->logout_cid); 2659 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2660 break; 2661 } 2662 2663 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); 2664 2665 pr_debug("iSCSI REMOVECONNFORRECOVERY logout" 2666 " for recovery for CID: %hu on CID: %hu successful.\n", 2667 cmd->logout_cid, conn->cid); 2668 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2669 break; 2670 default: 2671 pr_err("Unknown cmd->logout_reason: 0x%02x\n", 2672 cmd->logout_reason); 2673 return -1; 2674 } 2675 2676 tx_size = ISCSI_HDR_LEN; 2677 hdr = (struct iscsi_logout_rsp *)cmd->pdu; 2678 memset(hdr, 0, ISCSI_HDR_LEN); 2679 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2680 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2681 hdr->response = cmd->logout_response; 2682 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2683 cmd->stat_sn = conn->stat_sn++; 2684 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2685 2686 iscsit_increment_maxcmdsn(cmd, conn->sess); 2687 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2688 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2689 2690 iov = &cmd->iov_misc[0]; 2691 iov[niov].iov_base = cmd->pdu; 2692 iov[niov++].iov_len = ISCSI_HDR_LEN; 2693 2694 if (conn->conn_ops->HeaderDigest) { 2695 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2696 2697 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2698 (unsigned char *)hdr, ISCSI_HDR_LEN, 2699 0, NULL, (u8 *)header_digest); 2700 2701 iov[0].iov_len += ISCSI_CRC_LEN; 2702 tx_size += ISCSI_CRC_LEN; 2703 pr_debug("Attaching CRC32C HeaderDigest to" 2704 " Logout Response 0x%08x\n", *header_digest); 2705 } 2706 cmd->iov_misc_count = niov; 2707 cmd->tx_size = tx_size; 2708 2709 pr_debug("Sending Logout Response ITT: 0x%08x StatSN:" 2710 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2711 cmd->init_task_tag, cmd->stat_sn, hdr->response, 2712 cmd->logout_cid, conn->cid); 2713 2714 return 0; 2715} 2716 2717/* 2718 * Unsolicited NOPIN, either requesting a response or not. 
2719 */ 2720static int iscsit_send_unsolicited_nopin( 2721 struct iscsi_cmd *cmd, 2722 struct iscsi_conn *conn, 2723 int want_response) 2724{ 2725 int tx_size = ISCSI_HDR_LEN; 2726 struct iscsi_nopin *hdr; 2727 2728 hdr = (struct iscsi_nopin *) cmd->pdu; 2729 memset(hdr, 0, ISCSI_HDR_LEN); 2730 hdr->opcode = ISCSI_OP_NOOP_IN; 2731 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2732 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2733 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2734 cmd->stat_sn = conn->stat_sn; 2735 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2736 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2737 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2738 2739 if (conn->conn_ops->HeaderDigest) { 2740 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2741 2742 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2743 (unsigned char *)hdr, ISCSI_HDR_LEN, 2744 0, NULL, (u8 *)header_digest); 2745 2746 tx_size += ISCSI_CRC_LEN; 2747 pr_debug("Attaching CRC32C HeaderDigest to" 2748 " NopIN 0x%08x\n", *header_digest); 2749 } 2750 2751 cmd->iov_misc[0].iov_base = cmd->pdu; 2752 cmd->iov_misc[0].iov_len = tx_size; 2753 cmd->iov_misc_count = 1; 2754 cmd->tx_size = tx_size; 2755 2756 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 2757 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 2758 2759 return 0; 2760} 2761 2762static int iscsit_send_nopin_response( 2763 struct iscsi_cmd *cmd, 2764 struct iscsi_conn *conn) 2765{ 2766 int niov = 0, tx_size; 2767 u32 padding = 0; 2768 struct kvec *iov; 2769 struct iscsi_nopin *hdr; 2770 2771 tx_size = ISCSI_HDR_LEN; 2772 hdr = (struct iscsi_nopin *) cmd->pdu; 2773 memset(hdr, 0, ISCSI_HDR_LEN); 2774 hdr->opcode = ISCSI_OP_NOOP_IN; 2775 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2776 hton24(hdr->dlength, cmd->buf_ptr_size); 2777 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2778 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2779 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2780 cmd->stat_sn = conn->stat_sn++; 2781 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2782 2783 iscsit_increment_maxcmdsn(cmd, conn->sess); 2784 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2785 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2786 2787 iov = &cmd->iov_misc[0]; 2788 iov[niov].iov_base = cmd->pdu; 2789 iov[niov++].iov_len = ISCSI_HDR_LEN; 2790 2791 if (conn->conn_ops->HeaderDigest) { 2792 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2793 2794 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2795 (unsigned char *)hdr, ISCSI_HDR_LEN, 2796 0, NULL, (u8 *)header_digest); 2797 2798 iov[0].iov_len += ISCSI_CRC_LEN; 2799 tx_size += ISCSI_CRC_LEN; 2800 pr_debug("Attaching CRC32C HeaderDigest" 2801 " to NopIn 0x%08x\n", *header_digest); 2802 } 2803 2804 /* 2805 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. 2806 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. 
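 * The echoed payload is padded to a 4-byte boundary below, e.g. a
 * 5 byte ping payload picks up padding = ((-5) & 3) = 3 pad bytes
 * before the optional DataDigest is appended.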
2807 */ 2808 if (cmd->buf_ptr_size) { 2809 iov[niov].iov_base = cmd->buf_ptr; 2810 iov[niov++].iov_len = cmd->buf_ptr_size; 2811 tx_size += cmd->buf_ptr_size; 2812 2813 pr_debug("Echoing back %u bytes of ping" 2814 " data.\n", cmd->buf_ptr_size); 2815 2816 padding = ((-cmd->buf_ptr_size) & 3); 2817 if (padding != 0) { 2818 iov[niov].iov_base = &cmd->pad_bytes; 2819 iov[niov++].iov_len = padding; 2820 tx_size += padding; 2821 pr_debug("Attaching %u additional" 2822 " padding bytes.\n", padding); 2823 } 2824 if (conn->conn_ops->DataDigest) { 2825 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2826 cmd->buf_ptr, cmd->buf_ptr_size, 2827 padding, (u8 *)&cmd->pad_bytes, 2828 (u8 *)&cmd->data_crc); 2829 2830 iov[niov].iov_base = &cmd->data_crc; 2831 iov[niov++].iov_len = ISCSI_CRC_LEN; 2832 tx_size += ISCSI_CRC_LEN; 2833 pr_debug("Attached DataDigest for %u" 2834 " bytes of ping data, CRC 0x%08x\n", 2835 cmd->buf_ptr_size, cmd->data_crc); 2836 } 2837 } 2838 2839 cmd->iov_misc_count = niov; 2840 cmd->tx_size = tx_size; 2841 2842 pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:" 2843 " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag, 2844 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2845 2846 return 0; 2847} 2848 2849int iscsit_send_r2t( 2850 struct iscsi_cmd *cmd, 2851 struct iscsi_conn *conn) 2852{ 2853 int tx_size = 0; 2854 struct iscsi_r2t *r2t; 2855 struct iscsi_r2t_rsp *hdr; 2856 2857 r2t = iscsit_get_r2t_from_list(cmd); 2858 if (!r2t) 2859 return -1; 2860 2861 hdr = (struct iscsi_r2t_rsp *) cmd->pdu; 2862 memset(hdr, 0, ISCSI_HDR_LEN); 2863 hdr->opcode = ISCSI_OP_R2T; 2864 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2865 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2866 (struct scsi_lun *)&hdr->lun); 2867 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2868 spin_lock_bh(&conn->sess->ttt_lock); 2869 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2870 if (r2t->targ_xfer_tag == 0xFFFFFFFF) 2871 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2872 spin_unlock_bh(&conn->sess->ttt_lock); 2873 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 2874 hdr->statsn = cpu_to_be32(conn->stat_sn); 2875 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2876 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2877 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 2878 hdr->data_offset = cpu_to_be32(r2t->offset); 2879 hdr->data_length = cpu_to_be32(r2t->xfer_len); 2880 2881 cmd->iov_misc[0].iov_base = cmd->pdu; 2882 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 2883 tx_size += ISCSI_HDR_LEN; 2884 2885 if (conn->conn_ops->HeaderDigest) { 2886 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2887 2888 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2889 (unsigned char *)hdr, ISCSI_HDR_LEN, 2890 0, NULL, (u8 *)header_digest); 2891 2892 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 2893 tx_size += ISCSI_CRC_LEN; 2894 pr_debug("Attaching CRC32 HeaderDigest for R2T" 2895 " PDU 0x%08x\n", *header_digest); 2896 } 2897 2898 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 2899 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 2900 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, 2901 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 2902 r2t->offset, r2t->xfer_len, conn->cid); 2903 2904 cmd->iov_misc_count = 1; 2905 cmd->tx_size = tx_size; 2906 2907 spin_lock_bh(&cmd->r2t_lock); 2908 r2t->sent_r2t = 1; 2909 spin_unlock_bh(&cmd->r2t_lock); 2910 2911 return 0; 2912} 2913 2914/* 2915 * type 0: Normal Operation. 2916 * type 1: Called from Storage Transport. 
2917 * type 2: Called from iscsi_task_reassign_complete_write() for 2918 * connection recovery. 2919 */ 2920int iscsit_build_r2ts_for_cmd( 2921 struct iscsi_cmd *cmd, 2922 struct iscsi_conn *conn, 2923 int type) 2924{ 2925 int first_r2t = 1; 2926 u32 offset = 0, xfer_len = 0; 2927 2928 spin_lock_bh(&cmd->r2t_lock); 2929 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { 2930 spin_unlock_bh(&cmd->r2t_lock); 2931 return 0; 2932 } 2933 2934 if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2)) 2935 if (cmd->r2t_offset < cmd->write_data_done) 2936 cmd->r2t_offset = cmd->write_data_done; 2937 2938 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { 2939 if (conn->sess->sess_ops->DataSequenceInOrder) { 2940 offset = cmd->r2t_offset; 2941 2942 if (first_r2t && (type == 2)) { 2943 xfer_len = ((offset + 2944 (conn->sess->sess_ops->MaxBurstLength - 2945 cmd->next_burst_len) > 2946 cmd->data_length) ? 2947 (cmd->data_length - offset) : 2948 (conn->sess->sess_ops->MaxBurstLength - 2949 cmd->next_burst_len)); 2950 } else { 2951 xfer_len = ((offset + 2952 conn->sess->sess_ops->MaxBurstLength) > 2953 cmd->data_length) ? 2954 (cmd->data_length - offset) : 2955 conn->sess->sess_ops->MaxBurstLength; 2956 } 2957 cmd->r2t_offset += xfer_len; 2958 2959 if (cmd->r2t_offset == cmd->data_length) 2960 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 2961 } else { 2962 struct iscsi_seq *seq; 2963 2964 seq = iscsit_get_seq_holder_for_r2t(cmd); 2965 if (!seq) { 2966 spin_unlock_bh(&cmd->r2t_lock); 2967 return -1; 2968 } 2969 2970 offset = seq->offset; 2971 xfer_len = seq->xfer_len; 2972 2973 if (cmd->seq_send_order == cmd->seq_count) 2974 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 2975 } 2976 cmd->outstanding_r2ts++; 2977 first_r2t = 0; 2978 2979 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { 2980 spin_unlock_bh(&cmd->r2t_lock); 2981 return -1; 2982 } 2983 2984 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) 2985 break; 2986 } 2987 spin_unlock_bh(&cmd->r2t_lock); 2988 2989 return 0; 2990} 2991 2992static int iscsit_send_status( 2993 struct iscsi_cmd *cmd, 2994 struct iscsi_conn *conn) 2995{ 2996 u8 iov_count = 0, recovery; 2997 u32 padding = 0, tx_size = 0; 2998 struct iscsi_scsi_rsp *hdr; 2999 struct kvec *iov; 3000 3001 recovery = (cmd->i_state != ISTATE_SEND_STATUS); 3002 if (!recovery) 3003 cmd->stat_sn = conn->stat_sn++; 3004 3005 spin_lock_bh(&conn->sess->session_stats_lock); 3006 conn->sess->rsp_pdus++; 3007 spin_unlock_bh(&conn->sess->session_stats_lock); 3008 3009 hdr = (struct iscsi_scsi_rsp *) cmd->pdu; 3010 memset(hdr, 0, ISCSI_HDR_LEN); 3011 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3012 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3013 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3014 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3015 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3016 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3017 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3018 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3019 } 3020 hdr->response = cmd->iscsi_response; 3021 hdr->cmd_status = cmd->se_cmd.scsi_status; 3022 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3023 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3024 3025 iscsit_increment_maxcmdsn(cmd, conn->sess); 3026 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3027 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3028 3029 iov = &cmd->iov_misc[0]; 3030 iov[iov_count].iov_base = cmd->pdu; 3031 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3032 tx_size += ISCSI_HDR_LEN; 3033 3034 /* 3035 * Attach 
SENSE DATA payload to iSCSI Response PDU 3036 */ 3037 if (cmd->se_cmd.sense_buffer && 3038 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3039 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3040 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3041 hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length); 3042 iov[iov_count].iov_base = cmd->se_cmd.sense_buffer; 3043 iov[iov_count++].iov_len = 3044 (cmd->se_cmd.scsi_sense_length + padding); 3045 tx_size += cmd->se_cmd.scsi_sense_length; 3046 3047 if (padding) { 3048 memset(cmd->se_cmd.sense_buffer + 3049 cmd->se_cmd.scsi_sense_length, 0, padding); 3050 tx_size += padding; 3051 pr_debug("Adding %u bytes of padding to" 3052 " SENSE.\n", padding); 3053 } 3054 3055 if (conn->conn_ops->DataDigest) { 3056 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3057 cmd->se_cmd.sense_buffer, 3058 (cmd->se_cmd.scsi_sense_length + padding), 3059 0, NULL, (u8 *)&cmd->data_crc); 3060 3061 iov[iov_count].iov_base = &cmd->data_crc; 3062 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3063 tx_size += ISCSI_CRC_LEN; 3064 3065 pr_debug("Attaching CRC32 DataDigest for" 3066 " SENSE, %u bytes CRC 0x%08x\n", 3067 (cmd->se_cmd.scsi_sense_length + padding), 3068 cmd->data_crc); 3069 } 3070 3071 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3072 " Response PDU\n", 3073 cmd->se_cmd.scsi_sense_length); 3074 } 3075 3076 if (conn->conn_ops->HeaderDigest) { 3077 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3078 3079 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3080 (unsigned char *)hdr, ISCSI_HDR_LEN, 3081 0, NULL, (u8 *)header_digest); 3082 3083 iov[0].iov_len += ISCSI_CRC_LEN; 3084 tx_size += ISCSI_CRC_LEN; 3085 pr_debug("Attaching CRC32 HeaderDigest for Response" 3086 " PDU 0x%08x\n", *header_digest); 3087 } 3088 3089 cmd->iov_misc_count = iov_count; 3090 cmd->tx_size = tx_size; 3091 3092 pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3093 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3094 (!recovery) ? 
"" : "Recovery ", cmd->init_task_tag, 3095 cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid); 3096 3097 return 0; 3098} 3099 3100static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3101{ 3102 switch (se_tmr->response) { 3103 case TMR_FUNCTION_COMPLETE: 3104 return ISCSI_TMF_RSP_COMPLETE; 3105 case TMR_TASK_DOES_NOT_EXIST: 3106 return ISCSI_TMF_RSP_NO_TASK; 3107 case TMR_LUN_DOES_NOT_EXIST: 3108 return ISCSI_TMF_RSP_NO_LUN; 3109 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3110 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3111 case TMR_FUNCTION_AUTHORIZATION_FAILED: 3112 return ISCSI_TMF_RSP_AUTH_FAILED; 3113 case TMR_FUNCTION_REJECTED: 3114 default: 3115 return ISCSI_TMF_RSP_REJECTED; 3116 } 3117} 3118 3119static int iscsit_send_task_mgt_rsp( 3120 struct iscsi_cmd *cmd, 3121 struct iscsi_conn *conn) 3122{ 3123 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3124 struct iscsi_tm_rsp *hdr; 3125 u32 tx_size = 0; 3126 3127 hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3128 memset(hdr, 0, ISCSI_HDR_LEN); 3129 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3130 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3131 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3132 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3133 cmd->stat_sn = conn->stat_sn++; 3134 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3135 3136 iscsit_increment_maxcmdsn(cmd, conn->sess); 3137 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3138 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3139 3140 cmd->iov_misc[0].iov_base = cmd->pdu; 3141 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3142 tx_size += ISCSI_HDR_LEN; 3143 3144 if (conn->conn_ops->HeaderDigest) { 3145 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3146 3147 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3148 (unsigned char *)hdr, ISCSI_HDR_LEN, 3149 0, NULL, (u8 *)header_digest); 3150 3151 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3152 tx_size += ISCSI_CRC_LEN; 3153 pr_debug("Attaching CRC32 HeaderDigest for Task" 3154 " Mgmt Response PDU 0x%08x\n", *header_digest); 3155 } 3156 3157 cmd->iov_misc_count = 1; 3158 cmd->tx_size = tx_size; 3159 3160 pr_debug("Built Task Management Response ITT: 0x%08x," 3161 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3162 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3163 3164 return 0; 3165} 3166 3167static bool iscsit_check_inaddr_any(struct iscsi_np *np) 3168{ 3169 bool ret = false; 3170 3171 if (np->np_sockaddr.ss_family == AF_INET6) { 3172 const struct sockaddr_in6 sin6 = { 3173 .sin6_addr = IN6ADDR_ANY_INIT }; 3174 struct sockaddr_in6 *sock_in6 = 3175 (struct sockaddr_in6 *)&np->np_sockaddr; 3176 3177 if (!memcmp(sock_in6->sin6_addr.s6_addr, 3178 sin6.sin6_addr.s6_addr, 16)) 3179 ret = true; 3180 } else { 3181 struct sockaddr_in * sock_in = 3182 (struct sockaddr_in *)&np->np_sockaddr; 3183 3184 if (sock_in->sin_addr.s_addr == INADDR_ANY) 3185 ret = true; 3186 } 3187 3188 return ret; 3189} 3190 3191static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3192{ 3193 char *payload = NULL; 3194 struct iscsi_conn *conn = cmd->conn; 3195 struct iscsi_portal_group *tpg; 3196 struct iscsi_tiqn *tiqn; 3197 struct iscsi_tpg_np *tpg_np; 3198 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3199 unsigned char buf[256]; 3200 3201 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? 
3202 32768 : conn->conn_ops->MaxRecvDataSegmentLength; 3203 3204 memset(buf, 0, 256); 3205 3206 payload = kzalloc(buffer_len, GFP_KERNEL); 3207 if (!payload) { 3208 pr_err("Unable to allocate memory for sendtargets" 3209 " response.\n"); 3210 return -ENOMEM; 3211 } 3212 3213 spin_lock(&tiqn_lock); 3214 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3215 len = sprintf(buf, "TargetName=%s", tiqn->tiqn); 3216 len += 1; 3217 3218 if ((len + payload_len) > buffer_len) { 3219 spin_unlock(&tiqn->tiqn_tpg_lock); 3220 end_of_buf = 1; 3221 goto eob; 3222 } 3223 memcpy(payload + payload_len, buf, len); 3224 payload_len += len; 3225 3226 spin_lock(&tiqn->tiqn_tpg_lock); 3227 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3228 3229 spin_lock(&tpg->tpg_state_lock); 3230 if ((tpg->tpg_state == TPG_STATE_FREE) || 3231 (tpg->tpg_state == TPG_STATE_INACTIVE)) { 3232 spin_unlock(&tpg->tpg_state_lock); 3233 continue; 3234 } 3235 spin_unlock(&tpg->tpg_state_lock); 3236 3237 spin_lock(&tpg->tpg_np_lock); 3238 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3239 tpg_np_list) { 3240 struct iscsi_np *np = tpg_np->tpg_np; 3241 bool inaddr_any = iscsit_check_inaddr_any(np); 3242 3243 len = sprintf(buf, "TargetAddress=" 3244 "%s%s%s:%hu,%hu", 3245 (np->np_sockaddr.ss_family == AF_INET6) ? 3246 "[" : "", (inaddr_any == false) ? 3247 np->np_ip : conn->local_ip, 3248 (np->np_sockaddr.ss_family == AF_INET6) ? 3249 "]" : "", (inaddr_any == false) ? 3250 np->np_port : conn->local_port, 3251 tpg->tpgt); 3252 len += 1; 3253 3254 if ((len + payload_len) > buffer_len) { 3255 spin_unlock(&tpg->tpg_np_lock); 3256 spin_unlock(&tiqn->tiqn_tpg_lock); 3257 end_of_buf = 1; 3258 goto eob; 3259 } 3260 memcpy(payload + payload_len, buf, len); 3261 payload_len += len; 3262 } 3263 spin_unlock(&tpg->tpg_np_lock); 3264 } 3265 spin_unlock(&tiqn->tiqn_tpg_lock); 3266eob: 3267 if (end_of_buf) 3268 break; 3269 } 3270 spin_unlock(&tiqn_lock); 3271 3272 cmd->buf_ptr = payload; 3273 3274 return payload_len; 3275} 3276 3277/* 3278 * FIXME: Add support for F_BIT and C_BIT when the length is longer than 3279 * MaxRecvDataSegmentLength. 
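 * Until then iscsit_build_sendtargets_response() above caps the
 * payload at min(MaxRecvDataSegmentLength, 32768) bytes, emitting one
 * NULL-terminated "TargetName=..." key per IQN followed by its
 * "TargetAddress=<ip>:<port>,<tpgt>" keys.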
3280 */ 3281static int iscsit_send_text_rsp( 3282 struct iscsi_cmd *cmd, 3283 struct iscsi_conn *conn) 3284{ 3285 struct iscsi_text_rsp *hdr; 3286 struct kvec *iov; 3287 u32 padding = 0, tx_size = 0; 3288 int text_length, iov_count = 0; 3289 3290 text_length = iscsit_build_sendtargets_response(cmd); 3291 if (text_length < 0) 3292 return text_length; 3293 3294 padding = ((-text_length) & 3); 3295 if (padding != 0) { 3296 memset(cmd->buf_ptr + text_length, 0, padding); 3297 pr_debug("Attaching %u additional bytes for" 3298 " padding.\n", padding); 3299 } 3300 3301 hdr = (struct iscsi_text_rsp *) cmd->pdu; 3302 memset(hdr, 0, ISCSI_HDR_LEN); 3303 hdr->opcode = ISCSI_OP_TEXT_RSP; 3304 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3305 hton24(hdr->dlength, text_length); 3306 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3307 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3308 cmd->stat_sn = conn->stat_sn++; 3309 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3310 3311 iscsit_increment_maxcmdsn(cmd, conn->sess); 3312 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3313 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3314 3315 iov = &cmd->iov_misc[0]; 3316 3317 iov[iov_count].iov_base = cmd->pdu; 3318 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3319 iov[iov_count].iov_base = cmd->buf_ptr; 3320 iov[iov_count++].iov_len = text_length + padding; 3321 3322 tx_size += (ISCSI_HDR_LEN + text_length + padding); 3323 3324 if (conn->conn_ops->HeaderDigest) { 3325 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3326 3327 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3328 (unsigned char *)hdr, ISCSI_HDR_LEN, 3329 0, NULL, (u8 *)header_digest); 3330 3331 iov[0].iov_len += ISCSI_CRC_LEN; 3332 tx_size += ISCSI_CRC_LEN; 3333 pr_debug("Attaching CRC32 HeaderDigest for" 3334 " Text Response PDU 0x%08x\n", *header_digest); 3335 } 3336 3337 if (conn->conn_ops->DataDigest) { 3338 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3339 cmd->buf_ptr, (text_length + padding), 3340 0, NULL, (u8 *)&cmd->data_crc); 3341 3342 iov[iov_count].iov_base = &cmd->data_crc; 3343 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3344 tx_size += ISCSI_CRC_LEN; 3345 3346 pr_debug("Attaching DataDigest for %u bytes of text" 3347 " data, CRC 0x%08x\n", (text_length + padding), 3348 cmd->data_crc); 3349 } 3350 3351 cmd->iov_misc_count = iov_count; 3352 cmd->tx_size = tx_size; 3353 3354 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," 3355 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, 3356 text_length, conn->cid); 3357 return 0; 3358} 3359 3360static int iscsit_send_reject( 3361 struct iscsi_cmd *cmd, 3362 struct iscsi_conn *conn) 3363{ 3364 u32 iov_count = 0, tx_size = 0; 3365 struct iscsi_reject *hdr; 3366 struct kvec *iov; 3367 3368 hdr = (struct iscsi_reject *) cmd->pdu; 3369 hdr->opcode = ISCSI_OP_REJECT; 3370 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3371 hton24(hdr->dlength, ISCSI_HDR_LEN); 3372 cmd->stat_sn = conn->stat_sn++; 3373 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3374 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3375 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3376 3377 iov = &cmd->iov_misc[0]; 3378 3379 iov[iov_count].iov_base = cmd->pdu; 3380 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3381 iov[iov_count].iov_base = cmd->buf_ptr; 3382 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3383 3384 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); 3385 3386 if (conn->conn_ops->HeaderDigest) { 3387 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3388 3389 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3390 
(unsigned char *)hdr, ISCSI_HDR_LEN, 3391 0, NULL, (u8 *)header_digest); 3392 3393 iov[0].iov_len += ISCSI_CRC_LEN; 3394 tx_size += ISCSI_CRC_LEN; 3395 pr_debug("Attaching CRC32 HeaderDigest for" 3396 " REJECT PDU 0x%08x\n", *header_digest); 3397 } 3398 3399 if (conn->conn_ops->DataDigest) { 3400 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3401 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3402 0, NULL, (u8 *)&cmd->data_crc); 3403 3404 iov[iov_count].iov_base = &cmd->data_crc; 3405 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3406 tx_size += ISCSI_CRC_LEN; 3407 pr_debug("Attaching CRC32 DataDigest for REJECT" 3408 " PDU 0x%08x\n", cmd->data_crc); 3409 } 3410 3411 cmd->iov_misc_count = iov_count; 3412 cmd->tx_size = tx_size; 3413 3414 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3415 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3416 3417 return 0; 3418} 3419 3420static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 3421{ 3422 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 3423 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 3424 wait_for_completion_interruptible_timeout( 3425 &conn->tx_half_close_comp, 3426 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 3427 } 3428} 3429 3430#ifdef CONFIG_SMP 3431 3432void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3433{ 3434 struct iscsi_thread_set *ts = conn->thread_set; 3435 int ord, cpu; 3436 /* 3437 * thread_id is assigned from iscsit_global->ts_bitmap from 3438 * within iscsi_thread_set.c:iscsi_allocate_thread_sets() 3439 * 3440 * Here we use thread_id to determine which CPU that this 3441 * iSCSI connection's iscsi_thread_set will be scheduled to 3442 * execute upon. 3443 */ 3444 ord = ts->thread_id % cpumask_weight(cpu_online_mask); 3445#if 0 3446 pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from" 3447 " thread_id: %d\n", ord, ts->thread_id); 3448#endif 3449 for_each_online_cpu(cpu) { 3450 if (ord-- == 0) { 3451 cpumask_set_cpu(cpu, conn->conn_cpumask); 3452 return; 3453 } 3454 } 3455 /* 3456 * This should never be reached.. 3457 */ 3458 dump_stack(); 3459 cpumask_setall(conn->conn_cpumask); 3460} 3461 3462static inline void iscsit_thread_check_cpumask( 3463 struct iscsi_conn *conn, 3464 struct task_struct *p, 3465 int mode) 3466{ 3467 char buf[128]; 3468 /* 3469 * mode == 1 signals iscsi_target_tx_thread() usage. 3470 * mode == 0 signals iscsi_target_rx_thread() usage. 3471 */ 3472 if (mode == 1) { 3473 if (!conn->conn_tx_reset_cpumask) 3474 return; 3475 conn->conn_tx_reset_cpumask = 0; 3476 } else { 3477 if (!conn->conn_rx_reset_cpumask) 3478 return; 3479 conn->conn_rx_reset_cpumask = 0; 3480 } 3481 /* 3482 * Update the CPU mask for this single kthread so that 3483 * both TX and RX kthreads are scheduled to run on the 3484 * same CPU. 
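 * conn->conn_cpumask was picked in iscsit_thread_get_cpumask() from
 * ts->thread_id, e.g. with 4 online CPUs thread_id 5 gives ord 1 and
 * therefore the second online CPU.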
3485 */ 3486 memset(buf, 0, 128); 3487 cpumask_scnprintf(buf, 128, conn->conn_cpumask); 3488#if 0 3489 pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():" 3490 " %s for %s\n", buf, p->comm); 3491#endif 3492 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3493} 3494 3495#else 3496 3497void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3498{ 3499 return; 3500} 3501 3502#define iscsit_thread_check_cpumask(X, Y, Z) ({}) 3503#endif /* CONFIG_SMP */ 3504 3505int iscsi_target_tx_thread(void *arg) 3506{ 3507 u8 state; 3508 int eodr = 0; 3509 int ret = 0; 3510 int sent_status = 0; 3511 int use_misc = 0; 3512 int map_sg = 0; 3513 struct iscsi_cmd *cmd = NULL; 3514 struct iscsi_conn *conn; 3515 struct iscsi_queue_req *qr = NULL; 3516 struct se_cmd *se_cmd; 3517 struct iscsi_thread_set *ts = arg; 3518 /* 3519 * Allow ourselves to be interrupted by SIGINT so that a 3520 * connection recovery / failure event can be triggered externally. 3521 */ 3522 allow_signal(SIGINT); 3523 3524restart: 3525 conn = iscsi_tx_thread_pre_handler(ts); 3526 if (!conn) 3527 goto out; 3528 3529 eodr = map_sg = ret = sent_status = use_misc = 0; 3530 3531 while (!kthread_should_stop()) { 3532 /* 3533 * Ensure that both TX and RX per connection kthreads 3534 * are scheduled to run on the same CPU. 3535 */ 3536 iscsit_thread_check_cpumask(conn, current, 1); 3537 3538 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3539 3540 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3541 signal_pending(current)) 3542 goto transport_err; 3543 3544get_immediate: 3545 qr = iscsit_get_cmd_from_immediate_queue(conn); 3546 if (qr) { 3547 atomic_set(&conn->check_immediate_queue, 0); 3548 cmd = qr->cmd; 3549 state = qr->state; 3550 kmem_cache_free(lio_qr_cache, qr); 3551 3552 spin_lock_bh(&cmd->istate_lock); 3553 switch (state) { 3554 case ISTATE_SEND_R2T: 3555 spin_unlock_bh(&cmd->istate_lock); 3556 ret = iscsit_send_r2t(cmd, conn); 3557 break; 3558 case ISTATE_REMOVE: 3559 spin_unlock_bh(&cmd->istate_lock); 3560 3561 if (cmd->data_direction == DMA_TO_DEVICE) 3562 iscsit_stop_dataout_timer(cmd); 3563 3564 spin_lock_bh(&conn->cmd_lock); 3565 list_del(&cmd->i_list); 3566 spin_unlock_bh(&conn->cmd_lock); 3567 3568 iscsit_free_cmd(cmd); 3569 goto get_immediate; 3570 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3571 spin_unlock_bh(&cmd->istate_lock); 3572 iscsit_mod_nopin_response_timer(conn); 3573 ret = iscsit_send_unsolicited_nopin(cmd, 3574 conn, 1); 3575 break; 3576 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3577 spin_unlock_bh(&cmd->istate_lock); 3578 ret = iscsit_send_unsolicited_nopin(cmd, 3579 conn, 0); 3580 break; 3581 default: 3582 pr_err("Unknown Opcode: 0x%02x ITT:" 3583 " 0x%08x, i_state: %d on CID: %hu\n", 3584 cmd->iscsi_opcode, cmd->init_task_tag, state, 3585 conn->cid); 3586 spin_unlock_bh(&cmd->istate_lock); 3587 goto transport_err; 3588 } 3589 if (ret < 0) { 3590 conn->tx_immediate_queue = 0; 3591 goto transport_err; 3592 } 3593 3594 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3595 conn->tx_immediate_queue = 0; 3596 iscsit_tx_thread_wait_for_tcp(conn); 3597 goto transport_err; 3598 } 3599 3600 spin_lock_bh(&cmd->istate_lock); 3601 switch (state) { 3602 case ISTATE_SEND_R2T: 3603 spin_unlock_bh(&cmd->istate_lock); 3604 spin_lock_bh(&cmd->dataout_timeout_lock); 3605 iscsit_start_dataout_timer(cmd, conn); 3606 spin_unlock_bh(&cmd->dataout_timeout_lock); 3607 break; 3608 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3609 cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE; 3610 spin_unlock_bh(&cmd->istate_lock); 3611 break; 3612 case 
ISTATE_SEND_NOPIN_NO_RESPONSE: 3613 cmd->i_state = ISTATE_SENT_STATUS; 3614 spin_unlock_bh(&cmd->istate_lock); 3615 break; 3616 default: 3617 pr_err("Unknown Opcode: 0x%02x ITT:" 3618 " 0x%08x, i_state: %d on CID: %hu\n", 3619 cmd->iscsi_opcode, cmd->init_task_tag, 3620 state, conn->cid); 3621 spin_unlock_bh(&cmd->istate_lock); 3622 goto transport_err; 3623 } 3624 goto get_immediate; 3625 } else 3626 conn->tx_immediate_queue = 0; 3627 3628get_response: 3629 qr = iscsit_get_cmd_from_response_queue(conn); 3630 if (qr) { 3631 cmd = qr->cmd; 3632 state = qr->state; 3633 kmem_cache_free(lio_qr_cache, qr); 3634 3635 spin_lock_bh(&cmd->istate_lock); 3636check_rsp_state: 3637 switch (state) { 3638 case ISTATE_SEND_DATAIN: 3639 spin_unlock_bh(&cmd->istate_lock); 3640 ret = iscsit_send_data_in(cmd, conn, 3641 &eodr); 3642 map_sg = 1; 3643 break; 3644 case ISTATE_SEND_STATUS: 3645 case ISTATE_SEND_STATUS_RECOVERY: 3646 spin_unlock_bh(&cmd->istate_lock); 3647 use_misc = 1; 3648 ret = iscsit_send_status(cmd, conn); 3649 break; 3650 case ISTATE_SEND_LOGOUTRSP: 3651 spin_unlock_bh(&cmd->istate_lock); 3652 use_misc = 1; 3653 ret = iscsit_send_logout_response(cmd, conn); 3654 break; 3655 case ISTATE_SEND_ASYNCMSG: 3656 spin_unlock_bh(&cmd->istate_lock); 3657 use_misc = 1; 3658 ret = iscsit_send_conn_drop_async_message( 3659 cmd, conn); 3660 break; 3661 case ISTATE_SEND_NOPIN: 3662 spin_unlock_bh(&cmd->istate_lock); 3663 use_misc = 1; 3664 ret = iscsit_send_nopin_response(cmd, conn); 3665 break; 3666 case ISTATE_SEND_REJECT: 3667 spin_unlock_bh(&cmd->istate_lock); 3668 use_misc = 1; 3669 ret = iscsit_send_reject(cmd, conn); 3670 break; 3671 case ISTATE_SEND_TASKMGTRSP: 3672 spin_unlock_bh(&cmd->istate_lock); 3673 use_misc = 1; 3674 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3675 if (ret != 0) 3676 break; 3677 ret = iscsit_tmr_post_handler(cmd, conn); 3678 if (ret != 0) 3679 iscsit_fall_back_to_erl0(conn->sess); 3680 break; 3681 case ISTATE_SEND_TEXTRSP: 3682 spin_unlock_bh(&cmd->istate_lock); 3683 use_misc = 1; 3684 ret = iscsit_send_text_rsp(cmd, conn); 3685 break; 3686 default: 3687 pr_err("Unknown Opcode: 0x%02x ITT:" 3688 " 0x%08x, i_state: %d on CID: %hu\n", 3689 cmd->iscsi_opcode, cmd->init_task_tag, 3690 state, conn->cid); 3691 spin_unlock_bh(&cmd->istate_lock); 3692 goto transport_err; 3693 } 3694 if (ret < 0) { 3695 conn->tx_response_queue = 0; 3696 goto transport_err; 3697 } 3698 3699 se_cmd = &cmd->se_cmd; 3700 3701 if (map_sg && !conn->conn_ops->IFMarker) { 3702 if (iscsit_fe_sendpage_sg(cmd, conn) < 0) { 3703 conn->tx_response_queue = 0; 3704 iscsit_tx_thread_wait_for_tcp(conn); 3705 iscsit_unmap_iovec(cmd); 3706 goto transport_err; 3707 } 3708 } else { 3709 if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) { 3710 conn->tx_response_queue = 0; 3711 iscsit_tx_thread_wait_for_tcp(conn); 3712 iscsit_unmap_iovec(cmd); 3713 goto transport_err; 3714 } 3715 } 3716 map_sg = 0; 3717 iscsit_unmap_iovec(cmd); 3718 3719 spin_lock_bh(&cmd->istate_lock); 3720 switch (state) { 3721 case ISTATE_SEND_DATAIN: 3722 if (!eodr) 3723 goto check_rsp_state; 3724 3725 if (eodr == 1) { 3726 cmd->i_state = ISTATE_SENT_LAST_DATAIN; 3727 sent_status = 1; 3728 eodr = use_misc = 0; 3729 } else if (eodr == 2) { 3730 cmd->i_state = state = 3731 ISTATE_SEND_STATUS; 3732 sent_status = 0; 3733 eodr = use_misc = 0; 3734 goto check_rsp_state; 3735 } 3736 break; 3737 case ISTATE_SEND_STATUS: 3738 use_misc = 0; 3739 sent_status = 1; 3740 break; 3741 case ISTATE_SEND_ASYNCMSG: 3742 case ISTATE_SEND_NOPIN: 3743 case 
ISTATE_SEND_STATUS_RECOVERY: 3744 case ISTATE_SEND_TEXTRSP: 3745 use_misc = 0; 3746 sent_status = 1; 3747 break; 3748 case ISTATE_SEND_REJECT: 3749 use_misc = 0; 3750 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3751 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3752 spin_unlock_bh(&cmd->istate_lock); 3753 complete(&cmd->reject_comp); 3754 goto transport_err; 3755 } 3756 complete(&cmd->reject_comp); 3757 break; 3758 case ISTATE_SEND_TASKMGTRSP: 3759 use_misc = 0; 3760 sent_status = 1; 3761 break; 3762 case ISTATE_SEND_LOGOUTRSP: 3763 spin_unlock_bh(&cmd->istate_lock); 3764 if (!iscsit_logout_post_handler(cmd, conn)) 3765 goto restart; 3766 spin_lock_bh(&cmd->istate_lock); 3767 use_misc = 0; 3768 sent_status = 1; 3769 break; 3770 default: 3771 pr_err("Unknown Opcode: 0x%02x ITT:" 3772 " 0x%08x, i_state: %d on CID: %hu\n", 3773 cmd->iscsi_opcode, cmd->init_task_tag, 3774 cmd->i_state, conn->cid); 3775 spin_unlock_bh(&cmd->istate_lock); 3776 goto transport_err; 3777 } 3778 3779 if (sent_status) { 3780 cmd->i_state = ISTATE_SENT_STATUS; 3781 sent_status = 0; 3782 } 3783 spin_unlock_bh(&cmd->istate_lock); 3784 3785 if (atomic_read(&conn->check_immediate_queue)) 3786 goto get_immediate; 3787 3788 goto get_response; 3789 } else 3790 conn->tx_response_queue = 0; 3791 } 3792 3793transport_err: 3794 iscsit_take_action_for_connection_exit(conn); 3795 goto restart; 3796out: 3797 return 0; 3798} 3799 3800int iscsi_target_rx_thread(void *arg) 3801{ 3802 int ret; 3803 u8 buffer[ISCSI_HDR_LEN], opcode; 3804 u32 checksum = 0, digest = 0; 3805 struct iscsi_conn *conn = NULL; 3806 struct iscsi_thread_set *ts = arg; 3807 struct kvec iov; 3808 /* 3809 * Allow ourselves to be interrupted by SIGINT so that a 3810 * connection recovery / failure event can be triggered externally. 3811 */ 3812 allow_signal(SIGINT); 3813 3814restart: 3815 conn = iscsi_rx_thread_pre_handler(ts); 3816 if (!conn) 3817 goto out; 3818 3819 while (!kthread_should_stop()) { 3820 /* 3821 * Ensure that both TX and RX per connection kthreads 3822 * are scheduled to run on the same CPU. 3823 */ 3824 iscsit_thread_check_cpumask(conn, current, 0); 3825 3826 memset(buffer, 0, ISCSI_HDR_LEN); 3827 memset(&iov, 0, sizeof(struct kvec)); 3828 3829 iov.iov_base = buffer; 3830 iov.iov_len = ISCSI_HDR_LEN; 3831 3832 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 3833 if (ret != ISCSI_HDR_LEN) { 3834 iscsit_rx_thread_wait_for_tcp(conn); 3835 goto transport_err; 3836 } 3837 3838 /* 3839 * Set conn->bad_hdr for use with REJECT PDUs. 3840 */ 3841 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN); 3842 3843 if (conn->conn_ops->HeaderDigest) { 3844 iov.iov_base = &digest; 3845 iov.iov_len = ISCSI_CRC_LEN; 3846 3847 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 3848 if (ret != ISCSI_CRC_LEN) { 3849 iscsit_rx_thread_wait_for_tcp(conn); 3850 goto transport_err; 3851 } 3852 3853 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 3854 buffer, ISCSI_HDR_LEN, 3855 0, NULL, (u8 *)&checksum); 3856 3857 if (digest != checksum) { 3858 pr_err("HeaderDigest CRC32C failed," 3859 " received 0x%08x, computed 0x%08x\n", 3860 digest, checksum); 3861 /* 3862 * Set the PDU to 0xff so it will intentionally 3863 * hit default in the switch below. 
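 * 0xff masked with ISCSI_OPCODE_MASK is not a valid opcode, so the
 * connection is then either recovered via
 * iscsit_recover_from_unknown_opcode() or closed, depending on the
 * negotiated ErrorRecoveryLevel and OFMarker settings.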
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
	struct iscsi_session *sess = conn->sess;
	/*
	 * We expect this function to only ever be called from either RX or TX
	 * thread context via iscsit_close_connection() once the other context
	 * has been reset and returned to the sleeping pre-handler state.
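	 *
	 * Note that iscsit_increment_maxcmdsn() is still called for each
	 * command released below, so the session's MaxCmdSN window keeps
	 * advancing for the initiator while the connection goes away.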
3965 */ 3966 spin_lock_bh(&conn->cmd_lock); 3967 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 3968 3969 list_del(&cmd->i_list); 3970 spin_unlock_bh(&conn->cmd_lock); 3971 3972 iscsit_increment_maxcmdsn(cmd, sess); 3973 3974 iscsit_free_cmd(cmd); 3975 3976 spin_lock_bh(&conn->cmd_lock); 3977 } 3978 spin_unlock_bh(&conn->cmd_lock); 3979} 3980 3981static void iscsit_stop_timers_for_cmds( 3982 struct iscsi_conn *conn) 3983{ 3984 struct iscsi_cmd *cmd; 3985 3986 spin_lock_bh(&conn->cmd_lock); 3987 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 3988 if (cmd->data_direction == DMA_TO_DEVICE) 3989 iscsit_stop_dataout_timer(cmd); 3990 } 3991 spin_unlock_bh(&conn->cmd_lock); 3992} 3993 3994int iscsit_close_connection( 3995 struct iscsi_conn *conn) 3996{ 3997 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); 3998 struct iscsi_session *sess = conn->sess; 3999 4000 pr_debug("Closing iSCSI connection CID %hu on SID:" 4001 " %u\n", conn->cid, sess->sid); 4002 /* 4003 * Always up conn_logout_comp just in case the RX Thread is sleeping 4004 * and the logout response never got sent because the connection 4005 * failed. 4006 */ 4007 complete(&conn->conn_logout_comp); 4008 4009 iscsi_release_thread_set(conn); 4010 4011 iscsit_stop_timers_for_cmds(conn); 4012 iscsit_stop_nopin_response_timer(conn); 4013 iscsit_stop_nopin_timer(conn); 4014 iscsit_free_queue_reqs_for_conn(conn); 4015 4016 /* 4017 * During Connection recovery drop unacknowledged out of order 4018 * commands for this connection, and prepare the other commands 4019 * for realligence. 4020 * 4021 * During normal operation clear the out of order commands (but 4022 * do not free the struct iscsi_ooo_cmdsn's) and release all 4023 * struct iscsi_cmds. 4024 */ 4025 if (atomic_read(&conn->connection_recovery)) { 4026 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); 4027 iscsit_prepare_cmds_for_realligance(conn); 4028 } else { 4029 iscsit_clear_ooo_cmdsns_for_conn(conn); 4030 iscsit_release_commands_from_conn(conn); 4031 } 4032 4033 /* 4034 * Handle decrementing session or connection usage count if 4035 * a logout response was not able to be sent because the 4036 * connection failed. Fall back to Session Recovery here. 4037 */ 4038 if (atomic_read(&conn->conn_logout_remove)) { 4039 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) { 4040 iscsit_dec_conn_usage_count(conn); 4041 iscsit_dec_session_usage_count(sess); 4042 } 4043 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) 4044 iscsit_dec_conn_usage_count(conn); 4045 4046 atomic_set(&conn->conn_logout_remove, 0); 4047 atomic_set(&sess->session_reinstatement, 0); 4048 atomic_set(&sess->session_fall_back_to_erl0, 1); 4049 } 4050 4051 spin_lock_bh(&sess->conn_lock); 4052 list_del(&conn->conn_list); 4053 4054 /* 4055 * Attempt to let the Initiator know this connection failed by 4056 * sending an Connection Dropped Async Message on another 4057 * active connection. 4058 */ 4059 if (atomic_read(&conn->connection_recovery)) 4060 iscsit_build_conn_drop_async_message(conn); 4061 4062 spin_unlock_bh(&sess->conn_lock); 4063 4064 /* 4065 * If connection reinstatement is being performed on this connection, 4066 * up the connection reinstatement semaphore that is being blocked on 4067 * in iscsit_cause_connection_reinstatement(). 
4068 */ 4069 spin_lock_bh(&conn->state_lock); 4070 if (atomic_read(&conn->sleep_on_conn_wait_comp)) { 4071 spin_unlock_bh(&conn->state_lock); 4072 complete(&conn->conn_wait_comp); 4073 wait_for_completion(&conn->conn_post_wait_comp); 4074 spin_lock_bh(&conn->state_lock); 4075 } 4076 4077 /* 4078 * If connection reinstatement is being performed on this connection 4079 * by receiving a REMOVECONNFORRECOVERY logout request, up the 4080 * connection wait rcfr semaphore that is being blocked on 4081 * an iscsit_connection_reinstatement_rcfr(). 4082 */ 4083 if (atomic_read(&conn->connection_wait_rcfr)) { 4084 spin_unlock_bh(&conn->state_lock); 4085 complete(&conn->conn_wait_rcfr_comp); 4086 wait_for_completion(&conn->conn_post_wait_comp); 4087 spin_lock_bh(&conn->state_lock); 4088 } 4089 atomic_set(&conn->connection_reinstatement, 1); 4090 spin_unlock_bh(&conn->state_lock); 4091 4092 /* 4093 * If any other processes are accessing this connection pointer we 4094 * must wait until they have completed. 4095 */ 4096 iscsit_check_conn_usage_count(conn); 4097 4098 if (conn->conn_rx_hash.tfm) 4099 crypto_free_hash(conn->conn_rx_hash.tfm); 4100 if (conn->conn_tx_hash.tfm) 4101 crypto_free_hash(conn->conn_tx_hash.tfm); 4102 4103 if (conn->conn_cpumask) 4104 free_cpumask_var(conn->conn_cpumask); 4105 4106 kfree(conn->conn_ops); 4107 conn->conn_ops = NULL; 4108 4109 if (conn->sock) { 4110 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { 4111 kfree(conn->sock->file); 4112 conn->sock->file = NULL; 4113 } 4114 sock_release(conn->sock); 4115 } 4116 conn->thread_set = NULL; 4117 4118 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4119 conn->conn_state = TARG_CONN_STATE_FREE; 4120 kfree(conn); 4121 4122 spin_lock_bh(&sess->conn_lock); 4123 atomic_dec(&sess->nconn); 4124 pr_debug("Decremented iSCSI connection count to %hu from node:" 4125 " %s\n", atomic_read(&sess->nconn), 4126 sess->sess_ops->InitiatorName); 4127 /* 4128 * Make sure that if one connection fails in an non ERL=2 iSCSI 4129 * Session that they all fail. 4130 */ 4131 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout && 4132 !atomic_read(&sess->session_logout)) 4133 atomic_set(&sess->session_fall_back_to_erl0, 1); 4134 4135 /* 4136 * If this was not the last connection in the session, and we are 4137 * performing session reinstatement or falling back to ERL=0, call 4138 * iscsit_stop_session() without sleeping to shutdown the other 4139 * active connections. 4140 */ 4141 if (atomic_read(&sess->nconn)) { 4142 if (!atomic_read(&sess->session_reinstatement) && 4143 !atomic_read(&sess->session_fall_back_to_erl0)) { 4144 spin_unlock_bh(&sess->conn_lock); 4145 return 0; 4146 } 4147 if (!atomic_read(&sess->session_stop_active)) { 4148 atomic_set(&sess->session_stop_active, 1); 4149 spin_unlock_bh(&sess->conn_lock); 4150 iscsit_stop_session(sess, 0, 0); 4151 return 0; 4152 } 4153 spin_unlock_bh(&sess->conn_lock); 4154 return 0; 4155 } 4156 4157 /* 4158 * If this was the last connection in the session and one of the 4159 * following is occurring: 4160 * 4161 * Session Reinstatement is not being performed, and are falling back 4162 * to ERL=0 call iscsit_close_session(). 4163 * 4164 * Session Logout was requested. iscsit_close_session() will be called 4165 * elsewhere. 4166 * 4167 * Session Continuation is not being performed, start the Time2Retain 4168 * handler and check if sleep_on_sess_wait_sem is active. 
4169 */ 4170 if (!atomic_read(&sess->session_reinstatement) && 4171 atomic_read(&sess->session_fall_back_to_erl0)) { 4172 spin_unlock_bh(&sess->conn_lock); 4173 iscsit_close_session(sess); 4174 4175 return 0; 4176 } else if (atomic_read(&sess->session_logout)) { 4177 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4178 sess->session_state = TARG_SESS_STATE_FREE; 4179 spin_unlock_bh(&sess->conn_lock); 4180 4181 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4182 complete(&sess->session_wait_comp); 4183 4184 return 0; 4185 } else { 4186 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4187 sess->session_state = TARG_SESS_STATE_FAILED; 4188 4189 if (!atomic_read(&sess->session_continuation)) { 4190 spin_unlock_bh(&sess->conn_lock); 4191 iscsit_start_time2retain_handler(sess); 4192 } else 4193 spin_unlock_bh(&sess->conn_lock); 4194 4195 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4196 complete(&sess->session_wait_comp); 4197 4198 return 0; 4199 } 4200 spin_unlock_bh(&sess->conn_lock); 4201 4202 return 0; 4203} 4204 4205int iscsit_close_session(struct iscsi_session *sess) 4206{ 4207 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 4208 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4209 4210 if (atomic_read(&sess->nconn)) { 4211 pr_err("%d connection(s) still exist for iSCSI session" 4212 " to %s\n", atomic_read(&sess->nconn), 4213 sess->sess_ops->InitiatorName); 4214 BUG(); 4215 } 4216 4217 spin_lock_bh(&se_tpg->session_lock); 4218 atomic_set(&sess->session_logout, 1); 4219 atomic_set(&sess->session_reinstatement, 1); 4220 iscsit_stop_time2retain_timer(sess); 4221 spin_unlock_bh(&se_tpg->session_lock); 4222 4223 /* 4224 * transport_deregister_session_configfs() will clear the 4225 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context 4226 * can be setting it again with __transport_register_session() in 4227 * iscsi_post_login_handler() again after the iscsit_stop_session() 4228 * completes in iscsi_np context. 4229 */ 4230 transport_deregister_session_configfs(sess->se_sess); 4231 4232 /* 4233 * If any other processes are accessing this session pointer we must 4234 * wait until they have completed. If we are in an interrupt (the 4235 * time2retain handler) and contain and active session usage count we 4236 * restart the timer and exit. 
4237 */ 4238 if (!in_interrupt()) { 4239 if (iscsit_check_session_usage_count(sess) == 1) 4240 iscsit_stop_session(sess, 1, 1); 4241 } else { 4242 if (iscsit_check_session_usage_count(sess) == 2) { 4243 atomic_set(&sess->session_logout, 0); 4244 iscsit_start_time2retain_handler(sess); 4245 return 0; 4246 } 4247 } 4248 4249 transport_deregister_session(sess->se_sess); 4250 4251 if (sess->sess_ops->ErrorRecoveryLevel == 2) 4252 iscsit_free_connection_recovery_entires(sess); 4253 4254 iscsit_free_all_ooo_cmdsns(sess); 4255 4256 spin_lock_bh(&se_tpg->session_lock); 4257 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4258 sess->session_state = TARG_SESS_STATE_FREE; 4259 pr_debug("Released iSCSI session from node: %s\n", 4260 sess->sess_ops->InitiatorName); 4261 tpg->nsessions--; 4262 if (tpg->tpg_tiqn) 4263 tpg->tpg_tiqn->tiqn_nsessions--; 4264 4265 pr_debug("Decremented number of active iSCSI Sessions on" 4266 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); 4267 4268 spin_lock(&sess_idr_lock); 4269 idr_remove(&sess_idr, sess->session_index); 4270 spin_unlock(&sess_idr_lock); 4271 4272 kfree(sess->sess_ops); 4273 sess->sess_ops = NULL; 4274 spin_unlock_bh(&se_tpg->session_lock); 4275 4276 kfree(sess); 4277 return 0; 4278} 4279 4280static void iscsit_logout_post_handler_closesession( 4281 struct iscsi_conn *conn) 4282{ 4283 struct iscsi_session *sess = conn->sess; 4284 4285 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4286 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4287 4288 atomic_set(&conn->conn_logout_remove, 0); 4289 complete(&conn->conn_logout_comp); 4290 4291 iscsit_dec_conn_usage_count(conn); 4292 iscsit_stop_session(sess, 1, 1); 4293 iscsit_dec_session_usage_count(sess); 4294 iscsit_close_session(sess); 4295} 4296 4297static void iscsit_logout_post_handler_samecid( 4298 struct iscsi_conn *conn) 4299{ 4300 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4301 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4302 4303 atomic_set(&conn->conn_logout_remove, 0); 4304 complete(&conn->conn_logout_comp); 4305 4306 iscsit_cause_connection_reinstatement(conn, 1); 4307 iscsit_dec_conn_usage_count(conn); 4308} 4309 4310static void iscsit_logout_post_handler_diffcid( 4311 struct iscsi_conn *conn, 4312 u16 cid) 4313{ 4314 struct iscsi_conn *l_conn; 4315 struct iscsi_session *sess = conn->sess; 4316 4317 if (!sess) 4318 return; 4319 4320 spin_lock_bh(&sess->conn_lock); 4321 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { 4322 if (l_conn->cid == cid) { 4323 iscsit_inc_conn_usage_count(l_conn); 4324 break; 4325 } 4326 } 4327 spin_unlock_bh(&sess->conn_lock); 4328 4329 if (!l_conn) 4330 return; 4331 4332 if (l_conn->sock) 4333 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN); 4334 4335 spin_lock_bh(&l_conn->state_lock); 4336 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 4337 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 4338 spin_unlock_bh(&l_conn->state_lock); 4339 4340 iscsit_cause_connection_reinstatement(l_conn, 1); 4341 iscsit_dec_conn_usage_count(l_conn); 4342} 4343 4344/* 4345 * Return of 0 causes the TX thread to restart. 
4346 */ 4347static int iscsit_logout_post_handler( 4348 struct iscsi_cmd *cmd, 4349 struct iscsi_conn *conn) 4350{ 4351 int ret = 0; 4352 4353 switch (cmd->logout_reason) { 4354 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 4355 switch (cmd->logout_response) { 4356 case ISCSI_LOGOUT_SUCCESS: 4357 case ISCSI_LOGOUT_CLEANUP_FAILED: 4358 default: 4359 iscsit_logout_post_handler_closesession(conn); 4360 break; 4361 } 4362 ret = 0; 4363 break; 4364 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 4365 if (conn->cid == cmd->logout_cid) { 4366 switch (cmd->logout_response) { 4367 case ISCSI_LOGOUT_SUCCESS: 4368 case ISCSI_LOGOUT_CLEANUP_FAILED: 4369 default: 4370 iscsit_logout_post_handler_samecid(conn); 4371 break; 4372 } 4373 ret = 0; 4374 } else { 4375 switch (cmd->logout_response) { 4376 case ISCSI_LOGOUT_SUCCESS: 4377 iscsit_logout_post_handler_diffcid(conn, 4378 cmd->logout_cid); 4379 break; 4380 case ISCSI_LOGOUT_CID_NOT_FOUND: 4381 case ISCSI_LOGOUT_CLEANUP_FAILED: 4382 default: 4383 break; 4384 } 4385 ret = 1; 4386 } 4387 break; 4388 case ISCSI_LOGOUT_REASON_RECOVERY: 4389 switch (cmd->logout_response) { 4390 case ISCSI_LOGOUT_SUCCESS: 4391 case ISCSI_LOGOUT_CID_NOT_FOUND: 4392 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: 4393 case ISCSI_LOGOUT_CLEANUP_FAILED: 4394 default: 4395 break; 4396 } 4397 ret = 1; 4398 break; 4399 default: 4400 break; 4401 4402 } 4403 return ret; 4404} 4405 4406void iscsit_fail_session(struct iscsi_session *sess) 4407{ 4408 struct iscsi_conn *conn; 4409 4410 spin_lock_bh(&sess->conn_lock); 4411 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 4412 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 4413 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 4414 } 4415 spin_unlock_bh(&sess->conn_lock); 4416 4417 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4418 sess->session_state = TARG_SESS_STATE_FAILED; 4419} 4420 4421int iscsit_free_session(struct iscsi_session *sess) 4422{ 4423 u16 conn_count = atomic_read(&sess->nconn); 4424 struct iscsi_conn *conn, *conn_tmp = NULL; 4425 int is_last; 4426 4427 spin_lock_bh(&sess->conn_lock); 4428 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4429 4430 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4431 conn_list) { 4432 if (conn_count == 0) 4433 break; 4434 4435 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4436 is_last = 1; 4437 } else { 4438 iscsit_inc_conn_usage_count(conn_tmp); 4439 is_last = 0; 4440 } 4441 iscsit_inc_conn_usage_count(conn); 4442 4443 spin_unlock_bh(&sess->conn_lock); 4444 iscsit_cause_connection_reinstatement(conn, 1); 4445 spin_lock_bh(&sess->conn_lock); 4446 4447 iscsit_dec_conn_usage_count(conn); 4448 if (is_last == 0) 4449 iscsit_dec_conn_usage_count(conn_tmp); 4450 4451 conn_count--; 4452 } 4453 4454 if (atomic_read(&sess->nconn)) { 4455 spin_unlock_bh(&sess->conn_lock); 4456 wait_for_completion(&sess->session_wait_comp); 4457 } else 4458 spin_unlock_bh(&sess->conn_lock); 4459 4460 iscsit_close_session(sess); 4461 return 0; 4462} 4463 4464void iscsit_stop_session( 4465 struct iscsi_session *sess, 4466 int session_sleep, 4467 int connection_sleep) 4468{ 4469 u16 conn_count = atomic_read(&sess->nconn); 4470 struct iscsi_conn *conn, *conn_tmp = NULL; 4471 int is_last; 4472 4473 spin_lock_bh(&sess->conn_lock); 4474 if (session_sleep) 4475 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4476 4477 if (connection_sleep) { 4478 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4479 conn_list) { 4480 if (conn_count == 0) 4481 break; 4482 4483 if 
			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}

/*
 * Release every session on a Target Portal Group during shutdown.  Unless
 * @force is set, bail out with -1 while sessions are still active.
 */
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsi_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;

		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		atomic_set(&sess->session_reinstatement, 1);
		spin_unlock(&sess->conn_lock);
		spin_unlock_bh(&se_tpg->session_lock);

		iscsit_free_session(sess);
		spin_lock_bh(&se_tpg->session_lock);

		session_count++;
	}
	spin_unlock_bh(&se_tpg->session_lock);

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
			" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}

MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);