request.c revision 43a5ab151f0268459c4368292c2ddb2266b8f243
1/* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 */ 55 56#include <scsi/scsi_cmnd.h> 57#include "isci.h" 58#include "task.h" 59#include "request.h" 60#include "scu_completion_codes.h" 61#include "scu_event_codes.h" 62#include "sas.h" 63 64static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 65 int idx) 66{ 67 if (idx == 0) 68 return &ireq->tc->sgl_pair_ab; 69 else if (idx == 1) 70 return &ireq->tc->sgl_pair_cd; 71 else if (idx < 0) 72 return NULL; 73 else 74 return &ireq->sg_table[idx - 2]; 75} 76 77static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, 78 struct isci_request *ireq, u32 idx) 79{ 80 u32 offset; 81 82 if (idx == 0) { 83 offset = (void *) &ireq->tc->sgl_pair_ab - 84 (void *) &ihost->task_context_table[0]; 85 return ihost->task_context_dma + offset; 86 } else if (idx == 1) { 87 offset = (void *) &ireq->tc->sgl_pair_cd - 88 (void *) &ihost->task_context_table[0]; 89 return ihost->task_context_dma + offset; 90 } 91 92 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 93} 94 95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 96{ 97 e->length = sg_dma_len(sg); 98 e->address_upper = upper_32_bits(sg_dma_address(sg)); 99 e->address_lower = lower_32_bits(sg_dma_address(sg)); 100 e->address_modifier = 0; 101} 102 103static void sci_request_build_sgl(struct isci_request *ireq) 104{ 105 struct isci_host *ihost = ireq->isci_host; 106 struct sas_task *task = isci_request_access_task(ireq); 107 struct scatterlist *sg = NULL; 108 dma_addr_t dma_addr; 109 u32 sg_idx = 0; 110 struct scu_sgl_element_pair *scu_sg = NULL; 111 struct scu_sgl_element_pair *prev_sg = NULL; 112 113 if (task->num_scatter > 0) { 114 sg = task->scatter; 115 116 while (sg) { 117 scu_sg = to_sgl_element_pair(ireq, sg_idx); 118 init_sgl_element(&scu_sg->A, sg); 119 sg = sg_next(sg); 120 if (sg) { 121 init_sgl_element(&scu_sg->B, sg); 122 sg = sg_next(sg); 123 } else 124 memset(&scu_sg->B, 0, sizeof(scu_sg->B)); 125 126 if (prev_sg) { 127 dma_addr = to_sgl_element_pair_dma(ihost, 128 ireq, 129 sg_idx); 130 131 prev_sg->next_pair_upper = 132 upper_32_bits(dma_addr); 133 prev_sg->next_pair_lower = 134 lower_32_bits(dma_addr); 135 } 136 137 prev_sg = scu_sg; 138 sg_idx++; 139 } 140 } else { /* handle when no sg */ 141 scu_sg = to_sgl_element_pair(ireq, sg_idx); 142 143 dma_addr = dma_map_single(&ihost->pdev->dev, 144 task->scatter, 145 task->total_xfer_len, 146 task->data_dir); 147 148 ireq->zero_scatter_daddr = dma_addr; 149 150 scu_sg->A.length = task->total_xfer_len; 151 scu_sg->A.address_upper = upper_32_bits(dma_addr); 152 scu_sg->A.address_lower = lower_32_bits(dma_addr); 153 } 154 155 if (scu_sg) { 156 scu_sg->next_pair_upper = 0; 157 scu_sg->next_pair_lower = 0; 158 } 159} 160 161static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) 162{ 163 struct ssp_cmd_iu *cmd_iu; 164 struct sas_task *task = isci_request_access_task(ireq); 165 166 cmd_iu = &ireq->ssp.cmd; 167 168 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 169 cmd_iu->add_cdb_len = 0; 170 cmd_iu->_r_a = 0; 171 cmd_iu->_r_b = 0; 172 cmd_iu->en_fburst = 0; /* unsupported */ 173 cmd_iu->task_prio = task->ssp_task.task_prio; 174 cmd_iu->task_attr = task->ssp_task.task_attr; 175 cmd_iu->_r_c = 0; 176 177 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 178 sizeof(task->ssp_task.cdb) / sizeof(u32)); 179} 180 181static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 182{ 183 struct ssp_task_iu *task_iu; 184 struct sas_task *task = isci_request_access_task(ireq); 185 struct isci_tmf 
*isci_tmf = isci_request_access_tmf(ireq); 186 187 task_iu = &ireq->ssp.tmf; 188 189 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 190 191 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 192 193 task_iu->task_func = isci_tmf->tmf_code; 194 task_iu->task_tag = 195 (test_bit(IREQ_TMF, &ireq->flags)) ? 196 isci_tmf->io_tag : 197 SCI_CONTROLLER_INVALID_IO_TAG; 198} 199 200/** 201 * This method is will fill in the SCU Task Context for any type of SSP request. 202 * @sci_req: 203 * @task_context: 204 * 205 */ 206static void scu_ssp_reqeust_construct_task_context( 207 struct isci_request *ireq, 208 struct scu_task_context *task_context) 209{ 210 dma_addr_t dma_addr; 211 struct isci_remote_device *idev; 212 struct isci_port *iport; 213 214 idev = ireq->target_device; 215 iport = idev->owning_port; 216 217 /* Fill in the TC with the its required data */ 218 task_context->abort = 0; 219 task_context->priority = 0; 220 task_context->initiator_request = 1; 221 task_context->connection_rate = idev->connection_rate; 222 task_context->protocol_engine_index = ISCI_PEG; 223 task_context->logical_port_index = iport->physical_port_index; 224 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 225 task_context->valid = SCU_TASK_CONTEXT_VALID; 226 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 227 228 task_context->remote_node_index = idev->rnc.remote_node_index; 229 task_context->command_code = 0; 230 231 task_context->link_layer_control = 0; 232 task_context->do_not_dma_ssp_good_response = 1; 233 task_context->strict_ordering = 0; 234 task_context->control_frame = 0; 235 task_context->timeout_enable = 0; 236 task_context->block_guard_enable = 0; 237 238 task_context->address_modifier = 0; 239 240 /* task_context->type.ssp.tag = ireq->io_tag; */ 241 task_context->task_phase = 0x01; 242 243 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 244 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 245 (iport->physical_port_index << 246 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 247 ISCI_TAG_TCI(ireq->io_tag)); 248 249 /* 250 * Copy the physical address for the command buffer to the 251 * SCU Task Context 252 */ 253 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 254 255 task_context->command_iu_upper = upper_32_bits(dma_addr); 256 task_context->command_iu_lower = lower_32_bits(dma_addr); 257 258 /* 259 * Copy the physical address for the response buffer to the 260 * SCU Task Context 261 */ 262 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 263 264 task_context->response_iu_upper = upper_32_bits(dma_addr); 265 task_context->response_iu_lower = lower_32_bits(dma_addr); 266} 267 268static u8 scu_bg_blk_size(struct scsi_device *sdp) 269{ 270 switch (sdp->sector_size) { 271 case 512: 272 return 0; 273 case 1024: 274 return 1; 275 case 4096: 276 return 3; 277 default: 278 return 0xff; 279 } 280} 281 282static u32 scu_dif_bytes(u32 len, u32 sector_size) 283{ 284 return (len >> ilog2(sector_size)) * 8; 285} 286 287static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) 288{ 289 struct scu_task_context *tc = ireq->tc; 290 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 291 u8 blk_sz = scu_bg_blk_size(scmd->device); 292 293 tc->block_guard_enable = 1; 294 tc->blk_prot_en = 1; 295 tc->blk_sz = blk_sz; 296 /* DIF write insert */ 297 tc->blk_prot_func = 0x2; 298 299 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 300 scmd->device->sector_size); 301 302 /* always init to 0, used by hw */ 303 
tc->interm_crc_val = 0; 304 305 tc->init_crc_seed = 0; 306 tc->app_tag_verify = 0; 307 tc->app_tag_gen = 0; 308 tc->ref_tag_seed_verify = 0; 309 310 /* always init to same as bg_blk_sz */ 311 tc->UD_bytes_immed_val = scmd->device->sector_size; 312 313 tc->reserved_DC_0 = 0; 314 315 /* always init to 8 */ 316 tc->DIF_bytes_immed_val = 8; 317 318 tc->reserved_DC_1 = 0; 319 tc->bgc_blk_sz = scmd->device->sector_size; 320 tc->reserved_E0_0 = 0; 321 tc->app_tag_gen_mask = 0; 322 323 /** setup block guard control **/ 324 tc->bgctl = 0; 325 326 /* DIF write insert */ 327 tc->bgctl_f.op = 0x2; 328 329 tc->app_tag_verify_mask = 0; 330 331 /* must init to 0 for hw */ 332 tc->blk_guard_err = 0; 333 334 tc->reserved_E8_0 = 0; 335 336 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 337 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; 338 else if (type & SCSI_PROT_DIF_TYPE3) 339 tc->ref_tag_seed_gen = 0; 340} 341 342static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) 343{ 344 struct scu_task_context *tc = ireq->tc; 345 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 346 u8 blk_sz = scu_bg_blk_size(scmd->device); 347 348 tc->block_guard_enable = 1; 349 tc->blk_prot_en = 1; 350 tc->blk_sz = blk_sz; 351 /* DIF read strip */ 352 tc->blk_prot_func = 0x1; 353 354 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 355 scmd->device->sector_size); 356 357 /* always init to 0, used by hw */ 358 tc->interm_crc_val = 0; 359 360 tc->init_crc_seed = 0; 361 tc->app_tag_verify = 0; 362 tc->app_tag_gen = 0; 363 364 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 365 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; 366 else if (type & SCSI_PROT_DIF_TYPE3) 367 tc->ref_tag_seed_verify = 0; 368 369 /* always init to same as bg_blk_sz */ 370 tc->UD_bytes_immed_val = scmd->device->sector_size; 371 372 tc->reserved_DC_0 = 0; 373 374 /* always init to 8 */ 375 tc->DIF_bytes_immed_val = 8; 376 377 tc->reserved_DC_1 = 0; 378 tc->bgc_blk_sz = scmd->device->sector_size; 379 tc->reserved_E0_0 = 0; 380 tc->app_tag_gen_mask = 0; 381 382 /** setup block guard control **/ 383 tc->bgctl = 0; 384 385 /* DIF read strip */ 386 tc->bgctl_f.crc_verify = 1; 387 tc->bgctl_f.op = 0x1; 388 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { 389 tc->bgctl_f.ref_tag_chk = 1; 390 tc->bgctl_f.app_f_detect = 1; 391 } else if (type & SCSI_PROT_DIF_TYPE3) 392 tc->bgctl_f.app_ref_f_detect = 1; 393 394 tc->app_tag_verify_mask = 0; 395 396 /* must init to 0 for hw */ 397 tc->blk_guard_err = 0; 398 399 tc->reserved_E8_0 = 0; 400 tc->ref_tag_seed_gen = 0; 401} 402 403/** 404 * This method is will fill in the SCU Task Context for a SSP IO request. 
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are used:
 * -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *    request is issued ahead of other tasks destined for the same Remote
 *    Node.
 * -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *    normal request type (i.e. non-raw frame) is being used to perform
 *    task management.
 * -# control_frame == 1.  This ensures that the proper endianness is set
 *    so that the bytes are transmitted in the right order for a task frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 * request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general IO request construction and the buffer assignment for the
 * command buffer are complete.  Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
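 * Note: SATA/STP requests carry no SSP response buffer; the response IU
 * address words in the task context are simply cleared, and the device's
 * reply arrives later as a D2H register FIS.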
 */
static void scu_sata_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request.  A value of 0 indicates UDMA; a
 *    value of 1 indicates NCQ.
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_reqeust_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
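	 * (ATAPI_PKT_DMA sets the DMA bit in the features field of the H2D
	 * register FIS below, telling the device that the PACKET command's
	 * data phase will be carried out via DMA.)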
640 */ 641 h2d_fis->features |= ATAPI_PKT_DMA; 642 643 scu_stp_raw_request_construct_task_context(ireq); 644 645 task = isci_request_access_task(ireq); 646 if (task->data_dir == DMA_NONE) 647 task->total_xfer_len = 0; 648 649 /* clear the response so we can detect arrivial of an 650 * unsolicited h2d fis 651 */ 652 ireq->stp.rsp.fis_type = 0; 653} 654 655static enum sci_status 656sci_io_request_construct_sata(struct isci_request *ireq, 657 u32 len, 658 enum dma_data_direction dir, 659 bool copy) 660{ 661 enum sci_status status = SCI_SUCCESS; 662 struct sas_task *task = isci_request_access_task(ireq); 663 struct domain_device *dev = ireq->target_device->domain_dev; 664 665 /* check for management protocols */ 666 if (test_bit(IREQ_TMF, &ireq->flags)) { 667 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 668 669 dev_err(&ireq->owning_controller->pdev->dev, 670 "%s: Request 0x%p received un-handled SAT " 671 "management protocol 0x%x.\n", 672 __func__, ireq, tmf->tmf_code); 673 674 return SCI_FAILURE; 675 } 676 677 if (!sas_protocol_ata(task->task_proto)) { 678 dev_err(&ireq->owning_controller->pdev->dev, 679 "%s: Non-ATA protocol in SATA path: 0x%x\n", 680 __func__, 681 task->task_proto); 682 return SCI_FAILURE; 683 684 } 685 686 /* ATAPI */ 687 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 688 task->ata_task.fis.command == ATA_CMD_PACKET) { 689 sci_atapi_construct(ireq); 690 return SCI_SUCCESS; 691 } 692 693 /* non data */ 694 if (task->data_dir == DMA_NONE) { 695 scu_stp_raw_request_construct_task_context(ireq); 696 return SCI_SUCCESS; 697 } 698 699 /* NCQ */ 700 if (task->ata_task.use_ncq) { 701 sci_stp_optimized_request_construct(ireq, 702 SCU_TASK_TYPE_FPDMAQ_READ, 703 len, dir); 704 return SCI_SUCCESS; 705 } 706 707 /* DMA */ 708 if (task->ata_task.dma_xfer) { 709 sci_stp_optimized_request_construct(ireq, 710 SCU_TASK_TYPE_DMA_IN, 711 len, dir); 712 return SCI_SUCCESS; 713 } else /* PIO */ 714 return sci_stp_pio_request_construct(ireq, copy); 715 716 return status; 717} 718 719static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) 720{ 721 struct sas_task *task = isci_request_access_task(ireq); 722 723 ireq->protocol = SCIC_SSP_PROTOCOL; 724 725 scu_ssp_io_request_construct_task_context(ireq, 726 task->data_dir, 727 task->total_xfer_len); 728 729 sci_io_request_build_ssp_command_iu(ireq); 730 731 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 732 733 return SCI_SUCCESS; 734} 735 736enum sci_status sci_task_request_construct_ssp( 737 struct isci_request *ireq) 738{ 739 /* Construct the SSP Task SCU Task Context */ 740 scu_ssp_task_request_construct_task_context(ireq); 741 742 /* Fill in the SSP Task IU */ 743 sci_task_request_build_ssp_task_iu(ireq); 744 745 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 746 747 return SCI_SUCCESS; 748} 749 750static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) 751{ 752 enum sci_status status; 753 bool copy = false; 754 struct sas_task *task = isci_request_access_task(ireq); 755 756 ireq->protocol = SCIC_STP_PROTOCOL; 757 758 copy = (task->data_dir == DMA_NONE) ? 
false : true; 759 760 status = sci_io_request_construct_sata(ireq, 761 task->total_xfer_len, 762 task->data_dir, 763 copy); 764 765 if (status == SCI_SUCCESS) 766 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 767 768 return status; 769} 770 771/** 772 * sci_req_tx_bytes - bytes transferred when reply underruns request 773 * @ireq: request that was terminated early 774 */ 775#define SCU_TASK_CONTEXT_SRAM 0x200000 776static u32 sci_req_tx_bytes(struct isci_request *ireq) 777{ 778 struct isci_host *ihost = ireq->owning_controller; 779 u32 ret_val = 0; 780 781 if (readl(&ihost->smu_registers->address_modifier) == 0) { 782 void __iomem *scu_reg_base = ihost->scu_registers; 783 784 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 785 * BAR1 is the scu_registers 786 * 0x20002C = 0x200000 + 0x2c 787 * = start of task context SRAM + offset of (type.ssp.data_offset) 788 * TCi is the io_tag of struct sci_request 789 */ 790 ret_val = readl(scu_reg_base + 791 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 792 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); 793 } 794 795 return ret_val; 796} 797 798enum sci_status sci_request_start(struct isci_request *ireq) 799{ 800 enum sci_base_request_states state; 801 struct scu_task_context *tc = ireq->tc; 802 struct isci_host *ihost = ireq->owning_controller; 803 804 state = ireq->sm.current_state_id; 805 if (state != SCI_REQ_CONSTRUCTED) { 806 dev_warn(&ihost->pdev->dev, 807 "%s: SCIC IO Request requested to start while in wrong " 808 "state %d\n", __func__, state); 809 return SCI_FAILURE_INVALID_STATE; 810 } 811 812 tc->task_index = ISCI_TAG_TCI(ireq->io_tag); 813 814 switch (tc->protocol_type) { 815 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 816 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 817 /* SSP/SMP Frame */ 818 tc->type.ssp.tag = ireq->io_tag; 819 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 820 break; 821 822 case SCU_TASK_CONTEXT_PROTOCOL_STP: 823 /* STP/SATA Frame 824 * tc->type.stp.ncq_tag = ireq->ncq_tag; 825 */ 826 break; 827 828 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 829 /* / @todo When do we set no protocol type? 
*/ 830 break; 831 832 default: 833 /* This should never happen since we build the IO 834 * requests */ 835 break; 836 } 837 838 /* Add to the post_context the io tag value */ 839 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); 840 841 /* Everything is good go ahead and change state */ 842 sci_change_state(&ireq->sm, SCI_REQ_STARTED); 843 844 return SCI_SUCCESS; 845} 846 847enum sci_status 848sci_io_request_terminate(struct isci_request *ireq) 849{ 850 enum sci_base_request_states state; 851 852 state = ireq->sm.current_state_id; 853 854 switch (state) { 855 case SCI_REQ_CONSTRUCTED: 856 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 857 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 858 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 859 return SCI_SUCCESS; 860 case SCI_REQ_STARTED: 861 case SCI_REQ_TASK_WAIT_TC_COMP: 862 case SCI_REQ_SMP_WAIT_RESP: 863 case SCI_REQ_SMP_WAIT_TC_COMP: 864 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 865 case SCI_REQ_STP_UDMA_WAIT_D2H: 866 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 867 case SCI_REQ_STP_NON_DATA_WAIT_D2H: 868 case SCI_REQ_STP_PIO_WAIT_H2D: 869 case SCI_REQ_STP_PIO_WAIT_FRAME: 870 case SCI_REQ_STP_PIO_DATA_IN: 871 case SCI_REQ_STP_PIO_DATA_OUT: 872 case SCI_REQ_ATAPI_WAIT_H2D: 873 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 874 case SCI_REQ_ATAPI_WAIT_D2H: 875 case SCI_REQ_ATAPI_WAIT_TC_COMP: 876 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 877 return SCI_SUCCESS; 878 case SCI_REQ_TASK_WAIT_TC_RESP: 879 /* The task frame was already confirmed to have been 880 * sent by the SCU HW. Since the state machine is 881 * now only waiting for the task response itself, 882 * abort the request and complete it immediately 883 * and don't wait for the task response. 884 */ 885 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 886 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 887 return SCI_SUCCESS; 888 case SCI_REQ_ABORTING: 889 /* If a request has a termination requested twice, return 890 * a failure indication, since HW confirmation of the first 891 * abort is still outstanding. 892 */ 893 case SCI_REQ_COMPLETED: 894 default: 895 dev_warn(&ireq->owning_controller->pdev->dev, 896 "%s: SCIC IO Request requested to abort while in wrong " 897 "state %d\n", 898 __func__, 899 ireq->sm.current_state_id); 900 break; 901 } 902 903 return SCI_FAILURE_INVALID_STATE; 904} 905 906enum sci_status sci_request_complete(struct isci_request *ireq) 907{ 908 enum sci_base_request_states state; 909 struct isci_host *ihost = ireq->owning_controller; 910 911 state = ireq->sm.current_state_id; 912 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 913 "isci: request completion from wrong state (%d)\n", state)) 914 return SCI_FAILURE_INVALID_STATE; 915 916 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 917 sci_controller_release_frame(ihost, 918 ireq->saved_rx_frame_index); 919 920 /* XXX can we just stop the machine and remove the 'final' state? 
*/ 921 sci_change_state(&ireq->sm, SCI_REQ_FINAL); 922 return SCI_SUCCESS; 923} 924 925enum sci_status sci_io_request_event_handler(struct isci_request *ireq, 926 u32 event_code) 927{ 928 enum sci_base_request_states state; 929 struct isci_host *ihost = ireq->owning_controller; 930 931 state = ireq->sm.current_state_id; 932 933 if (state != SCI_REQ_STP_PIO_DATA_IN) { 934 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", 935 __func__, event_code, state); 936 937 return SCI_FAILURE_INVALID_STATE; 938 } 939 940 switch (scu_get_event_specifier(event_code)) { 941 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 942 /* We are waiting for data and the SCU has R_ERR the data frame. 943 * Go back to waiting for the D2H Register FIS 944 */ 945 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 946 return SCI_SUCCESS; 947 default: 948 dev_err(&ihost->pdev->dev, 949 "%s: pio request unexpected event %#x\n", 950 __func__, event_code); 951 952 /* TODO Should we fail the PIO request when we get an 953 * unexpected event? 954 */ 955 return SCI_FAILURE; 956 } 957} 958 959/* 960 * This function copies response data for requests returning response data 961 * instead of sense data. 962 * @sci_req: This parameter specifies the request object for which to copy 963 * the response data. 964 */ 965static void sci_io_request_copy_response(struct isci_request *ireq) 966{ 967 void *resp_buf; 968 u32 len; 969 struct ssp_response_iu *ssp_response; 970 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 971 972 ssp_response = &ireq->ssp.rsp; 973 974 resp_buf = &isci_tmf->resp.resp_iu; 975 976 len = min_t(u32, 977 SSP_RESP_IU_MAX_SIZE, 978 be32_to_cpu(ssp_response->response_data_len)); 979 980 memcpy(resp_buf, ssp_response->resp_data, len); 981} 982 983static enum sci_status 984request_started_state_tc_event(struct isci_request *ireq, 985 u32 completion_code) 986{ 987 struct ssp_response_iu *resp_iu; 988 u8 datapres; 989 990 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 991 * to determine SDMA status 992 */ 993 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 994 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 995 ireq->scu_status = SCU_TASK_DONE_GOOD; 996 ireq->sci_status = SCI_SUCCESS; 997 break; 998 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { 999 /* There are times when the SCU hardware will return an early 1000 * response because the io request specified more data than is 1001 * returned by the target device (mode pages, inquiry data, 1002 * etc.). We must check the response stats to see if this is 1003 * truly a failed request or a good request that just got 1004 * completed early. 
1005 */ 1006 struct ssp_response_iu *resp = &ireq->ssp.rsp; 1007 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1008 1009 sci_swab32_cpy(&ireq->ssp.rsp, 1010 &ireq->ssp.rsp, 1011 word_cnt); 1012 1013 if (resp->status == 0) { 1014 ireq->scu_status = SCU_TASK_DONE_GOOD; 1015 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 1016 } else { 1017 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1018 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1019 } 1020 break; 1021 } 1022 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 1023 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1024 1025 sci_swab32_cpy(&ireq->ssp.rsp, 1026 &ireq->ssp.rsp, 1027 word_cnt); 1028 1029 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1030 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1031 break; 1032 } 1033 1034 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 1035 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame 1036 * guaranteed to be received before this completion status is 1037 * posted? 1038 */ 1039 resp_iu = &ireq->ssp.rsp; 1040 datapres = resp_iu->datapres; 1041 1042 if (datapres == 1 || datapres == 2) { 1043 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1044 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1045 } else { 1046 ireq->scu_status = SCU_TASK_DONE_GOOD; 1047 ireq->sci_status = SCI_SUCCESS; 1048 } 1049 break; 1050 /* only stp device gets suspended. */ 1051 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1052 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 1053 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 1054 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 1055 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 1056 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 1057 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 1058 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 1059 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1060 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1061 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1062 if (ireq->protocol == SCIC_STP_PROTOCOL) { 1063 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1064 SCU_COMPLETION_TL_STATUS_SHIFT; 1065 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1066 } else { 1067 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1068 SCU_COMPLETION_TL_STATUS_SHIFT; 1069 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1070 } 1071 break; 1072 1073 /* both stp/ssp device gets suspended */ 1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 1075 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 1076 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 1077 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 1078 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 1079 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 1080 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 1081 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 1082 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 1083 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 1084 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1085 SCU_COMPLETION_TL_STATUS_SHIFT; 1086 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 
1087 break; 1088 1089 /* neither ssp nor stp gets suspended. */ 1090 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 1091 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 1092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 1093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 1094 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 1095 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 1096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1097 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1098 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1099 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1100 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 1101 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 1102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 1105 default: 1106 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1107 SCU_COMPLETION_TL_STATUS_SHIFT; 1108 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1109 break; 1110 } 1111 1112 /* 1113 * TODO: This is probably wrong for ACK/NAK timeout conditions 1114 */ 1115 1116 /* In all cases we will treat this as the completion of the IO req. */ 1117 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1118 return SCI_SUCCESS; 1119} 1120 1121static enum sci_status 1122request_aborting_state_tc_event(struct isci_request *ireq, 1123 u32 completion_code) 1124{ 1125 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1126 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 1127 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 1128 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 1129 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 1130 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1131 break; 1132 1133 default: 1134 /* Unless we get some strange error wait for the task abort to complete 1135 * TODO: Should there be a state change for this completion? 1136 */ 1137 break; 1138 } 1139 1140 return SCI_SUCCESS; 1141} 1142 1143static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, 1144 u32 completion_code) 1145{ 1146 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1147 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1148 ireq->scu_status = SCU_TASK_DONE_GOOD; 1149 ireq->sci_status = SCI_SUCCESS; 1150 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1151 break; 1152 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1153 /* Currently, the decision is to simply allow the task request 1154 * to timeout if the task IU wasn't received successfully. 1155 * There is a potential for receiving multiple task responses if 1156 * we decide to send the task IU again. 1157 */ 1158 dev_warn(&ireq->owning_controller->pdev->dev, 1159 "%s: TaskRequest:0x%p CompletionCode:%x - " 1160 "ACK/NAK timeout\n", __func__, ireq, 1161 completion_code); 1162 1163 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1164 break; 1165 default: 1166 /* 1167 * All other completion status cause the IO to be complete. 1168 * If a NAK was received, then it is up to the user to retry 1169 * the request. 
1170 */ 1171 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1172 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1173 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1174 break; 1175 } 1176 1177 return SCI_SUCCESS; 1178} 1179 1180static enum sci_status 1181smp_request_await_response_tc_event(struct isci_request *ireq, 1182 u32 completion_code) 1183{ 1184 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1185 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1186 /* In the AWAIT RESPONSE state, any TC completion is 1187 * unexpected. but if the TC has success status, we 1188 * complete the IO anyway. 1189 */ 1190 ireq->scu_status = SCU_TASK_DONE_GOOD; 1191 ireq->sci_status = SCI_SUCCESS; 1192 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1193 break; 1194 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1195 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1196 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1197 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1198 /* These status has been seen in a specific LSI 1199 * expander, which sometimes is not able to send smp 1200 * response within 2 ms. This causes our hardware break 1201 * the connection and set TC completion with one of 1202 * these SMP_XXX_XX_ERR status. For these type of error, 1203 * we ask ihost user to retry the request. 1204 */ 1205 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; 1206 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; 1207 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1208 break; 1209 default: 1210 /* All other completion status cause the IO to be complete. If a NAK 1211 * was received, then it is up to the user to retry the request 1212 */ 1213 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1214 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1215 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1216 break; 1217 } 1218 1219 return SCI_SUCCESS; 1220} 1221 1222static enum sci_status 1223smp_request_await_tc_event(struct isci_request *ireq, 1224 u32 completion_code) 1225{ 1226 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1227 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1228 ireq->scu_status = SCU_TASK_DONE_GOOD; 1229 ireq->sci_status = SCI_SUCCESS; 1230 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1231 break; 1232 default: 1233 /* All other completion status cause the IO to be 1234 * complete. If a NAK was received, then it is up to 1235 * the user to retry the request. 
1236 */ 1237 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1238 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1239 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1240 break; 1241 } 1242 1243 return SCI_SUCCESS; 1244} 1245 1246static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1247{ 1248 struct scu_sgl_element *sgl; 1249 struct scu_sgl_element_pair *sgl_pair; 1250 struct isci_request *ireq = to_ireq(stp_req); 1251 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1252 1253 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1254 if (!sgl_pair) 1255 sgl = NULL; 1256 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1257 if (sgl_pair->B.address_lower == 0 && 1258 sgl_pair->B.address_upper == 0) { 1259 sgl = NULL; 1260 } else { 1261 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; 1262 sgl = &sgl_pair->B; 1263 } 1264 } else { 1265 if (sgl_pair->next_pair_lower == 0 && 1266 sgl_pair->next_pair_upper == 0) { 1267 sgl = NULL; 1268 } else { 1269 pio_sgl->index++; 1270 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1271 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1272 sgl = &sgl_pair->A; 1273 } 1274 } 1275 1276 return sgl; 1277} 1278 1279static enum sci_status 1280stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, 1281 u32 completion_code) 1282{ 1283 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1284 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1285 ireq->scu_status = SCU_TASK_DONE_GOOD; 1286 ireq->sci_status = SCI_SUCCESS; 1287 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1288 break; 1289 1290 default: 1291 /* All other completion status cause the IO to be 1292 * complete. If a NAK was received, then it is up to 1293 * the user to retry the request. 1294 */ 1295 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1296 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1297 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1298 break; 1299 } 1300 1301 return SCI_SUCCESS; 1302} 1303 1304#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1305 1306/* transmit DATA_FIS from (current sgl + offset) for input 1307 * parameter length. current sgl and offset is alreay stored in the IO request 1308 */ 1309static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1310 struct isci_request *ireq, 1311 u32 length) 1312{ 1313 struct isci_stp_request *stp_req = &ireq->stp.req; 1314 struct scu_task_context *task_context = ireq->tc; 1315 struct scu_sgl_element_pair *sgl_pair; 1316 struct scu_sgl_element *current_sgl; 1317 1318 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1319 * for the data from current_sgl+offset for the input length 1320 */ 1321 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1322 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1323 current_sgl = &sgl_pair->A; 1324 else 1325 current_sgl = &sgl_pair->B; 1326 1327 /* update the TC */ 1328 task_context->command_iu_upper = current_sgl->address_upper; 1329 task_context->command_iu_lower = current_sgl->address_lower; 1330 task_context->transfer_length_bytes = length; 1331 task_context->type.stp.fis_type = FIS_DATA; 1332 1333 /* send the new TC out. 
*/ 1334 return sci_controller_continue_io(ireq); 1335} 1336 1337static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1338{ 1339 struct isci_stp_request *stp_req = &ireq->stp.req; 1340 struct scu_sgl_element_pair *sgl_pair; 1341 enum sci_status status = SCI_SUCCESS; 1342 struct scu_sgl_element *sgl; 1343 u32 offset; 1344 u32 len = 0; 1345 1346 offset = stp_req->sgl.offset; 1347 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1348 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1349 return SCI_FAILURE; 1350 1351 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { 1352 sgl = &sgl_pair->A; 1353 len = sgl_pair->A.length - offset; 1354 } else { 1355 sgl = &sgl_pair->B; 1356 len = sgl_pair->B.length - offset; 1357 } 1358 1359 if (stp_req->pio_len == 0) 1360 return SCI_SUCCESS; 1361 1362 if (stp_req->pio_len >= len) { 1363 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1364 if (status != SCI_SUCCESS) 1365 return status; 1366 stp_req->pio_len -= len; 1367 1368 /* update the current sgl, offset and save for future */ 1369 sgl = pio_sgl_next(stp_req); 1370 offset = 0; 1371 } else if (stp_req->pio_len < len) { 1372 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1373 1374 /* Sgl offset will be adjusted and saved for future */ 1375 offset += stp_req->pio_len; 1376 sgl->address_lower += stp_req->pio_len; 1377 stp_req->pio_len = 0; 1378 } 1379 1380 stp_req->sgl.offset = offset; 1381 1382 return status; 1383} 1384 1385/** 1386 * 1387 * @stp_request: The request that is used for the SGL processing. 1388 * @data_buffer: The buffer of data to be copied. 1389 * @length: The length of the data transfer. 1390 * 1391 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1392 * specified data region. enum sci_status 1393 */ 1394static enum sci_status 1395sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1396 u8 *data_buf, u32 len) 1397{ 1398 struct isci_request *ireq; 1399 u8 *src_addr; 1400 int copy_len; 1401 struct sas_task *task; 1402 struct scatterlist *sg; 1403 void *kaddr; 1404 int total_len = len; 1405 1406 ireq = to_ireq(stp_req); 1407 task = isci_request_access_task(ireq); 1408 src_addr = data_buf; 1409 1410 if (task->num_scatter > 0) { 1411 sg = task->scatter; 1412 1413 while (total_len > 0) { 1414 struct page *page = sg_page(sg); 1415 1416 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1417 kaddr = kmap_atomic(page, KM_IRQ0); 1418 memcpy(kaddr + sg->offset, src_addr, copy_len); 1419 kunmap_atomic(kaddr, KM_IRQ0); 1420 total_len -= copy_len; 1421 src_addr += copy_len; 1422 sg = sg_next(sg); 1423 } 1424 } else { 1425 BUG_ON(task->total_xfer_len < total_len); 1426 memcpy(task->scatter, src_addr, total_len); 1427 } 1428 1429 return SCI_SUCCESS; 1430} 1431 1432/** 1433 * 1434 * @sci_req: The PIO DATA IN request that is to receive the data. 1435 * @data_buffer: The buffer to copy from. 1436 * 1437 * Copy the data buffer to the io request data region. 
enum sci_status 1438 */ 1439static enum sci_status sci_stp_request_pio_data_in_copy_data( 1440 struct isci_stp_request *stp_req, 1441 u8 *data_buffer) 1442{ 1443 enum sci_status status; 1444 1445 /* 1446 * If there is less than 1K remaining in the transfer request 1447 * copy just the data for the transfer */ 1448 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { 1449 status = sci_stp_request_pio_data_in_copy_data_buffer( 1450 stp_req, data_buffer, stp_req->pio_len); 1451 1452 if (status == SCI_SUCCESS) 1453 stp_req->pio_len = 0; 1454 } else { 1455 /* We are transfering the whole frame so copy */ 1456 status = sci_stp_request_pio_data_in_copy_data_buffer( 1457 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1458 1459 if (status == SCI_SUCCESS) 1460 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; 1461 } 1462 1463 return status; 1464} 1465 1466static enum sci_status 1467stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, 1468 u32 completion_code) 1469{ 1470 enum sci_status status = SCI_SUCCESS; 1471 1472 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1473 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1474 ireq->scu_status = SCU_TASK_DONE_GOOD; 1475 ireq->sci_status = SCI_SUCCESS; 1476 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1477 break; 1478 1479 default: 1480 /* All other completion status cause the IO to be 1481 * complete. If a NAK was received, then it is up to 1482 * the user to retry the request. 1483 */ 1484 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1485 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1486 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1487 break; 1488 } 1489 1490 return status; 1491} 1492 1493static enum sci_status 1494pio_data_out_tx_done_tc_event(struct isci_request *ireq, 1495 u32 completion_code) 1496{ 1497 enum sci_status status = SCI_SUCCESS; 1498 bool all_frames_transferred = false; 1499 struct isci_stp_request *stp_req = &ireq->stp.req; 1500 1501 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1502 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1503 /* Transmit data */ 1504 if (stp_req->pio_len != 0) { 1505 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1506 if (status == SCI_SUCCESS) { 1507 if (stp_req->pio_len == 0) 1508 all_frames_transferred = true; 1509 } 1510 } else if (stp_req->pio_len == 0) { 1511 /* 1512 * this will happen if the all data is written at the 1513 * first time after the pio setup fis is received 1514 */ 1515 all_frames_transferred = true; 1516 } 1517 1518 /* all data transferred. */ 1519 if (all_frames_transferred) { 1520 /* 1521 * Change the state to SCI_REQ_STP_PIO_DATA_IN 1522 * and wait for PIO_SETUP fis / or D2H REg fis. */ 1523 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1524 } 1525 break; 1526 1527 default: 1528 /* 1529 * All other completion status cause the IO to be complete. 1530 * If a NAK was received, then it is up to the user to retry 1531 * the request. 
1532 */ 1533 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1534 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1535 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1536 break; 1537 } 1538 1539 return status; 1540} 1541 1542static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, 1543 u32 frame_index) 1544{ 1545 struct isci_host *ihost = ireq->owning_controller; 1546 struct dev_to_host_fis *frame_header; 1547 enum sci_status status; 1548 u32 *frame_buffer; 1549 1550 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1551 frame_index, 1552 (void **)&frame_header); 1553 1554 if ((status == SCI_SUCCESS) && 1555 (frame_header->fis_type == FIS_REGD2H)) { 1556 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1557 frame_index, 1558 (void **)&frame_buffer); 1559 1560 sci_controller_copy_sata_response(&ireq->stp.rsp, 1561 frame_header, 1562 frame_buffer); 1563 } 1564 1565 sci_controller_release_frame(ihost, frame_index); 1566 1567 return status; 1568} 1569 1570static enum sci_status process_unsolicited_fis(struct isci_request *ireq, 1571 u32 frame_index) 1572{ 1573 struct isci_host *ihost = ireq->owning_controller; 1574 enum sci_status status; 1575 struct dev_to_host_fis *frame_header; 1576 u32 *frame_buffer; 1577 1578 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1579 frame_index, 1580 (void **)&frame_header); 1581 1582 if (status != SCI_SUCCESS) 1583 return status; 1584 1585 if (frame_header->fis_type != FIS_REGD2H) { 1586 dev_err(&ireq->isci_host->pdev->dev, 1587 "%s ERROR: invalid fis type 0x%X\n", 1588 __func__, frame_header->fis_type); 1589 return SCI_FAILURE; 1590 } 1591 1592 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1593 frame_index, 1594 (void **)&frame_buffer); 1595 1596 sci_controller_copy_sata_response(&ireq->stp.rsp, 1597 (u32 *)frame_header, 1598 frame_buffer); 1599 1600 /* Frame has been decoded return it to the controller */ 1601 sci_controller_release_frame(ihost, frame_index); 1602 1603 return status; 1604} 1605 1606static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, 1607 u32 frame_index) 1608{ 1609 struct sas_task *task = isci_request_access_task(ireq); 1610 enum sci_status status; 1611 1612 status = process_unsolicited_fis(ireq, frame_index); 1613 1614 if (status == SCI_SUCCESS) { 1615 if (ireq->stp.rsp.status & ATA_ERR) 1616 status = SCI_IO_FAILURE_RESPONSE_VALID; 1617 } else { 1618 status = SCI_IO_FAILURE_RESPONSE_VALID; 1619 } 1620 1621 if (status != SCI_SUCCESS) { 1622 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1623 ireq->sci_status = status; 1624 } else { 1625 ireq->scu_status = SCU_TASK_DONE_GOOD; 1626 ireq->sci_status = SCI_SUCCESS; 1627 } 1628 1629 /* the d2h ufi is the end of non-data commands */ 1630 if (task->data_dir == DMA_NONE) 1631 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1632 1633 return status; 1634} 1635 1636static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) 1637{ 1638 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1639 void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; 1640 struct scu_task_context *task_context = ireq->tc; 1641 1642 /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame 1643 * type. The TC for previous Packet fis was already there, we only need to 1644 * change the H2D fis content. 
1645 */ 1646 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); 1647 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); 1648 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); 1649 task_context->type.stp.fis_type = FIS_DATA; 1650 task_context->transfer_length_bytes = dev->cdb_len; 1651} 1652 1653static void scu_atapi_construct_task_context(struct isci_request *ireq) 1654{ 1655 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1656 struct sas_task *task = isci_request_access_task(ireq); 1657 struct scu_task_context *task_context = ireq->tc; 1658 int cdb_len = dev->cdb_len; 1659 1660 /* reference: SSTL 1.13.4.2 1661 * task_type, sata_direction 1662 */ 1663 if (task->data_dir == DMA_TO_DEVICE) { 1664 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; 1665 task_context->sata_direction = 0; 1666 } else { 1667 /* todo: for NO_DATA command, we need to send out raw frame. */ 1668 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; 1669 task_context->sata_direction = 1; 1670 } 1671 1672 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); 1673 task_context->type.stp.fis_type = FIS_DATA; 1674 1675 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 1676 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); 1677 task_context->ssp_command_iu_length = cdb_len / sizeof(u32); 1678 1679 /* task phase is set to TX_CMD */ 1680 task_context->task_phase = 0x1; 1681 1682 /* retry counter */ 1683 task_context->stp_retry_count = 0; 1684 1685 /* data transfer size. */ 1686 task_context->transfer_length_bytes = task->total_xfer_len; 1687 1688 /* setup sgl */ 1689 sci_request_build_sgl(ireq); 1690} 1691 1692enum sci_status 1693sci_io_request_frame_handler(struct isci_request *ireq, 1694 u32 frame_index) 1695{ 1696 struct isci_host *ihost = ireq->owning_controller; 1697 struct isci_stp_request *stp_req = &ireq->stp.req; 1698 enum sci_base_request_states state; 1699 enum sci_status status; 1700 ssize_t word_cnt; 1701 1702 state = ireq->sm.current_state_id; 1703 switch (state) { 1704 case SCI_REQ_STARTED: { 1705 struct ssp_frame_hdr ssp_hdr; 1706 void *frame_header; 1707 1708 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1709 frame_index, 1710 &frame_header); 1711 1712 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1713 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1714 1715 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1716 struct ssp_response_iu *resp_iu; 1717 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1718 1719 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1720 frame_index, 1721 (void **)&resp_iu); 1722 1723 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1724 1725 resp_iu = &ireq->ssp.rsp; 1726 1727 if (resp_iu->datapres == 0x01 || 1728 resp_iu->datapres == 0x02) { 1729 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1730 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1731 } else { 1732 ireq->scu_status = SCU_TASK_DONE_GOOD; 1733 ireq->sci_status = SCI_SUCCESS; 1734 } 1735 } else { 1736 /* not a response frame, why did it get forwarded? 
*/ 1737 dev_err(&ihost->pdev->dev, 1738 "%s: SCIC IO Request 0x%p received unexpected " 1739 "frame %d type 0x%02x\n", __func__, ireq, 1740 frame_index, ssp_hdr.frame_type); 1741 } 1742 1743 /* 1744 * In any case we are done with this frame buffer return it to 1745 * the controller 1746 */ 1747 sci_controller_release_frame(ihost, frame_index); 1748 1749 return SCI_SUCCESS; 1750 } 1751 1752 case SCI_REQ_TASK_WAIT_TC_RESP: 1753 sci_io_request_copy_response(ireq); 1754 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1755 sci_controller_release_frame(ihost, frame_index); 1756 return SCI_SUCCESS; 1757 1758 case SCI_REQ_SMP_WAIT_RESP: { 1759 struct sas_task *task = isci_request_access_task(ireq); 1760 struct scatterlist *sg = &task->smp_task.smp_resp; 1761 void *frame_header, *kaddr; 1762 u8 *rsp; 1763 1764 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1765 frame_index, 1766 &frame_header); 1767 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 1768 rsp = kaddr + sg->offset; 1769 sci_swab32_cpy(rsp, frame_header, 1); 1770 1771 if (rsp[0] == SMP_RESPONSE) { 1772 void *smp_resp; 1773 1774 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1775 frame_index, 1776 &smp_resp); 1777 1778 word_cnt = (sg->length/4)-1; 1779 if (word_cnt > 0) 1780 word_cnt = min_t(unsigned int, word_cnt, 1781 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); 1782 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); 1783 1784 ireq->scu_status = SCU_TASK_DONE_GOOD; 1785 ireq->sci_status = SCI_SUCCESS; 1786 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1787 } else { 1788 /* 1789 * This was not a response frame why did it get 1790 * forwarded? 1791 */ 1792 dev_err(&ihost->pdev->dev, 1793 "%s: SCIC SMP Request 0x%p received unexpected " 1794 "frame %d type 0x%02x\n", 1795 __func__, 1796 ireq, 1797 frame_index, 1798 rsp[0]); 1799 1800 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1801 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1802 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1803 } 1804 kunmap_atomic(kaddr, KM_IRQ0); 1805 1806 sci_controller_release_frame(ihost, frame_index); 1807 1808 return SCI_SUCCESS; 1809 } 1810 1811 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1812 return sci_stp_request_udma_general_frame_handler(ireq, 1813 frame_index); 1814 1815 case SCI_REQ_STP_UDMA_WAIT_D2H: 1816 /* Use the general frame handler to copy the resposne data */ 1817 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1818 1819 if (status != SCI_SUCCESS) 1820 return status; 1821 1822 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1823 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1824 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1825 return SCI_SUCCESS; 1826 1827 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1828 struct dev_to_host_fis *frame_header; 1829 u32 *frame_buffer; 1830 1831 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1832 frame_index, 1833 (void **)&frame_header); 1834 1835 if (status != SCI_SUCCESS) { 1836 dev_err(&ihost->pdev->dev, 1837 "%s: SCIC IO Request 0x%p could not get frame " 1838 "header for frame index %d, status %x\n", 1839 __func__, 1840 stp_req, 1841 frame_index, 1842 status); 1843 1844 return status; 1845 } 1846 1847 switch (frame_header->fis_type) { 1848 case FIS_REGD2H: 1849 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1850 frame_index, 1851 (void **)&frame_buffer); 1852 1853 sci_controller_copy_sata_response(&ireq->stp.rsp, 1854 frame_header, 1855 frame_buffer); 1856 1857 /* The command has completed with error */ 1858 
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1859 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1860 break; 1861 1862 default: 1863 dev_warn(&ihost->pdev->dev, 1864 "%s: IO Request:0x%p Frame Id:%d protocol " 1865 "violation occurred\n", __func__, stp_req, 1866 frame_index); 1867 1868 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1869 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1870 break; 1871 } 1872 1873 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1874 1875 /* Frame has been decoded return it to the controller */ 1876 sci_controller_release_frame(ihost, frame_index); 1877 1878 return status; 1879 } 1880 1881 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1882 struct sas_task *task = isci_request_access_task(ireq); 1883 struct dev_to_host_fis *frame_header; 1884 u32 *frame_buffer; 1885 1886 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1887 frame_index, 1888 (void **)&frame_header); 1889 1890 if (status != SCI_SUCCESS) { 1891 dev_err(&ihost->pdev->dev, 1892 "%s: SCIC IO Request 0x%p could not get frame " 1893 "header for frame index %d, status %x\n", 1894 __func__, stp_req, frame_index, status); 1895 return status; 1896 } 1897 1898 switch (frame_header->fis_type) { 1899 case FIS_PIO_SETUP: 1900 /* Get from the frame buffer the PIO Setup Data */ 1901 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1902 frame_index, 1903 (void **)&frame_buffer); 1904 1905 /* Get the data from the PIO Setup The SCU Hardware 1906 * returns first word in the frame_header and the rest 1907 * of the data is in the frame buffer so we need to 1908 * back up one dword 1909 */ 1910 1911 /* transfer_count: first 16bits in the 4th dword */ 1912 stp_req->pio_len = frame_buffer[3] & 0xffff; 1913 1914 /* status: 4th byte in the 3rd dword */ 1915 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1916 1917 sci_controller_copy_sata_response(&ireq->stp.rsp, 1918 frame_header, 1919 frame_buffer); 1920 1921 ireq->stp.rsp.status = stp_req->status; 1922 1923 /* The next state is dependent on whether the 1924 * request was PIO Data-in or Data out 1925 */ 1926 if (task->data_dir == DMA_FROM_DEVICE) { 1927 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1928 } else if (task->data_dir == DMA_TO_DEVICE) { 1929 /* Transmit data */ 1930 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1931 if (status != SCI_SUCCESS) 1932 break; 1933 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1934 } 1935 break; 1936 1937 case FIS_SETDEVBITS: 1938 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1939 break; 1940 1941 case FIS_REGD2H: 1942 if (frame_header->status & ATA_BUSY) { 1943 /* 1944 * Now why is the drive sending a D2H Register 1945 * FIS when it is still busy? Do nothing since 1946 * we are still in the right state. 1947 */ 1948 dev_dbg(&ihost->pdev->dev, 1949 "%s: SCIC PIO Request 0x%p received " 1950 "D2H Register FIS with BSY status " 1951 "0x%x\n", 1952 __func__, 1953 stp_req, 1954 frame_header->status); 1955 break; 1956 } 1957 1958 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1959 frame_index, 1960 (void **)&frame_buffer); 1961 1962 sci_controller_copy_sata_response(&ireq->stp.req, 1963 frame_header, 1964 frame_buffer); 1965 1966 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1967 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1968 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1969 break; 1970 1971 default: 1972 /* FIXME: what do we do here? 
*/ 1973 break; 1974 } 1975 1976 /* Frame is decoded return it to the controller */ 1977 sci_controller_release_frame(ihost, frame_index); 1978 1979 return status; 1980 } 1981 1982 case SCI_REQ_STP_PIO_DATA_IN: { 1983 struct dev_to_host_fis *frame_header; 1984 struct sata_fis_data *frame_buffer; 1985 1986 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1987 frame_index, 1988 (void **)&frame_header); 1989 1990 if (status != SCI_SUCCESS) { 1991 dev_err(&ihost->pdev->dev, 1992 "%s: SCIC IO Request 0x%p could not get frame " 1993 "header for frame index %d, status %x\n", 1994 __func__, 1995 stp_req, 1996 frame_index, 1997 status); 1998 return status; 1999 } 2000 2001 if (frame_header->fis_type != FIS_DATA) { 2002 dev_err(&ihost->pdev->dev, 2003 "%s: SCIC PIO Request 0x%p received frame %d " 2004 "with fis type 0x%02x when expecting a data " 2005 "fis.\n", 2006 __func__, 2007 stp_req, 2008 frame_index, 2009 frame_header->fis_type); 2010 2011 ireq->scu_status = SCU_TASK_DONE_GOOD; 2012 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 2013 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2014 2015 /* Frame is decoded return it to the controller */ 2016 sci_controller_release_frame(ihost, frame_index); 2017 return status; 2018 } 2019 2020 if (stp_req->sgl.index < 0) { 2021 ireq->saved_rx_frame_index = frame_index; 2022 stp_req->pio_len = 0; 2023 } else { 2024 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 2025 frame_index, 2026 (void **)&frame_buffer); 2027 2028 status = sci_stp_request_pio_data_in_copy_data(stp_req, 2029 (u8 *)frame_buffer); 2030 2031 /* Frame is decoded return it to the controller */ 2032 sci_controller_release_frame(ihost, frame_index); 2033 } 2034 2035 /* Check for the end of the transfer, are there more 2036 * bytes remaining for this data transfer 2037 */ 2038 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 2039 return status; 2040 2041 if ((stp_req->status & ATA_BUSY) == 0) { 2042 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2043 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2044 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2045 } else { 2046 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 2047 } 2048 return status; 2049 } 2050 2051 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2052 struct sas_task *task = isci_request_access_task(ireq); 2053 2054 sci_controller_release_frame(ihost, frame_index); 2055 ireq->target_device->working_request = ireq; 2056 if (task->data_dir == DMA_NONE) { 2057 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); 2058 scu_atapi_reconstruct_raw_frame_task_context(ireq); 2059 } else { 2060 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2061 scu_atapi_construct_task_context(ireq); 2062 } 2063 2064 sci_controller_continue_io(ireq); 2065 return SCI_SUCCESS; 2066 } 2067 case SCI_REQ_ATAPI_WAIT_D2H: 2068 return atapi_d2h_reg_frame_handler(ireq, frame_index); 2069 case SCI_REQ_ABORTING: 2070 /* 2071 * TODO: Is it even possible to get an unsolicited frame in the 2072 * aborting state? 
2073 */ 2074 sci_controller_release_frame(ihost, frame_index); 2075 return SCI_SUCCESS; 2076 2077 default: 2078 dev_warn(&ihost->pdev->dev, 2079 "%s: SCIC IO Request given unexpected frame %x while " 2080 "in state %d\n", 2081 __func__, 2082 frame_index, 2083 state); 2084 2085 sci_controller_release_frame(ihost, frame_index); 2086 return SCI_FAILURE_INVALID_STATE; 2087 } 2088} 2089 2090static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, 2091 u32 completion_code) 2092{ 2093 enum sci_status status = SCI_SUCCESS; 2094 2095 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2097 ireq->scu_status = SCU_TASK_DONE_GOOD; 2098 ireq->sci_status = SCI_SUCCESS; 2099 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2100 break; 2101 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 2102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 2103 /* We must check the response buffer to see if the D2H 2104 * Register FIS was received before we got the TC 2105 * completion. 2106 */ 2107 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2108 sci_remote_device_suspend(ireq->target_device, 2109 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2110 2111 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2112 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2113 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2114 } else { 2115 /* If we have an error completion status for the 2116 * TC, then we can expect a D2H register FIS from 2117 * the device, so we must change state to wait 2118 * for it. 2119 */ 2120 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 2121 } 2122 break; 2123 2124 /* TODO Check to see if any of these completion statuses need to 2125 * wait for the device to host register fis. 2126 */ 2127 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2128 * - this comes only for B0 2129 */ 2130 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): 2131 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 2132 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 2133 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 2134 sci_remote_device_suspend(ireq->target_device, 2135 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2136 /* Fall through to the default case */ 2137 default: 2138 /* All other completion statuses cause the IO to be complete. */ 2139 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2140 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2141 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2142 break; 2143 } 2144 2145 return status; 2146} 2147 2148static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, 2149 enum sci_base_request_states next) 2150{ 2151 enum sci_status status = SCI_SUCCESS; 2152 2153 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2154 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2155 ireq->scu_status = SCU_TASK_DONE_GOOD; 2156 ireq->sci_status = SCI_SUCCESS; 2157 sci_change_state(&ireq->sm, next); 2158 break; 2159 default: 2160 /* All other completion statuses cause the IO to be complete. 2161 * If a NAK was received, then it is up to the user to retry 2162 * the request.
2163 */ 2164 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2165 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2166 2167 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2168 break; 2169 } 2170 2171 return status; 2172} 2173 2174static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, 2175 u32 completion_code) 2176{ 2177 struct isci_remote_device *idev = ireq->target_device; 2178 struct dev_to_host_fis *d2h = &ireq->stp.rsp; 2179 enum sci_status status = SCI_SUCCESS; 2180 2181 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2182 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 2183 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2184 break; 2185 2186 case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { 2187 u16 len = sci_req_tx_bytes(ireq); 2188 2189 /* likely a non-error data underrun; work around the missing 2190 * d2h frame from the controller 2191 */ 2192 if (d2h->fis_type != FIS_REGD2H) { 2193 d2h->fis_type = FIS_REGD2H; 2194 d2h->flags = (1 << 6); 2195 d2h->status = 0x50; 2196 d2h->error = 0; 2197 d2h->lbal = 0; 2198 d2h->byte_count_low = len & 0xff; 2199 d2h->byte_count_high = len >> 8; 2200 d2h->device = 0xa0; 2201 d2h->lbal_exp = 0; 2202 d2h->lbam_exp = 0; 2203 d2h->lbah_exp = 0; 2204 d2h->_r_a = 0; 2205 d2h->sector_count = 0x3; 2206 d2h->sector_count_exp = 0; 2207 d2h->_r_b = 0; 2208 d2h->_r_c = 0; 2209 d2h->_r_d = 0; 2210 } 2211 2212 ireq->scu_status = SCU_TASK_DONE_GOOD; 2213 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 2214 status = ireq->sci_status; 2215 2216 /* the hw will have suspended the rnc, so complete the 2217 * request upon pending resume 2218 */ 2219 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2220 break; 2221 } 2222 case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): 2223 /* In this case, there is no UF coming after. 2224 * complete the IO now.
2225 */ 2226 ireq->scu_status = SCU_TASK_DONE_GOOD; 2227 ireq->sci_status = SCI_SUCCESS; 2228 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2229 break; 2230 2231 default: 2232 if (d2h->fis_type == FIS_REGD2H) { 2233 /* UF received change the device state to ATAPI_ERROR */ 2234 status = ireq->sci_status; 2235 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); 2236 } else { 2237 /* If receiving any non-sucess TC status, no UF 2238 * received yet, then an UF for the status fis 2239 * is coming after (XXX: suspect this is 2240 * actually a protocol error or a bug like the 2241 * DONE_UNEXP_FIS case) 2242 */ 2243 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2244 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2245 2246 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2247 } 2248 break; 2249 } 2250 2251 return status; 2252} 2253 2254enum sci_status 2255sci_io_request_tc_completion(struct isci_request *ireq, 2256 u32 completion_code) 2257{ 2258 enum sci_base_request_states state; 2259 struct isci_host *ihost = ireq->owning_controller; 2260 2261 state = ireq->sm.current_state_id; 2262 2263 switch (state) { 2264 case SCI_REQ_STARTED: 2265 return request_started_state_tc_event(ireq, completion_code); 2266 2267 case SCI_REQ_TASK_WAIT_TC_COMP: 2268 return ssp_task_request_await_tc_event(ireq, 2269 completion_code); 2270 2271 case SCI_REQ_SMP_WAIT_RESP: 2272 return smp_request_await_response_tc_event(ireq, 2273 completion_code); 2274 2275 case SCI_REQ_SMP_WAIT_TC_COMP: 2276 return smp_request_await_tc_event(ireq, completion_code); 2277 2278 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 2279 return stp_request_udma_await_tc_event(ireq, 2280 completion_code); 2281 2282 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 2283 return stp_request_non_data_await_h2d_tc_event(ireq, 2284 completion_code); 2285 2286 case SCI_REQ_STP_PIO_WAIT_H2D: 2287 return stp_request_pio_await_h2d_completion_tc_event(ireq, 2288 completion_code); 2289 2290 case SCI_REQ_STP_PIO_DATA_OUT: 2291 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2292 2293 case SCI_REQ_ABORTING: 2294 return request_aborting_state_tc_event(ireq, 2295 completion_code); 2296 2297 case SCI_REQ_ATAPI_WAIT_H2D: 2298 return atapi_raw_completion(ireq, completion_code, 2299 SCI_REQ_ATAPI_WAIT_PIO_SETUP); 2300 2301 case SCI_REQ_ATAPI_WAIT_TC_COMP: 2302 return atapi_raw_completion(ireq, completion_code, 2303 SCI_REQ_ATAPI_WAIT_D2H); 2304 2305 case SCI_REQ_ATAPI_WAIT_D2H: 2306 return atapi_data_tc_completion_handler(ireq, completion_code); 2307 2308 default: 2309 dev_warn(&ihost->pdev->dev, 2310 "%s: SCIC IO Request given task completion " 2311 "notification %x while in wrong state %d\n", 2312 __func__, 2313 completion_code, 2314 state); 2315 return SCI_FAILURE_INVALID_STATE; 2316 } 2317} 2318 2319/** 2320 * isci_request_process_response_iu() - This function sets the status and 2321 * response iu, in the task struct, from the request object for the upper 2322 * layer driver. 2323 * @sas_task: This parameter is the task struct from the upper layer driver. 2324 * @resp_iu: This parameter points to the response iu of the completed request. 2325 * @dev: This parameter specifies the linux device struct. 2326 * 2327 * none. 
2328 */ 2329static void isci_request_process_response_iu( 2330 struct sas_task *task, 2331 struct ssp_response_iu *resp_iu, 2332 struct device *dev) 2333{ 2334 dev_dbg(dev, 2335 "%s: resp_iu = %p " 2336 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2337 "resp_iu->response_data_len = %x, " 2338 "resp_iu->sense_data_len = %x\nrepsonse data: ", 2339 __func__, 2340 resp_iu, 2341 resp_iu->status, 2342 resp_iu->datapres, 2343 resp_iu->response_data_len, 2344 resp_iu->sense_data_len); 2345 2346 task->task_status.stat = resp_iu->status; 2347 2348 /* libsas updates the task status fields based on the response iu. */ 2349 sas_ssp_task_response(dev, task, resp_iu); 2350} 2351 2352/** 2353 * isci_request_set_open_reject_status() - This function prepares the I/O 2354 * completion for OPEN_REJECT conditions. 2355 * @request: This parameter is the completed isci_request object. 2356 * @response_ptr: This parameter specifies the service response for the I/O. 2357 * @status_ptr: This parameter specifies the exec status for the I/O. 2358 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2359 * the LLDD with respect to completing this request or forcing an abort 2360 * condition on the I/O. 2361 * @open_rej_reason: This parameter specifies the encoded reason for the 2362 * abandon-class reject. 2363 * 2364 * none. 2365 */ 2366static void isci_request_set_open_reject_status( 2367 struct isci_request *request, 2368 struct sas_task *task, 2369 enum service_response *response_ptr, 2370 enum exec_status *status_ptr, 2371 enum isci_completion_selection *complete_to_host_ptr, 2372 enum sas_open_rej_reason open_rej_reason) 2373{ 2374 /* Task in the target is done. */ 2375 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2376 *response_ptr = SAS_TASK_UNDELIVERED; 2377 *status_ptr = SAS_OPEN_REJECT; 2378 *complete_to_host_ptr = isci_perform_normal_io_completion; 2379 task->task_status.open_rej_reason = open_rej_reason; 2380} 2381 2382/** 2383 * isci_request_handle_controller_specific_errors() - This function decodes 2384 * controller-specific I/O completion error conditions. 2385 * @request: This parameter is the completed isci_request object. 2386 * @response_ptr: This parameter specifies the service response for the I/O. 2387 * @status_ptr: This parameter specifies the exec status for the I/O. 2388 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2389 * the LLDD with respect to completing this request or forcing an abort 2390 * condition on the I/O. 2391 * 2392 * none. 2393 */ 2394static void isci_request_handle_controller_specific_errors( 2395 struct isci_remote_device *idev, 2396 struct isci_request *request, 2397 struct sas_task *task, 2398 enum service_response *response_ptr, 2399 enum exec_status *status_ptr, 2400 enum isci_completion_selection *complete_to_host_ptr) 2401{ 2402 unsigned int cstatus; 2403 2404 cstatus = request->scu_status; 2405 2406 dev_dbg(&request->isci_host->pdev->dev, 2407 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2408 "- controller status = 0x%x\n", 2409 __func__, request, cstatus); 2410 2411 /* Decode the controller-specific errors; most 2412 * important is to recognize those conditions in which 2413 * the target may still have a task outstanding that 2414 * must be aborted. 
2415 * 2416 * Note that there are SCU completion codes being 2417 * named in the decode below for which SCIC has already 2418 * done work to handle them in a way other than as 2419 * a controller-specific completion code; these are left 2420 * in the decode below for completeness sake. 2421 */ 2422 switch (cstatus) { 2423 case SCU_TASK_DONE_DMASETUP_DIRERR: 2424 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2425 case SCU_TASK_DONE_XFERCNT_ERR: 2426 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2427 if (task->task_proto == SAS_PROTOCOL_SMP) { 2428 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2429 *response_ptr = SAS_TASK_COMPLETE; 2430 2431 /* See if the device has been/is being stopped. Note 2432 * that we ignore the quiesce state, since we are 2433 * concerned about the actual device state. 2434 */ 2435 if (!idev) 2436 *status_ptr = SAS_DEVICE_UNKNOWN; 2437 else 2438 *status_ptr = SAS_ABORTED_TASK; 2439 2440 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2441 2442 *complete_to_host_ptr = 2443 isci_perform_normal_io_completion; 2444 } else { 2445 /* Task in the target is not done. */ 2446 *response_ptr = SAS_TASK_UNDELIVERED; 2447 2448 if (!idev) 2449 *status_ptr = SAS_DEVICE_UNKNOWN; 2450 else 2451 *status_ptr = SAM_STAT_TASK_ABORTED; 2452 2453 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2454 2455 *complete_to_host_ptr = 2456 isci_perform_error_io_completion; 2457 } 2458 2459 break; 2460 2461 case SCU_TASK_DONE_CRC_ERR: 2462 case SCU_TASK_DONE_NAK_CMD_ERR: 2463 case SCU_TASK_DONE_EXCESS_DATA: 2464 case SCU_TASK_DONE_UNEXP_FIS: 2465 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2466 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 2467 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2468 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 2469 /* These are conditions in which the target 2470 * has completed the task, so that no cleanup 2471 * is necessary. 2472 */ 2473 *response_ptr = SAS_TASK_COMPLETE; 2474 2475 /* See if the device has been/is being stopped. Note 2476 * that we ignore the quiesce state, since we are 2477 * concerned about the actual device state. 2478 */ 2479 if (!idev) 2480 *status_ptr = SAS_DEVICE_UNKNOWN; 2481 else 2482 *status_ptr = SAS_ABORTED_TASK; 2483 2484 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2485 2486 *complete_to_host_ptr = isci_perform_normal_io_completion; 2487 break; 2488 2489 2490 /* Note that the only open reject completion codes seen here will be 2491 * abandon-class codes; all others are automatically retried in the SCU. 2492 */ 2493 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2494 2495 isci_request_set_open_reject_status( 2496 request, task, response_ptr, status_ptr, 2497 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2498 break; 2499 2500 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2501 2502 /* Note - the return of AB0 will change when 2503 * libsas implements detection of zone violations. 
2504 */ 2505 isci_request_set_open_reject_status( 2506 request, task, response_ptr, status_ptr, 2507 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2508 break; 2509 2510 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2511 2512 isci_request_set_open_reject_status( 2513 request, task, response_ptr, status_ptr, 2514 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2515 break; 2516 2517 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2518 2519 isci_request_set_open_reject_status( 2520 request, task, response_ptr, status_ptr, 2521 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2522 break; 2523 2524 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2525 2526 isci_request_set_open_reject_status( 2527 request, task, response_ptr, status_ptr, 2528 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2529 break; 2530 2531 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2532 2533 isci_request_set_open_reject_status( 2534 request, task, response_ptr, status_ptr, 2535 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2536 break; 2537 2538 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2539 2540 isci_request_set_open_reject_status( 2541 request, task, response_ptr, status_ptr, 2542 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2543 break; 2544 2545 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2546 2547 isci_request_set_open_reject_status( 2548 request, task, response_ptr, status_ptr, 2549 complete_to_host_ptr, SAS_OREJ_EPROTO); 2550 break; 2551 2552 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2553 2554 isci_request_set_open_reject_status( 2555 request, task, response_ptr, status_ptr, 2556 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2557 break; 2558 2559 case SCU_TASK_DONE_LL_R_ERR: 2560 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2561 case SCU_TASK_DONE_LL_PERR: 2562 case SCU_TASK_DONE_LL_SY_TERM: 2563 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2564 case SCU_TASK_DONE_LL_LF_TERM: 2565 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2566 case SCU_TASK_DONE_LL_ABORT_ERR: 2567 case SCU_TASK_DONE_SEQ_INV_TYPE: 2568 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2569 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2570 case SCU_TASK_DONE_INV_FIS_LEN: 2571 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2572 case SCU_TASK_DONE_SDMA_ERR: 2573 case SCU_TASK_DONE_OFFSET_ERR: 2574 case SCU_TASK_DONE_MAX_PLD_ERR: 2575 case SCU_TASK_DONE_LF_ERR: 2576 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 2577 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2578 case SCU_TASK_DONE_UNEXP_DATA: 2579 case SCU_TASK_DONE_UNEXP_SDBFIS: 2580 case SCU_TASK_DONE_REG_ERR: 2581 case SCU_TASK_DONE_SDB_ERR: 2582 case SCU_TASK_DONE_TASK_ABORT: 2583 default: 2584 /* Task in the target is not done. */ 2585 *response_ptr = SAS_TASK_UNDELIVERED; 2586 *status_ptr = SAM_STAT_TASK_ABORTED; 2587 2588 if (task->task_proto == SAS_PROTOCOL_SMP) { 2589 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2590 2591 *complete_to_host_ptr = isci_perform_normal_io_completion; 2592 } else { 2593 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2594 2595 *complete_to_host_ptr = isci_perform_error_io_completion; 2596 } 2597 break; 2598 } 2599} 2600 2601/** 2602 * isci_task_save_for_upper_layer_completion() - This function saves the 2603 * request for later completion to the upper layer driver. 2604 * @host: This parameter is a pointer to the host on which the the request 2605 * should be queued (either as an error or success). 2606 * @request: This parameter is the completed request. 2607 * @response: This parameter is the response code for the completed task. 2608 * @status: This parameter is the status code for the completed task. 
2609 * 2610 * none. 2611 */ 2612static void isci_task_save_for_upper_layer_completion( 2613 struct isci_host *host, 2614 struct isci_request *request, 2615 enum service_response response, 2616 enum exec_status status, 2617 enum isci_completion_selection task_notification_selection) 2618{ 2619 struct sas_task *task = isci_request_access_task(request); 2620 2621 task_notification_selection 2622 = isci_task_set_completion_status(task, response, status, 2623 task_notification_selection); 2624 2625 /* Tasks aborted specifically by a call to the lldd_abort_task 2626 * function should not be completed to the host in the regular path. 2627 */ 2628 switch (task_notification_selection) { 2629 2630 case isci_perform_normal_io_completion: 2631 /* Normal notification (task_done) */ 2632 2633 /* Add to the completed list. */ 2634 list_add(&request->completed_node, 2635 &host->requests_to_complete); 2636 2637 /* Take the request off the device's pending request list. */ 2638 list_del_init(&request->dev_node); 2639 break; 2640 2641 case isci_perform_aborted_io_completion: 2642 /* No notification to libsas because this request is 2643 * already in the abort path. 2644 */ 2645 /* Wake up whatever process was waiting for this 2646 * request to complete. 2647 */ 2648 WARN_ON(request->io_request_completion == NULL); 2649 2650 if (request->io_request_completion != NULL) { 2651 2652 /* Signal whoever is waiting that this 2653 * request is complete. 2654 */ 2655 complete(request->io_request_completion); 2656 } 2657 break; 2658 2659 case isci_perform_error_io_completion: 2660 /* Use sas_task_abort */ 2661 /* Add to the aborted list. */ 2662 list_add(&request->completed_node, 2663 &host->requests_to_errorback); 2664 break; 2665 2666 default: 2667 /* Add to the error to libsas list. */ 2668 list_add(&request->completed_node, 2669 &host->requests_to_errorback); 2670 break; 2671 } 2672 dev_dbg(&host->pdev->dev, 2673 "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", 2674 __func__, task_notification_selection, task, 2675 (task) ? task->task_status.resp : 0, response, 2676 (task) ? task->task_status.stat : 0, status); 2677} 2678 2679static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2680{ 2681 struct task_status_struct *ts = &task->task_status; 2682 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2683 2684 resp->frame_len = sizeof(*fis); 2685 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2686 ts->buf_valid_size = sizeof(*resp); 2687 2688 /* If the device fault bit is set in the status register, then 2689 * set the sense data and return. 
2690 */ 2691 if (fis->status & ATA_DF) 2692 ts->stat = SAS_PROTO_RESPONSE; 2693 else if (fis->status & ATA_ERR) 2694 ts->stat = SAM_STAT_CHECK_CONDITION; 2695 else 2696 ts->stat = SAM_STAT_GOOD; 2697 2698 ts->resp = SAS_TASK_COMPLETE; 2699} 2700 2701static void isci_request_io_request_complete(struct isci_host *ihost, 2702 struct isci_request *request, 2703 enum sci_io_status completion_status) 2704{ 2705 struct sas_task *task = isci_request_access_task(request); 2706 struct ssp_response_iu *resp_iu; 2707 unsigned long task_flags; 2708 struct isci_remote_device *idev = request->target_device; 2709 enum service_response response = SAS_TASK_UNDELIVERED; 2710 enum exec_status status = SAS_ABORTED_TASK; 2711 enum isci_request_status request_status; 2712 enum isci_completion_selection complete_to_host 2713 = isci_perform_normal_io_completion; 2714 2715 dev_dbg(&ihost->pdev->dev, 2716 "%s: request = %p, task = %p,\n" 2717 "task->data_dir = %d completion_status = 0x%x\n", 2718 __func__, 2719 request, 2720 task, 2721 task->data_dir, 2722 completion_status); 2723 2724 spin_lock(&request->state_lock); 2725 request_status = request->status; 2726 2727 /* Decode the request status. Note that if the request has been 2728 * aborted by a task management function, we don't care 2729 * what the status is. 2730 */ 2731 switch (request_status) { 2732 2733 case aborted: 2734 /* "aborted" indicates that the request was aborted by a task 2735 * management function, since once a task management request is 2736 * perfomed by the device, the request only completes because 2737 * of the subsequent driver terminate. 2738 * 2739 * Aborted also means an external thread is explicitly managing 2740 * this request, so that we do not complete it up the stack. 2741 * 2742 * The target is still there (since the TMF was successful). 2743 */ 2744 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2745 response = SAS_TASK_COMPLETE; 2746 2747 /* See if the device has been/is being stopped. Note 2748 * that we ignore the quiesce state, since we are 2749 * concerned about the actual device state. 2750 */ 2751 if (!idev) 2752 status = SAS_DEVICE_UNKNOWN; 2753 else 2754 status = SAS_ABORTED_TASK; 2755 2756 complete_to_host = isci_perform_aborted_io_completion; 2757 /* This was an aborted request. */ 2758 2759 spin_unlock(&request->state_lock); 2760 break; 2761 2762 case aborting: 2763 /* aborting means that the task management function tried and 2764 * failed to abort the request. We need to note the request 2765 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2766 * target as down. 2767 * 2768 * Aborting also means an external thread is explicitly managing 2769 * this request, so that we do not complete it up the stack. 2770 */ 2771 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2772 response = SAS_TASK_UNDELIVERED; 2773 2774 if (!idev) 2775 /* The device has been /is being stopped. Note that 2776 * we ignore the quiesce state, since we are 2777 * concerned about the actual device state. 2778 */ 2779 status = SAS_DEVICE_UNKNOWN; 2780 else 2781 status = SAS_PHY_DOWN; 2782 2783 complete_to_host = isci_perform_aborted_io_completion; 2784 2785 /* This was an aborted request. */ 2786 2787 spin_unlock(&request->state_lock); 2788 break; 2789 2790 case terminating: 2791 2792 /* This was an terminated request. This happens when 2793 * the I/O is being terminated because of an action on 2794 * the device (reset, tear down, etc.), and the I/O needs 2795 * to be completed up the stack. 
2796 */ 2797 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2798 response = SAS_TASK_UNDELIVERED; 2799 2800 /* See if the device has been/is being stopped. Note 2801 * that we ignore the quiesce state, since we are 2802 * concerned about the actual device state. 2803 */ 2804 if (!idev) 2805 status = SAS_DEVICE_UNKNOWN; 2806 else 2807 status = SAS_ABORTED_TASK; 2808 2809 complete_to_host = isci_perform_aborted_io_completion; 2810 2811 /* This was a terminated request. */ 2812 2813 spin_unlock(&request->state_lock); 2814 break; 2815 2816 case dead: 2817 /* This was a terminated request that timed-out during the 2818 * termination process. There is no task to complete to 2819 * libsas. 2820 */ 2821 complete_to_host = isci_perform_normal_io_completion; 2822 spin_unlock(&request->state_lock); 2823 break; 2824 2825 default: 2826 2827 /* The request is done from an SCU HW perspective. */ 2828 request->status = completed; 2829 2830 spin_unlock(&request->state_lock); 2831 2832 /* This is an active request being completed from the core. */ 2833 switch (completion_status) { 2834 2835 case SCI_IO_FAILURE_RESPONSE_VALID: 2836 dev_dbg(&ihost->pdev->dev, 2837 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2838 __func__, 2839 request, 2840 task); 2841 2842 if (sas_protocol_ata(task->task_proto)) { 2843 isci_process_stp_response(task, &request->stp.rsp); 2844 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2845 2846 /* crack the iu response buffer. */ 2847 resp_iu = &request->ssp.rsp; 2848 isci_request_process_response_iu(task, resp_iu, 2849 &ihost->pdev->dev); 2850 2851 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2852 2853 dev_err(&ihost->pdev->dev, 2854 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2855 "SAS_PROTOCOL_SMP protocol\n", 2856 __func__); 2857 2858 } else 2859 dev_err(&ihost->pdev->dev, 2860 "%s: unknown protocol\n", __func__); 2861 2862 /* use the task status set in the task struct by the 2863 * isci_request_process_response_iu call. 2864 */ 2865 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2866 response = task->task_status.resp; 2867 status = task->task_status.stat; 2868 break; 2869 2870 case SCI_IO_SUCCESS: 2871 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2872 2873 response = SAS_TASK_COMPLETE; 2874 status = SAM_STAT_GOOD; 2875 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2876 2877 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2878 2879 /* This was an SSP / STP / SATA transfer. 2880 * There is a possibility that less data than 2881 * the maximum was transferred. 2882 */ 2883 u32 transferred_length = sci_req_tx_bytes(request); 2884 2885 task->task_status.residual 2886 = task->total_xfer_len - transferred_length; 2887 2888 /* If there were residual bytes, call this an 2889 * underrun. 2890 */ 2891 if (task->task_status.residual != 0) 2892 status = SAS_DATA_UNDERRUN; 2893 2894 dev_dbg(&ihost->pdev->dev, 2895 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2896 __func__, 2897 status); 2898 2899 } else 2900 dev_dbg(&ihost->pdev->dev, 2901 "%s: SCI_IO_SUCCESS\n", 2902 __func__); 2903 2904 break; 2905 2906 case SCI_IO_FAILURE_TERMINATED: 2907 dev_dbg(&ihost->pdev->dev, 2908 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2909 __func__, 2910 request, 2911 task); 2912 2913 /* The request was terminated explicitly. No handling 2914 * is needed in the SCSI error handler path. 2915 */ 2916 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2917 response = SAS_TASK_UNDELIVERED; 2918 2919 /* See if the device has been/is being stopped. 
Note 2920 * that we ignore the quiesce state, since we are 2921 * concerned about the actual device state. 2922 */ 2923 if (!idev) 2924 status = SAS_DEVICE_UNKNOWN; 2925 else 2926 status = SAS_ABORTED_TASK; 2927 2928 complete_to_host = isci_perform_normal_io_completion; 2929 break; 2930 2931 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2932 2933 isci_request_handle_controller_specific_errors( 2934 idev, request, task, &response, &status, 2935 &complete_to_host); 2936 2937 break; 2938 2939 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2940 /* This is a special case, in that the I/O completion 2941 * is telling us that the device needs a reset. 2942 * In order for the device reset condition to be 2943 * noticed, the I/O has to be handled in the error 2944 * handler. Set the reset flag and cause the 2945 * SCSI error thread to be scheduled. 2946 */ 2947 spin_lock_irqsave(&task->task_state_lock, task_flags); 2948 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2949 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2950 2951 /* Fail the I/O. */ 2952 response = SAS_TASK_UNDELIVERED; 2953 status = SAM_STAT_TASK_ABORTED; 2954 2955 complete_to_host = isci_perform_error_io_completion; 2956 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2957 break; 2958 2959 case SCI_FAILURE_RETRY_REQUIRED: 2960 2961 /* Fail the I/O so it can be retried. */ 2962 response = SAS_TASK_UNDELIVERED; 2963 if (!idev) 2964 status = SAS_DEVICE_UNKNOWN; 2965 else 2966 status = SAS_ABORTED_TASK; 2967 2968 complete_to_host = isci_perform_normal_io_completion; 2969 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2970 break; 2971 2972 2973 default: 2974 /* Catch any otherwise unhandled error codes here. */ 2975 dev_dbg(&ihost->pdev->dev, 2976 "%s: invalid completion code: 0x%x - " 2977 "isci_request = %p\n", 2978 __func__, completion_status, request); 2979 2980 response = SAS_TASK_UNDELIVERED; 2981 2982 /* See if the device has been/is being stopped. Note 2983 * that we ignore the quiesce state, since we are 2984 * concerned about the actual device state. 
2985 */ 2986 if (!idev) 2987 status = SAS_DEVICE_UNKNOWN; 2988 else 2989 status = SAS_ABORTED_TASK; 2990 2991 if (SAS_PROTOCOL_SMP == task->task_proto) { 2992 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2993 complete_to_host = isci_perform_normal_io_completion; 2994 } else { 2995 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2996 complete_to_host = isci_perform_error_io_completion; 2997 } 2998 break; 2999 } 3000 break; 3001 } 3002 3003 switch (task->task_proto) { 3004 case SAS_PROTOCOL_SSP: 3005 if (task->data_dir == DMA_NONE) 3006 break; 3007 if (task->num_scatter == 0) 3008 /* 0 indicates a single dma address */ 3009 dma_unmap_single(&ihost->pdev->dev, 3010 request->zero_scatter_daddr, 3011 task->total_xfer_len, task->data_dir); 3012 else /* unmap the sgl dma addresses */ 3013 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 3014 request->num_sg_entries, task->data_dir); 3015 break; 3016 case SAS_PROTOCOL_SMP: { 3017 struct scatterlist *sg = &task->smp_task.smp_req; 3018 struct smp_req *smp_req; 3019 void *kaddr; 3020 3021 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 3022 3023 /* need to swab it back in case the command buffer is re-used */ 3024 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3025 smp_req = kaddr + sg->offset; 3026 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3027 kunmap_atomic(kaddr, KM_IRQ0); 3028 break; 3029 } 3030 default: 3031 break; 3032 } 3033 3034 /* Put the completed request on the correct list */ 3035 isci_task_save_for_upper_layer_completion(ihost, request, response, 3036 status, complete_to_host 3037 ); 3038 3039 /* complete the io request to the core. */ 3040 sci_controller_complete_io(ihost, request->target_device, request); 3041 3042 /* set terminated handle so it cannot be completed or 3043 * terminated again, and to cause any calls into abort 3044 * task to recognize the already completed case. 3045 */ 3046 set_bit(IREQ_TERMINATED, &request->flags); 3047} 3048 3049static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 3050{ 3051 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3052 struct domain_device *dev = ireq->target_device->domain_dev; 3053 enum sci_base_request_states state; 3054 struct sas_task *task; 3055 3056 /* XXX as hch said always creating an internal sas_task for tmf 3057 * requests would simplify the driver 3058 */ 3059 task = (test_bit(IREQ_TMF, &ireq->flags)) ? 
NULL : isci_request_access_task(ireq); 3060 3061 /* all unaccelerated request types (non ssp or ncq) handled with 3062 * substates 3063 */ 3064 if (!task && dev->dev_type == SAS_END_DEV) { 3065 state = SCI_REQ_TASK_WAIT_TC_COMP; 3066 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3067 state = SCI_REQ_SMP_WAIT_RESP; 3068 } else if (task && sas_protocol_ata(task->task_proto) && 3069 !task->ata_task.use_ncq) { 3070 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 3071 task->ata_task.fis.command == ATA_CMD_PACKET) { 3072 state = SCI_REQ_ATAPI_WAIT_H2D; 3073 } else if (task->data_dir == DMA_NONE) { 3074 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 3075 } else if (task->ata_task.dma_xfer) { 3076 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 3077 } else /* PIO */ { 3078 state = SCI_REQ_STP_PIO_WAIT_H2D; 3079 } 3080 } else { 3081 /* SSP or NCQ are fully accelerated, no substates */ 3082 return; 3083 } 3084 sci_change_state(sm, state); 3085} 3086 3087static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 3088{ 3089 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3090 struct isci_host *ihost = ireq->owning_controller; 3091 3092 /* Tell the SCI_USER that the IO request is complete */ 3093 if (!test_bit(IREQ_TMF, &ireq->flags)) 3094 isci_request_io_request_complete(ihost, ireq, 3095 ireq->sci_status); 3096 else 3097 isci_task_request_complete(ihost, ireq, ireq->sci_status); 3098} 3099 3100static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 3101{ 3102 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3103 3104 /* Setting the abort bit in the Task Context is required by the silicon. */ 3105 ireq->tc->abort = 1; 3106} 3107 3108static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3109{ 3110 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3111 3112 ireq->target_device->working_request = ireq; 3113} 3114 3115static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3116{ 3117 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3118 3119 ireq->target_device->working_request = ireq; 3120} 3121 3122static const struct sci_base_state sci_request_state_table[] = { 3123 [SCI_REQ_INIT] = { }, 3124 [SCI_REQ_CONSTRUCTED] = { }, 3125 [SCI_REQ_STARTED] = { 3126 .enter_state = sci_request_started_state_enter, 3127 }, 3128 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 3129 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 3130 }, 3131 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 3132 [SCI_REQ_STP_PIO_WAIT_H2D] = { 3133 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 3134 }, 3135 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 3136 [SCI_REQ_STP_PIO_DATA_IN] = { }, 3137 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3138 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3139 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3140 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3141 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3142 [SCI_REQ_SMP_WAIT_RESP] = { }, 3143 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3144 [SCI_REQ_ATAPI_WAIT_H2D] = { }, 3145 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, 3146 [SCI_REQ_ATAPI_WAIT_D2H] = { }, 3147 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, 3148 [SCI_REQ_COMPLETED] = { 3149 .enter_state = sci_request_completed_state_enter, 3150 }, 3151 [SCI_REQ_ABORTING] = { 3152 .enter_state = sci_request_aborting_state_enter, 3153 }, 3154 [SCI_REQ_FINAL] = { }, 3155}; 3156 3157static void 3158sci_general_request_construct(struct isci_host *ihost, 
3159 struct isci_remote_device *idev, 3160 struct isci_request *ireq) 3161{ 3162 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3163 3164 ireq->target_device = idev; 3165 ireq->protocol = SCIC_NO_PROTOCOL; 3166 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3167 3168 ireq->sci_status = SCI_SUCCESS; 3169 ireq->scu_status = 0; 3170 ireq->post_context = 0xFFFFFFFF; 3171} 3172 3173static enum sci_status 3174sci_io_request_construct(struct isci_host *ihost, 3175 struct isci_remote_device *idev, 3176 struct isci_request *ireq) 3177{ 3178 struct domain_device *dev = idev->domain_dev; 3179 enum sci_status status = SCI_SUCCESS; 3180 3181 /* Build the common part of the request */ 3182 sci_general_request_construct(ihost, idev, ireq); 3183 3184 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3185 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3186 3187 if (dev->dev_type == SAS_END_DEV) 3188 /* pass */; 3189 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 3190 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3191 else if (dev_is_expander(dev)) 3192 /* pass */; 3193 else 3194 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3195 3196 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3197 3198 return status; 3199} 3200 3201enum sci_status sci_task_request_construct(struct isci_host *ihost, 3202 struct isci_remote_device *idev, 3203 u16 io_tag, struct isci_request *ireq) 3204{ 3205 struct domain_device *dev = idev->domain_dev; 3206 enum sci_status status = SCI_SUCCESS; 3207 3208 /* Build the common part of the request */ 3209 sci_general_request_construct(ihost, idev, ireq); 3210 3211 if (dev->dev_type == SAS_END_DEV || 3212 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 3213 set_bit(IREQ_TMF, &ireq->flags); 3214 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3215 } else 3216 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3217 3218 return status; 3219} 3220 3221static enum sci_status isci_request_ssp_request_construct( 3222 struct isci_request *request) 3223{ 3224 enum sci_status status; 3225 3226 dev_dbg(&request->isci_host->pdev->dev, 3227 "%s: request = %p\n", 3228 __func__, 3229 request); 3230 status = sci_io_request_construct_basic_ssp(request); 3231 return status; 3232} 3233 3234static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) 3235{ 3236 struct sas_task *task = isci_request_access_task(ireq); 3237 struct host_to_dev_fis *fis = &ireq->stp.cmd; 3238 struct ata_queued_cmd *qc = task->uldd_task; 3239 enum sci_status status; 3240 3241 dev_dbg(&ireq->isci_host->pdev->dev, 3242 "%s: ireq = %p\n", 3243 __func__, 3244 ireq); 3245 3246 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 3247 if (!task->ata_task.device_control_reg_update) 3248 fis->flags |= 0x80; 3249 fis->flags &= 0xF0; 3250 3251 status = sci_io_request_construct_basic_sata(ireq); 3252 3253 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || 3254 qc->tf.command == ATA_CMD_FPDMA_READ)) { 3255 fis->sector_count = qc->tag << 3; 3256 ireq->tc->type.stp.ncq_tag = qc->tag; 3257 } 3258 3259 return status; 3260} 3261 3262static enum sci_status 3263sci_io_request_construct_smp(struct device *dev, 3264 struct isci_request *ireq, 3265 struct sas_task *task) 3266{ 3267 struct scatterlist *sg = &task->smp_task.smp_req; 3268 struct isci_remote_device *idev; 3269 struct scu_task_context *task_context; 3270 struct isci_port *iport; 3271 struct smp_req *smp_req; 3272 void *kaddr; 3273 u8 req_len; 3274 
u32 cmd; 3275 3276 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3277 smp_req = kaddr + sg->offset; 3278 /* 3279 * Look at the SMP request's header fields; for certain SAS 1.x SMP 3280 * functions under SAS 2.0, a zero request length really indicates 3281 * a non-zero default length. 3282 */ 3283 if (smp_req->req_len == 0) { 3284 switch (smp_req->func) { 3285 case SMP_DISCOVER: 3286 case SMP_REPORT_PHY_ERR_LOG: 3287 case SMP_REPORT_PHY_SATA: 3288 case SMP_REPORT_ROUTE_INFO: 3289 smp_req->req_len = 2; 3290 break; 3291 case SMP_CONF_ROUTE_INFO: 3292 case SMP_PHY_CONTROL: 3293 case SMP_PHY_TEST_FUNCTION: 3294 smp_req->req_len = 9; 3295 break; 3296 /* Default - zero is a valid default for 2.0. */ 3297 } 3298 } 3299 req_len = smp_req->req_len; 3300 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3301 cmd = *(u32 *) smp_req; 3302 kunmap_atomic(kaddr, KM_IRQ0); 3303 3304 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3305 return SCI_FAILURE; 3306 3307 ireq->protocol = SCIC_SMP_PROTOCOL; 3308 3309 /* byte swap the smp request. */ 3310 3311 task_context = ireq->tc; 3312 3313 idev = ireq->target_device; 3314 iport = idev->owning_port; 3315 3316 /* 3317 * Fill in the TC with its required data 3318 * 00h 3319 */ 3320 task_context->priority = 0; 3321 task_context->initiator_request = 1; 3322 task_context->connection_rate = idev->connection_rate; 3323 task_context->protocol_engine_index = ISCI_PEG; 3324 task_context->logical_port_index = iport->physical_port_index; 3325 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3326 task_context->abort = 0; 3327 task_context->valid = SCU_TASK_CONTEXT_VALID; 3328 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3329 3330 /* 04h */ 3331 task_context->remote_node_index = idev->rnc.remote_node_index; 3332 task_context->command_code = 0; 3333 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3334 3335 /* 08h */ 3336 task_context->link_layer_control = 0; 3337 task_context->do_not_dma_ssp_good_response = 1; 3338 task_context->strict_ordering = 0; 3339 task_context->control_frame = 1; 3340 task_context->timeout_enable = 0; 3341 task_context->block_guard_enable = 0; 3342 3343 /* 0ch */ 3344 task_context->address_modifier = 0; 3345 3346 /* 10h */ 3347 task_context->ssp_command_iu_length = req_len; 3348 3349 /* 14h */ 3350 task_context->transfer_length_bytes = 0; 3351 3352 /* 3353 * 18h ~ 30h, protocol specific 3354 * since the command IU has been built by the framework at this point, we just 3355 * copy the first DWord from the command IU to this location. */ 3356 memcpy(&task_context->type.smp, &cmd, sizeof(u32)); 3357 3358 /* 3359 * 40h 3360 * "For SMP you could program it to zero. We would prefer that way 3361 * so that done code will be consistent." - Venki 3362 */ 3363 task_context->task_phase = 0; 3364 3365 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3366 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3367 (iport->physical_port_index << 3368 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3369 ISCI_TAG_TCI(ireq->io_tag)); 3370 /* 3371 * Copy the physical address of the command buffer to the SCU Task 3372 * Context; the command buffer should not contain the command header. 3373 */ 3374 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); 3375 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); 3376 3377 /* SMP response comes as UF, so no need to set response IU address.
*/ 3378 task_context->response_iu_upper = 0; 3379 task_context->response_iu_lower = 0; 3380 3381 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 3382 3383 return SCI_SUCCESS; 3384} 3385 3386/* 3387 * isci_smp_request_build() - This function builds the smp request. 3388 * @ireq: This parameter points to the isci_request allocated in the 3389 * request construct function. 3390 * 3391 * SCI_SUCCESS on successful completion, or specific failure code. 3392 */ 3393static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3394{ 3395 struct sas_task *task = isci_request_access_task(ireq); 3396 struct device *dev = &ireq->isci_host->pdev->dev; 3397 enum sci_status status = SCI_FAILURE; 3398 3399 status = sci_io_request_construct_smp(dev, ireq, task); 3400 if (status != SCI_SUCCESS) 3401 dev_dbg(&ireq->isci_host->pdev->dev, 3402 "%s: failed with status = %d\n", 3403 __func__, 3404 status); 3405 3406 return status; 3407} 3408 3409/** 3410 * isci_io_request_build() - This function builds the io request object. 3411 * @ihost: This parameter specifies the ISCI host object. 3412 * @request: This parameter points to the isci_request object allocated in the 3413 * request construct function. 3414 * @idev: This parameter is the handle for the sci core's remote device 3415 * object that is the destination for this request. 3416 * 3417 * SCI_SUCCESS on successful completion, or specific failure code. 3418 */ 3419static enum sci_status isci_io_request_build(struct isci_host *ihost, 3420 struct isci_request *request, 3421 struct isci_remote_device *idev) 3422{ 3423 enum sci_status status = SCI_SUCCESS; 3424 struct sas_task *task = isci_request_access_task(request); 3425 3426 dev_dbg(&ihost->pdev->dev, 3427 "%s: idev = 0x%p; request = %p, " 3428 "num_scatter = %d\n", 3429 __func__, 3430 idev, 3431 request, 3432 task->num_scatter); 3433 3434 /* map the sgl addresses, if present. 3435 * libata does the mapping for sata devices 3436 * before we get the request.
3437 */ 3438 if (task->num_scatter && 3439 !sas_protocol_ata(task->task_proto) && 3440 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3441 3442 request->num_sg_entries = dma_map_sg( 3443 &ihost->pdev->dev, 3444 task->scatter, 3445 task->num_scatter, 3446 task->data_dir 3447 ); 3448 3449 if (request->num_sg_entries == 0) 3450 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3451 } 3452 3453 status = sci_io_request_construct(ihost, idev, request); 3454 3455 if (status != SCI_SUCCESS) { 3456 dev_dbg(&ihost->pdev->dev, 3457 "%s: failed request construct\n", 3458 __func__); 3459 return SCI_FAILURE; 3460 } 3461 3462 switch (task->task_proto) { 3463 case SAS_PROTOCOL_SMP: 3464 status = isci_smp_request_build(request); 3465 break; 3466 case SAS_PROTOCOL_SSP: 3467 status = isci_request_ssp_request_construct(request); 3468 break; 3469 case SAS_PROTOCOL_SATA: 3470 case SAS_PROTOCOL_STP: 3471 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3472 status = isci_request_stp_request_construct(request); 3473 break; 3474 default: 3475 dev_dbg(&ihost->pdev->dev, 3476 "%s: unknown protocol\n", __func__); 3477 return SCI_FAILURE; 3478 } 3479 3480 return SCI_SUCCESS; 3481} 3482 3483static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3484{ 3485 struct isci_request *ireq; 3486 3487 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3488 ireq->io_tag = tag; 3489 ireq->io_request_completion = NULL; 3490 ireq->flags = 0; 3491 ireq->num_sg_entries = 0; 3492 INIT_LIST_HEAD(&ireq->completed_node); 3493 INIT_LIST_HEAD(&ireq->dev_node); 3494 isci_request_change_state(ireq, allocated); 3495 3496 return ireq; 3497} 3498 3499static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3500 struct sas_task *task, 3501 u16 tag) 3502{ 3503 struct isci_request *ireq; 3504 3505 ireq = isci_request_from_tag(ihost, tag); 3506 ireq->ttype_ptr.io_task_ptr = task; 3507 clear_bit(IREQ_TMF, &ireq->flags); 3508 task->lldd_task = ireq; 3509 3510 return ireq; 3511} 3512 3513struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3514 struct isci_tmf *isci_tmf, 3515 u16 tag) 3516{ 3517 struct isci_request *ireq; 3518 3519 ireq = isci_request_from_tag(ihost, tag); 3520 ireq->ttype_ptr.tmf_task_ptr = isci_tmf; 3521 set_bit(IREQ_TMF, &ireq->flags); 3522 3523 return ireq; 3524} 3525 3526int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 3527 struct sas_task *task, u16 tag) 3528{ 3529 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3530 struct isci_request *ireq; 3531 unsigned long flags; 3532 int ret = 0; 3533 3534 /* do common allocation and init of request object. */ 3535 ireq = isci_io_request_from_tag(ihost, task, tag); 3536 3537 status = isci_io_request_build(ihost, ireq, idev); 3538 if (status != SCI_SUCCESS) { 3539 dev_dbg(&ihost->pdev->dev, 3540 "%s: request_construct failed - status = 0x%x\n", 3541 __func__, 3542 status); 3543 return status; 3544 } 3545 3546 spin_lock_irqsave(&ihost->scic_lock, flags); 3547 3548 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3549 3550 if (isci_task_is_ncq_recovery(task)) { 3551 3552 /* The device is in an NCQ recovery state. Issue the 3553 * request on the task side. Note that it will 3554 * complete on the I/O request side because the 3555 * request was built that way (ie. 3556 * ireq->is_task_management_request is false). 3557 */ 3558 status = sci_controller_start_task(ihost, 3559 idev, 3560 ireq); 3561 } else { 3562 status = SCI_FAILURE; 3563 } 3564 } else { 3565 /* send the request, let the core assign the IO TAG. 
*/ 3566 status = sci_controller_start_io(ihost, idev, 3567 ireq); 3568 } 3569 3570 if (status != SCI_SUCCESS && 3571 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3572 dev_dbg(&ihost->pdev->dev, 3573 "%s: failed request start (0x%x)\n", 3574 __func__, status); 3575 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3576 return status; 3577 } 3578 3579 /* Either I/O started OK, or the core has signaled that 3580 * the device needs a target reset. 3581 * 3582 * In either case, hold onto the I/O for later. 3583 * 3584 * Update it's status and add it to the list in the 3585 * remote device object. 3586 */ 3587 list_add(&ireq->dev_node, &idev->reqs_in_process); 3588 3589 if (status == SCI_SUCCESS) { 3590 isci_request_change_state(ireq, started); 3591 } else { 3592 /* The request did not really start in the 3593 * hardware, so clear the request handle 3594 * here so no terminations will be done. 3595 */ 3596 set_bit(IREQ_TERMINATED, &ireq->flags); 3597 isci_request_change_state(ireq, completed); 3598 } 3599 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3600 3601 if (status == 3602 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3603 /* Signal libsas that we need the SCSI error 3604 * handler thread to work on this I/O and that 3605 * we want a device reset. 3606 */ 3607 spin_lock_irqsave(&task->task_state_lock, flags); 3608 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3609 spin_unlock_irqrestore(&task->task_state_lock, flags); 3610 3611 /* Cause this task to be scheduled in the SCSI error 3612 * handler thread. 3613 */ 3614 sas_task_abort(task); 3615 3616 /* Change the status, since we are holding 3617 * the I/O until it is managed by the SCSI 3618 * error handler. 3619 */ 3620 status = SCI_SUCCESS; 3621 } 3622 3623 return ret; 3624} 3625