request.c revision 9269e0e898594c65dee6b20d4ed48e33dbbd4eeb
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
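 *
 * Added illustration (not part of the original source): pair indices 0 and
 * 1 resolve to the sgl_pair_ab and sgl_pair_cd fields embedded in the task
 * context itself, while any larger index falls back to the request's
 * external sg_table, roughly:
 *
 *	pair = (idx == 0) ? &task_context->sgl_pair_ab :
 *	       (idx == 1) ? &task_context->sgl_pair_cd :
 *			    &sci_req->sg_table[idx - 2];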
72 */ 73static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair( 74 struct scic_sds_request *sci_req, 75 u32 sgl_pair_index 76 ) { 77 struct scu_task_context *task_context; 78 79 task_context = (struct scu_task_context *)sci_req->task_context_buffer; 80 81 if (sgl_pair_index == 0) { 82 return &task_context->sgl_pair_ab; 83 } else if (sgl_pair_index == 1) { 84 return &task_context->sgl_pair_cd; 85 } 86 87 return &sci_req->sg_table[sgl_pair_index - 2]; 88} 89 90/** 91 * This function will build the SGL list for an IO request. 92 * @sci_req: This parameter specifies the IO request for which to build 93 * the Scatter-Gather List. 94 * 95 */ 96static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) 97{ 98 struct isci_request *isci_request = sci_req_to_ireq(sds_request); 99 struct isci_host *isci_host = isci_request->isci_host; 100 struct sas_task *task = isci_request_access_task(isci_request); 101 struct scatterlist *sg = NULL; 102 dma_addr_t dma_addr; 103 u32 sg_idx = 0; 104 struct scu_sgl_element_pair *scu_sg = NULL; 105 struct scu_sgl_element_pair *prev_sg = NULL; 106 107 if (task->num_scatter > 0) { 108 sg = task->scatter; 109 110 while (sg) { 111 scu_sg = scic_sds_request_get_sgl_element_pair( 112 sds_request, 113 sg_idx); 114 115 SCU_SGL_COPY(scu_sg->A, sg); 116 117 sg = sg_next(sg); 118 119 if (sg) { 120 SCU_SGL_COPY(scu_sg->B, sg); 121 sg = sg_next(sg); 122 } else 123 SCU_SGL_ZERO(scu_sg->B); 124 125 if (prev_sg) { 126 dma_addr = 127 scic_io_request_get_dma_addr( 128 sds_request, 129 scu_sg); 130 131 prev_sg->next_pair_upper = 132 upper_32_bits(dma_addr); 133 prev_sg->next_pair_lower = 134 lower_32_bits(dma_addr); 135 } 136 137 prev_sg = scu_sg; 138 sg_idx++; 139 } 140 } else { /* handle when no sg */ 141 scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, 142 sg_idx); 143 144 dma_addr = dma_map_single(&isci_host->pdev->dev, 145 task->scatter, 146 task->total_xfer_len, 147 task->data_dir); 148 149 isci_request->zero_scatter_daddr = dma_addr; 150 151 scu_sg->A.length = task->total_xfer_len; 152 scu_sg->A.address_upper = upper_32_bits(dma_addr); 153 scu_sg->A.address_lower = lower_32_bits(dma_addr); 154 } 155 156 if (scu_sg) { 157 scu_sg->next_pair_upper = 0; 158 scu_sg->next_pair_lower = 0; 159 } 160} 161 162static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) 163{ 164 struct ssp_cmd_iu *cmd_iu; 165 struct isci_request *ireq = sci_req_to_ireq(sci_req); 166 struct sas_task *task = isci_request_access_task(ireq); 167 168 cmd_iu = &sci_req->ssp.cmd; 169 170 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 171 cmd_iu->add_cdb_len = 0; 172 cmd_iu->_r_a = 0; 173 cmd_iu->_r_b = 0; 174 cmd_iu->en_fburst = 0; /* unsupported */ 175 cmd_iu->task_prio = task->ssp_task.task_prio; 176 cmd_iu->task_attr = task->ssp_task.task_attr; 177 cmd_iu->_r_c = 0; 178 179 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 180 sizeof(task->ssp_task.cdb) / sizeof(u32)); 181} 182 183static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) 184{ 185 struct ssp_task_iu *task_iu; 186 struct isci_request *ireq = sci_req_to_ireq(sci_req); 187 struct sas_task *task = isci_request_access_task(ireq); 188 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 189 190 task_iu = &sci_req->ssp.tmf; 191 192 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 193 194 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 195 196 task_iu->task_func = isci_tmf->tmf_code; 197 task_iu->task_tag = 198 (ireq->ttype == tmf_task) ? 
199 isci_tmf->io_tag : 200 SCI_CONTROLLER_INVALID_IO_TAG; 201} 202 203/** 204 * This method is will fill in the SCU Task Context for any type of SSP request. 205 * @sci_req: 206 * @task_context: 207 * 208 */ 209static void scu_ssp_reqeust_construct_task_context( 210 struct scic_sds_request *sds_request, 211 struct scu_task_context *task_context) 212{ 213 dma_addr_t dma_addr; 214 struct scic_sds_controller *controller; 215 struct scic_sds_remote_device *target_device; 216 struct scic_sds_port *target_port; 217 218 controller = scic_sds_request_get_controller(sds_request); 219 target_device = scic_sds_request_get_device(sds_request); 220 target_port = scic_sds_request_get_port(sds_request); 221 222 /* Fill in the TC with the its required data */ 223 task_context->abort = 0; 224 task_context->priority = 0; 225 task_context->initiator_request = 1; 226 task_context->connection_rate = target_device->connection_rate; 227 task_context->protocol_engine_index = 228 scic_sds_controller_get_protocol_engine_group(controller); 229 task_context->logical_port_index = 230 scic_sds_port_get_index(target_port); 231 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 232 task_context->valid = SCU_TASK_CONTEXT_VALID; 233 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 234 235 task_context->remote_node_index = 236 scic_sds_remote_device_get_index(sds_request->target_device); 237 task_context->command_code = 0; 238 239 task_context->link_layer_control = 0; 240 task_context->do_not_dma_ssp_good_response = 1; 241 task_context->strict_ordering = 0; 242 task_context->control_frame = 0; 243 task_context->timeout_enable = 0; 244 task_context->block_guard_enable = 0; 245 246 task_context->address_modifier = 0; 247 248 /* task_context->type.ssp.tag = sci_req->io_tag; */ 249 task_context->task_phase = 0x01; 250 251 if (sds_request->was_tag_assigned_by_user) { 252 /* 253 * Build the task context now since we have already read 254 * the data 255 */ 256 sds_request->post_context = 257 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 258 (scic_sds_controller_get_protocol_engine_group( 259 controller) << 260 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 261 (scic_sds_port_get_index(target_port) << 262 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 263 scic_sds_io_tag_get_index(sds_request->io_tag)); 264 } else { 265 /* 266 * Build the task context now since we have already read 267 * the data 268 * 269 * I/O tag index is not assigned because we have to wait 270 * until we get a TCi 271 */ 272 sds_request->post_context = 273 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 274 (scic_sds_controller_get_protocol_engine_group( 275 owning_controller) << 276 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 277 (scic_sds_port_get_index(target_port) << 278 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); 279 } 280 281 /* 282 * Copy the physical address for the command buffer to the 283 * SCU Task Context 284 */ 285 dma_addr = scic_io_request_get_dma_addr(sds_request, 286 &sds_request->ssp.cmd); 287 288 task_context->command_iu_upper = upper_32_bits(dma_addr); 289 task_context->command_iu_lower = lower_32_bits(dma_addr); 290 291 /* 292 * Copy the physical address for the response buffer to the 293 * SCU Task Context 294 */ 295 dma_addr = scic_io_request_get_dma_addr(sds_request, 296 &sds_request->ssp.rsp); 297 298 task_context->response_iu_upper = upper_32_bits(dma_addr); 299 task_context->response_iu_lower = lower_32_bits(dma_addr); 300} 301 302/** 303 * This method is will fill in the SCU Task Context for a SSP IO request. 
304 * @sci_req: 305 * 306 */ 307static void scu_ssp_io_request_construct_task_context( 308 struct scic_sds_request *sci_req, 309 enum dma_data_direction dir, 310 u32 len) 311{ 312 struct scu_task_context *task_context; 313 314 task_context = scic_sds_request_get_task_context(sci_req); 315 316 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 317 318 task_context->ssp_command_iu_length = 319 sizeof(struct ssp_cmd_iu) / sizeof(u32); 320 task_context->type.ssp.frame_type = SSP_COMMAND; 321 322 switch (dir) { 323 case DMA_FROM_DEVICE: 324 case DMA_NONE: 325 default: 326 task_context->task_type = SCU_TASK_TYPE_IOREAD; 327 break; 328 case DMA_TO_DEVICE: 329 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 330 break; 331 } 332 333 task_context->transfer_length_bytes = len; 334 335 if (task_context->transfer_length_bytes > 0) 336 scic_sds_request_build_sgl(sci_req); 337} 338 339/** 340 * This method will fill in the SCU Task Context for a SSP Task request. The 341 * following important settings are utilized: -# priority == 342 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 343 * ahead of other task destined for the same Remote Node. -# task_type == 344 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 345 * (i.e. non-raw frame) is being utilized to perform task management. -# 346 * control_frame == 1. This ensures that the proper endianess is set so 347 * that the bytes are transmitted in the right order for a task frame. 348 * @sci_req: This parameter specifies the task request object being 349 * constructed. 350 * 351 */ 352static void scu_ssp_task_request_construct_task_context( 353 struct scic_sds_request *sci_req) 354{ 355 struct scu_task_context *task_context; 356 357 task_context = scic_sds_request_get_task_context(sci_req); 358 359 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 360 361 task_context->control_frame = 1; 362 task_context->priority = SCU_TASK_PRIORITY_HIGH; 363 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 364 task_context->transfer_length_bytes = 0; 365 task_context->type.ssp.frame_type = SSP_TASK; 366 task_context->ssp_command_iu_length = 367 sizeof(struct ssp_task_iu) / sizeof(u32); 368} 369 370/** 371 * This method is will fill in the SCU Task Context for any type of SATA 372 * request. This is called from the various SATA constructors. 373 * @sci_req: The general IO request object which is to be used in 374 * constructing the SCU task context. 375 * @task_context: The buffer pointer for the SCU task context which is being 376 * constructed. 377 * 378 * The general io request construction is complete. The buffer assignment for 379 * the command buffer is complete. none Revisit task context construction to 380 * determine what is common for SSP/SMP/STP task context structures. 
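 *
 * Added note (sketch of the code below, not in the original comment): the
 * first dword of the host-to-device FIS is copied into the task context
 * itself, so the command IU address programmed later points 4 bytes past
 * the start of the FIS, roughly:
 *
 *	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
 *	dma_addr = scic_io_request_get_dma_addr(sci_req,
 *						(char *)&sci_req->stp.cmd + sizeof(u32));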
381 */ 382static void scu_sata_reqeust_construct_task_context( 383 struct scic_sds_request *sci_req, 384 struct scu_task_context *task_context) 385{ 386 dma_addr_t dma_addr; 387 struct scic_sds_controller *controller; 388 struct scic_sds_remote_device *target_device; 389 struct scic_sds_port *target_port; 390 391 controller = scic_sds_request_get_controller(sci_req); 392 target_device = scic_sds_request_get_device(sci_req); 393 target_port = scic_sds_request_get_port(sci_req); 394 395 /* Fill in the TC with the its required data */ 396 task_context->abort = 0; 397 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 398 task_context->initiator_request = 1; 399 task_context->connection_rate = target_device->connection_rate; 400 task_context->protocol_engine_index = 401 scic_sds_controller_get_protocol_engine_group(controller); 402 task_context->logical_port_index = 403 scic_sds_port_get_index(target_port); 404 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 405 task_context->valid = SCU_TASK_CONTEXT_VALID; 406 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 407 408 task_context->remote_node_index = 409 scic_sds_remote_device_get_index(sci_req->target_device); 410 task_context->command_code = 0; 411 412 task_context->link_layer_control = 0; 413 task_context->do_not_dma_ssp_good_response = 1; 414 task_context->strict_ordering = 0; 415 task_context->control_frame = 0; 416 task_context->timeout_enable = 0; 417 task_context->block_guard_enable = 0; 418 419 task_context->address_modifier = 0; 420 task_context->task_phase = 0x01; 421 422 task_context->ssp_command_iu_length = 423 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 424 425 /* Set the first word of the H2D REG FIS */ 426 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; 427 428 if (sci_req->was_tag_assigned_by_user) { 429 /* 430 * Build the task context now since we have already read 431 * the data 432 */ 433 sci_req->post_context = 434 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 435 (scic_sds_controller_get_protocol_engine_group( 436 controller) << 437 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 438 (scic_sds_port_get_index(target_port) << 439 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 440 scic_sds_io_tag_get_index(sci_req->io_tag)); 441 } else { 442 /* 443 * Build the task context now since we have already read 444 * the data. 445 * I/O tag index is not assigned because we have to wait 446 * until we get a TCi. 447 */ 448 sci_req->post_context = 449 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 450 (scic_sds_controller_get_protocol_engine_group( 451 controller) << 452 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 453 (scic_sds_port_get_index(target_port) << 454 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); 455 } 456 457 /* 458 * Copy the physical address for the command buffer to the SCU Task 459 * Context. We must offset the command buffer by 4 bytes because the 460 * first 4 bytes are transfered in the body of the TC. 461 */ 462 dma_addr = scic_io_request_get_dma_addr(sci_req, 463 ((char *) &sci_req->stp.cmd) + 464 sizeof(u32)); 465 466 task_context->command_iu_upper = upper_32_bits(dma_addr); 467 task_context->command_iu_lower = lower_32_bits(dma_addr); 468 469 /* SATA Requests do not have a response buffer */ 470 task_context->response_iu_upper = 0; 471 task_context->response_iu_lower = 0; 472} 473 474 475 476/** 477 * scu_stp_raw_request_construct_task_context - 478 * @sci_req: This parameter specifies the STP request object for which to 479 * construct a RAW command frame task context. 
480 * @task_context: This parameter specifies the SCU specific task context buffer 481 * to construct. 482 * 483 * This method performs the operations common to all SATA/STP requests 484 * utilizing the raw frame method. none 485 */ 486static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req, 487 struct scu_task_context *task_context) 488{ 489 struct scic_sds_request *sci_req = to_sci_req(stp_req); 490 491 scu_sata_reqeust_construct_task_context(sci_req, task_context); 492 493 task_context->control_frame = 0; 494 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 495 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; 496 task_context->type.stp.fis_type = FIS_REGH2D; 497 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 498} 499 500static enum sci_status 501scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, 502 bool copy_rx_frame) 503{ 504 struct scic_sds_stp_request *stp_req = &sci_req->stp.req; 505 struct scic_sds_stp_pio_request *pio = &stp_req->type.pio; 506 507 scu_stp_raw_request_construct_task_context(stp_req, 508 sci_req->task_context_buffer); 509 510 pio->current_transfer_bytes = 0; 511 pio->ending_error = 0; 512 pio->ending_status = 0; 513 514 pio->request_current.sgl_offset = 0; 515 pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A; 516 517 if (copy_rx_frame) { 518 scic_sds_request_build_sgl(sci_req); 519 /* Since the IO request copy of the TC contains the same data as 520 * the actual TC this pointer is vaild for either. 521 */ 522 pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab; 523 } else { 524 /* The user does not want the data copied to the SGL buffer location */ 525 pio->request_current.sgl_pair = NULL; 526 } 527 528 return SCI_SUCCESS; 529} 530 531/** 532 * 533 * @sci_req: This parameter specifies the request to be constructed as an 534 * optimized request. 535 * @optimized_task_type: This parameter specifies whether the request is to be 536 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A 537 * value of 1 indicates NCQ. 538 * 539 * This method will perform request construction common to all types of STP 540 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method 541 * returns an indication as to whether the construction was successful. 542 */ 543static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, 544 u8 optimized_task_type, 545 u32 len, 546 enum dma_data_direction dir) 547{ 548 struct scu_task_context *task_context = sci_req->task_context_buffer; 549 550 /* Build the STP task context structure */ 551 scu_sata_reqeust_construct_task_context(sci_req, task_context); 552 553 /* Copy over the SGL elements */ 554 scic_sds_request_build_sgl(sci_req); 555 556 /* Copy over the number of bytes to be transfered */ 557 task_context->transfer_length_bytes = len; 558 559 if (dir == DMA_TO_DEVICE) { 560 /* 561 * The difference between the DMA IN and DMA OUT request task type 562 * values are consistent with the difference between FPDMA READ 563 * and FPDMA WRITE values. Add the supplied task type parameter 564 * to this difference to set the task type properly for this 565 * DATA OUT (WRITE) case. */ 566 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT 567 - SCU_TASK_TYPE_DMA_IN); 568 } else { 569 /* 570 * For the DATA IN (READ) case, simply save the supplied 571 * optimized task type. 
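 *
 * Worked example (added, based on the comment above): for a DATA OUT
 * request with optimized_task_type == SCU_TASK_TYPE_FPDMAQ_READ, adding
 * (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN) selects the corresponding
 * FPDMA write task type, while the DATA IN branch below stores the
 * supplied value unchanged.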
*/ 572 task_context->task_type = optimized_task_type; 573 } 574} 575 576 577 578static enum sci_status 579scic_io_request_construct_sata(struct scic_sds_request *sci_req, 580 u32 len, 581 enum dma_data_direction dir, 582 bool copy) 583{ 584 enum sci_status status = SCI_SUCCESS; 585 struct isci_request *ireq = sci_req_to_ireq(sci_req); 586 struct sas_task *task = isci_request_access_task(ireq); 587 588 /* check for management protocols */ 589 if (ireq->ttype == tmf_task) { 590 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 591 592 if (tmf->tmf_code == isci_tmf_sata_srst_high || 593 tmf->tmf_code == isci_tmf_sata_srst_low) { 594 scu_stp_raw_request_construct_task_context(&sci_req->stp.req, 595 sci_req->task_context_buffer); 596 return SCI_SUCCESS; 597 } else { 598 dev_err(scic_to_dev(sci_req->owning_controller), 599 "%s: Request 0x%p received un-handled SAT " 600 "management protocol 0x%x.\n", 601 __func__, sci_req, tmf->tmf_code); 602 603 return SCI_FAILURE; 604 } 605 } 606 607 if (!sas_protocol_ata(task->task_proto)) { 608 dev_err(scic_to_dev(sci_req->owning_controller), 609 "%s: Non-ATA protocol in SATA path: 0x%x\n", 610 __func__, 611 task->task_proto); 612 return SCI_FAILURE; 613 614 } 615 616 /* non data */ 617 if (task->data_dir == DMA_NONE) { 618 scu_stp_raw_request_construct_task_context(&sci_req->stp.req, 619 sci_req->task_context_buffer); 620 return SCI_SUCCESS; 621 } 622 623 /* NCQ */ 624 if (task->ata_task.use_ncq) { 625 scic_sds_stp_optimized_request_construct(sci_req, 626 SCU_TASK_TYPE_FPDMAQ_READ, 627 len, dir); 628 return SCI_SUCCESS; 629 } 630 631 /* DMA */ 632 if (task->ata_task.dma_xfer) { 633 scic_sds_stp_optimized_request_construct(sci_req, 634 SCU_TASK_TYPE_DMA_IN, 635 len, dir); 636 return SCI_SUCCESS; 637 } else /* PIO */ 638 return scic_sds_stp_pio_request_construct(sci_req, copy); 639 640 return status; 641} 642 643static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) 644{ 645 struct isci_request *ireq = sci_req_to_ireq(sci_req); 646 struct sas_task *task = isci_request_access_task(ireq); 647 648 sci_req->protocol = SCIC_SSP_PROTOCOL; 649 650 scu_ssp_io_request_construct_task_context(sci_req, 651 task->data_dir, 652 task->total_xfer_len); 653 654 scic_sds_io_request_build_ssp_command_iu(sci_req); 655 656 sci_base_state_machine_change_state(&sci_req->state_machine, 657 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 658 659 return SCI_SUCCESS; 660} 661 662enum sci_status scic_task_request_construct_ssp( 663 struct scic_sds_request *sci_req) 664{ 665 /* Construct the SSP Task SCU Task Context */ 666 scu_ssp_task_request_construct_task_context(sci_req); 667 668 /* Fill in the SSP Task IU */ 669 scic_sds_task_request_build_ssp_task_iu(sci_req); 670 671 sci_base_state_machine_change_state(&sci_req->state_machine, 672 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 673 674 return SCI_SUCCESS; 675} 676 677static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) 678{ 679 enum sci_status status; 680 struct scic_sds_stp_request *stp_req; 681 bool copy = false; 682 struct isci_request *isci_request = sci_req_to_ireq(sci_req); 683 struct sas_task *task = isci_request_access_task(isci_request); 684 685 stp_req = &sci_req->stp.req; 686 sci_req->protocol = SCIC_STP_PROTOCOL; 687 688 copy = (task->data_dir == DMA_NONE) ? 
false : true; 689 690 status = scic_io_request_construct_sata(sci_req, 691 task->total_xfer_len, 692 task->data_dir, 693 copy); 694 695 if (status == SCI_SUCCESS) 696 sci_base_state_machine_change_state(&sci_req->state_machine, 697 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 698 699 return status; 700} 701 702enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) 703{ 704 enum sci_status status = SCI_SUCCESS; 705 struct isci_request *ireq = sci_req_to_ireq(sci_req); 706 707 /* check for management protocols */ 708 if (ireq->ttype == tmf_task) { 709 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 710 711 if (tmf->tmf_code == isci_tmf_sata_srst_high || 712 tmf->tmf_code == isci_tmf_sata_srst_low) { 713 scu_stp_raw_request_construct_task_context(&sci_req->stp.req, 714 sci_req->task_context_buffer); 715 } else { 716 dev_err(scic_to_dev(sci_req->owning_controller), 717 "%s: Request 0x%p received un-handled SAT " 718 "Protocol 0x%x.\n", 719 __func__, sci_req, tmf->tmf_code); 720 721 return SCI_FAILURE; 722 } 723 } 724 725 if (status != SCI_SUCCESS) 726 return status; 727 sci_base_state_machine_change_state(&sci_req->state_machine, 728 SCI_BASE_REQUEST_STATE_CONSTRUCTED); 729 730 return status; 731} 732 733/** 734 * sci_req_tx_bytes - bytes transferred when reply underruns request 735 * @sci_req: request that was terminated early 736 */ 737#define SCU_TASK_CONTEXT_SRAM 0x200000 738static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) 739{ 740 struct scic_sds_controller *scic = sci_req->owning_controller; 741 u32 ret_val = 0; 742 743 if (readl(&scic->smu_registers->address_modifier) == 0) { 744 void __iomem *scu_reg_base = scic->scu_registers; 745 746 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 747 * BAR1 is the scu_registers 748 * 0x20002C = 0x200000 + 0x2c 749 * = start of task context SRAM + offset of (type.ssp.data_offset) 750 * TCi is the io_tag of struct scic_sds_request 751 */ 752 ret_val = readl(scu_reg_base + 753 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 754 ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag))); 755 } 756 757 return ret_val; 758} 759 760enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) 761{ 762 struct scic_sds_controller *scic = sci_req->owning_controller; 763 struct scu_task_context *task_context; 764 enum sci_base_request_states state; 765 766 if (sci_req->device_sequence != 767 scic_sds_remote_device_get_sequence(sci_req->target_device)) 768 return SCI_FAILURE; 769 770 state = sci_req->state_machine.current_state_id; 771 if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) { 772 dev_warn(scic_to_dev(scic), 773 "%s: SCIC IO Request requested to start while in wrong " 774 "state %d\n", __func__, state); 775 return SCI_FAILURE_INVALID_STATE; 776 } 777 778 /* if necessary, allocate a TCi for the io request object and then will, 779 * if necessary, copy the constructed TC data into the actual TC buffer. 780 * If everything is successful the post context field is updated with 781 * the TCi so the controller can post the request to the hardware. 
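 *
 * Sketch of the resulting post context (added summary of the code in this
 * file; the exact field widths live in the SCU headers):
 *
 *	post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
 *		       (protocol_engine_group << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 *		       (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
 *		       scic_sds_io_tag_get_index(io_tag);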
782 */ 783 if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) 784 sci_req->io_tag = scic_controller_allocate_io_tag(scic); 785 786 /* Record the IO Tag in the request */ 787 if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { 788 task_context = sci_req->task_context_buffer; 789 790 task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag); 791 792 switch (task_context->protocol_type) { 793 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 794 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 795 /* SSP/SMP Frame */ 796 task_context->type.ssp.tag = sci_req->io_tag; 797 task_context->type.ssp.target_port_transfer_tag = 798 0xFFFF; 799 break; 800 801 case SCU_TASK_CONTEXT_PROTOCOL_STP: 802 /* STP/SATA Frame 803 * task_context->type.stp.ncq_tag = sci_req->ncq_tag; 804 */ 805 break; 806 807 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 808 /* / @todo When do we set no protocol type? */ 809 break; 810 811 default: 812 /* This should never happen since we build the IO 813 * requests */ 814 break; 815 } 816 817 /* 818 * Check to see if we need to copy the task context buffer 819 * or have been building into the task context buffer */ 820 if (sci_req->was_tag_assigned_by_user == false) 821 scic_sds_controller_copy_task_context(scic, sci_req); 822 823 /* Add to the post_context the io tag value */ 824 sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag); 825 826 /* Everything is good go ahead and change state */ 827 sci_base_state_machine_change_state(&sci_req->state_machine, 828 SCI_BASE_REQUEST_STATE_STARTED); 829 830 return SCI_SUCCESS; 831 } 832 833 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 834} 835 836enum sci_status 837scic_sds_io_request_terminate(struct scic_sds_request *sci_req) 838{ 839 enum sci_base_request_states state; 840 841 state = sci_req->state_machine.current_state_id; 842 843 switch (state) { 844 case SCI_BASE_REQUEST_STATE_CONSTRUCTED: 845 scic_sds_request_set_status(sci_req, 846 SCU_TASK_DONE_TASK_ABORT, 847 SCI_FAILURE_IO_TERMINATED); 848 849 sci_base_state_machine_change_state(&sci_req->state_machine, 850 SCI_BASE_REQUEST_STATE_COMPLETED); 851 return SCI_SUCCESS; 852 case SCI_BASE_REQUEST_STATE_STARTED: 853 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: 854 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: 855 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: 856 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: 857 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: 858 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: 859 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: 860 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: 861 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: 862 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: 863 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: 864 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: 865 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: 866 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: 867 sci_base_state_machine_change_state(&sci_req->state_machine, 868 SCI_BASE_REQUEST_STATE_ABORTING); 869 return SCI_SUCCESS; 870 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: 871 sci_base_state_machine_change_state(&sci_req->state_machine, 872 SCI_BASE_REQUEST_STATE_ABORTING); 873 
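		/*
		 * Added note (interpretation): in the AWAIT_TC_RESPONSE
		 * substate the task context has already completed, so the
		 * request does not need to wait in ABORTING for another TC
		 * completion and is moved straight on to COMPLETED below.
		 */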
sci_base_state_machine_change_state(&sci_req->state_machine, 874 SCI_BASE_REQUEST_STATE_COMPLETED); 875 return SCI_SUCCESS; 876 case SCI_BASE_REQUEST_STATE_ABORTING: 877 sci_base_state_machine_change_state(&sci_req->state_machine, 878 SCI_BASE_REQUEST_STATE_COMPLETED); 879 return SCI_SUCCESS; 880 case SCI_BASE_REQUEST_STATE_COMPLETED: 881 default: 882 dev_warn(scic_to_dev(sci_req->owning_controller), 883 "%s: SCIC IO Request requested to abort while in wrong " 884 "state %d\n", 885 __func__, 886 sci_base_state_machine_get_state(&sci_req->state_machine)); 887 break; 888 } 889 890 return SCI_FAILURE_INVALID_STATE; 891} 892 893enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) 894{ 895 enum sci_base_request_states state; 896 struct scic_sds_controller *scic = sci_req->owning_controller; 897 898 state = sci_req->state_machine.current_state_id; 899 if (WARN_ONCE(state != SCI_BASE_REQUEST_STATE_COMPLETED, 900 "isci: request completion from wrong state (%d)\n", state)) 901 return SCI_FAILURE_INVALID_STATE; 902 903 if (!sci_req->was_tag_assigned_by_user) 904 scic_controller_free_io_tag(scic, sci_req->io_tag); 905 906 if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 907 scic_sds_controller_release_frame(scic, 908 sci_req->saved_rx_frame_index); 909 910 /* XXX can we just stop the machine and remove the 'final' state? */ 911 sci_base_state_machine_change_state(&sci_req->state_machine, 912 SCI_BASE_REQUEST_STATE_FINAL); 913 return SCI_SUCCESS; 914} 915 916enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, 917 u32 event_code) 918{ 919 enum sci_base_request_states state; 920 struct scic_sds_controller *scic = sci_req->owning_controller; 921 922 state = sci_req->state_machine.current_state_id; 923 924 if (state != SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE) { 925 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", 926 __func__, event_code, state); 927 928 return SCI_FAILURE_INVALID_STATE; 929 } 930 931 switch (scu_get_event_specifier(event_code)) { 932 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 933 /* We are waiting for data and the SCU has R_ERR the data frame. 934 * Go back to waiting for the D2H Register FIS 935 */ 936 sci_base_state_machine_change_state(&sci_req->state_machine, 937 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); 938 return SCI_SUCCESS; 939 default: 940 dev_err(scic_to_dev(scic), 941 "%s: pio request unexpected event %#x\n", 942 __func__, event_code); 943 944 /* TODO Should we fail the PIO request when we get an 945 * unexpected event? 946 */ 947 return SCI_FAILURE; 948 } 949} 950 951/* 952 * This function copies response data for requests returning response data 953 * instead of sense data. 954 * @sci_req: This parameter specifies the request object for which to copy 955 * the response data. 
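 *
 * Added detail: the copy length below is clamped with min_t() to
 * SSP_RESP_IU_MAX_SIZE, so a response_data_len reported by the target can
 * never overrun the isci_tmf response buffer.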
956 */ 957static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) 958{ 959 void *resp_buf; 960 u32 len; 961 struct ssp_response_iu *ssp_response; 962 struct isci_request *ireq = sci_req_to_ireq(sci_req); 963 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 964 965 ssp_response = &sci_req->ssp.rsp; 966 967 resp_buf = &isci_tmf->resp.resp_iu; 968 969 len = min_t(u32, 970 SSP_RESP_IU_MAX_SIZE, 971 be32_to_cpu(ssp_response->response_data_len)); 972 973 memcpy(resp_buf, ssp_response->resp_data, len); 974} 975 976static enum sci_status request_started_state_tc_event(struct scic_sds_request *sci_req, 977 u32 completion_code) 978{ 979 struct ssp_response_iu *resp_iu; 980 u8 datapres; 981 982 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 983 * to determine SDMA status 984 */ 985 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 986 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 987 scic_sds_request_set_status(sci_req, 988 SCU_TASK_DONE_GOOD, 989 SCI_SUCCESS); 990 break; 991 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { 992 /* There are times when the SCU hardware will return an early 993 * response because the io request specified more data than is 994 * returned by the target device (mode pages, inquiry data, 995 * etc.). We must check the response stats to see if this is 996 * truly a failed request or a good request that just got 997 * completed early. 998 */ 999 struct ssp_response_iu *resp = &sci_req->ssp.rsp; 1000 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1001 1002 sci_swab32_cpy(&sci_req->ssp.rsp, 1003 &sci_req->ssp.rsp, 1004 word_cnt); 1005 1006 if (resp->status == 0) { 1007 scic_sds_request_set_status(sci_req, 1008 SCU_TASK_DONE_GOOD, 1009 SCI_SUCCESS_IO_DONE_EARLY); 1010 } else { 1011 scic_sds_request_set_status(sci_req, 1012 SCU_TASK_DONE_CHECK_RESPONSE, 1013 SCI_FAILURE_IO_RESPONSE_VALID); 1014 } 1015 break; 1016 } 1017 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 1018 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1019 1020 sci_swab32_cpy(&sci_req->ssp.rsp, 1021 &sci_req->ssp.rsp, 1022 word_cnt); 1023 1024 scic_sds_request_set_status(sci_req, 1025 SCU_TASK_DONE_CHECK_RESPONSE, 1026 SCI_FAILURE_IO_RESPONSE_VALID); 1027 break; 1028 } 1029 1030 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 1031 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame 1032 * guaranteed to be received before this completion status is 1033 * posted? 1034 */ 1035 resp_iu = &sci_req->ssp.rsp; 1036 datapres = resp_iu->datapres; 1037 1038 if (datapres == 1 || datapres == 2) { 1039 scic_sds_request_set_status(sci_req, 1040 SCU_TASK_DONE_CHECK_RESPONSE, 1041 SCI_FAILURE_IO_RESPONSE_VALID); 1042 } else 1043 scic_sds_request_set_status(sci_req, 1044 SCU_TASK_DONE_GOOD, 1045 SCI_SUCCESS); 1046 break; 1047 /* only stp device gets suspended. 
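 * For these transport errors the hardware only suspends the remote node
 * when the target is an STP device, so the handler below maps them to
 * SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED for SCIC_STP_PROTOCOL requests
 * and to SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR for everything else
 * (added summary of the code that follows).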
*/ 1048 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1049 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 1050 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 1051 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 1052 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 1053 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 1054 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 1055 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 1056 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1057 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1058 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1059 if (sci_req->protocol == SCIC_STP_PROTOCOL) { 1060 scic_sds_request_set_status(sci_req, 1061 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1062 SCU_COMPLETION_TL_STATUS_SHIFT, 1063 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 1064 } else { 1065 scic_sds_request_set_status(sci_req, 1066 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1067 SCU_COMPLETION_TL_STATUS_SHIFT, 1068 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1069 } 1070 break; 1071 1072 /* both stp/ssp device gets suspended */ 1073 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 1075 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 1076 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 1077 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 1078 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 1079 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 1080 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 1081 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 1082 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 1083 scic_sds_request_set_status(sci_req, 1084 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1085 SCU_COMPLETION_TL_STATUS_SHIFT, 1086 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 1087 break; 1088 1089 /* neither ssp nor stp gets suspended. 
*/ 1090 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 1091 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 1092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 1093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 1094 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 1095 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 1096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1097 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1098 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1099 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1100 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 1101 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 1102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 1105 default: 1106 scic_sds_request_set_status( 1107 sci_req, 1108 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1109 SCU_COMPLETION_TL_STATUS_SHIFT, 1110 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1111 break; 1112 } 1113 1114 /* 1115 * TODO: This is probably wrong for ACK/NAK timeout conditions 1116 */ 1117 1118 /* In all cases we will treat this as the completion of the IO req. */ 1119 sci_base_state_machine_change_state(&sci_req->state_machine, 1120 SCI_BASE_REQUEST_STATE_COMPLETED); 1121 return SCI_SUCCESS; 1122} 1123 1124static enum sci_status request_aborting_state_tc_event(struct scic_sds_request *sci_req, 1125 u32 completion_code) 1126{ 1127 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1128 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 1129 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 1130 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, 1131 SCI_FAILURE_IO_TERMINATED); 1132 1133 sci_base_state_machine_change_state(&sci_req->state_machine, 1134 SCI_BASE_REQUEST_STATE_COMPLETED); 1135 break; 1136 1137 default: 1138 /* Unless we get some strange error wait for the task abort to complete 1139 * TODO: Should there be a state change for this completion? 1140 */ 1141 break; 1142 } 1143 1144 return SCI_SUCCESS; 1145} 1146 1147static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req, 1148 u32 completion_code) 1149{ 1150 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1151 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1152 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1153 SCI_SUCCESS); 1154 1155 sci_base_state_machine_change_state(&sci_req->state_machine, 1156 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1157 break; 1158 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1159 /* Currently, the decision is to simply allow the task request 1160 * to timeout if the task IU wasn't received successfully. 1161 * There is a potential for receiving multiple task responses if 1162 * we decide to send the task IU again. 1163 */ 1164 dev_warn(scic_to_dev(sci_req->owning_controller), 1165 "%s: TaskRequest:0x%p CompletionCode:%x - " 1166 "ACK/NAK timeout\n", __func__, sci_req, 1167 completion_code); 1168 1169 sci_base_state_machine_change_state(&sci_req->state_machine, 1170 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE); 1171 break; 1172 default: 1173 /* All other completion status cause the IO to be complete. 
If a NAK 1174 * was received, then it is up to the user to retry the request. 1175 */ 1176 scic_sds_request_set_status(sci_req, 1177 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1178 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1179 1180 sci_base_state_machine_change_state(&sci_req->state_machine, 1181 SCI_BASE_REQUEST_STATE_COMPLETED); 1182 break; 1183 } 1184 1185 return SCI_SUCCESS; 1186} 1187 1188static enum sci_status smp_request_await_response_tc_event(struct scic_sds_request *sci_req, 1189 u32 completion_code) 1190{ 1191 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1192 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1193 /* In the AWAIT RESPONSE state, any TC completion is 1194 * unexpected. but if the TC has success status, we 1195 * complete the IO anyway. 1196 */ 1197 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1198 SCI_SUCCESS); 1199 1200 sci_base_state_machine_change_state(&sci_req->state_machine, 1201 SCI_BASE_REQUEST_STATE_COMPLETED); 1202 break; 1203 1204 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1205 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1206 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1208 /* These status has been seen in a specific LSI 1209 * expander, which sometimes is not able to send smp 1210 * response within 2 ms. This causes our hardware break 1211 * the connection and set TC completion with one of 1212 * these SMP_XXX_XX_ERR status. For these type of error, 1213 * we ask scic user to retry the request. 1214 */ 1215 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, 1216 SCI_FAILURE_RETRY_REQUIRED); 1217 1218 sci_base_state_machine_change_state(&sci_req->state_machine, 1219 SCI_BASE_REQUEST_STATE_COMPLETED); 1220 break; 1221 1222 default: 1223 /* All other completion status cause the IO to be complete. If a NAK 1224 * was received, then it is up to the user to retry the request 1225 */ 1226 scic_sds_request_set_status(sci_req, 1227 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1228 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1229 1230 sci_base_state_machine_change_state(&sci_req->state_machine, 1231 SCI_BASE_REQUEST_STATE_COMPLETED); 1232 break; 1233 } 1234 1235 return SCI_SUCCESS; 1236} 1237 1238static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_req, 1239 u32 completion_code) 1240{ 1241 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1242 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1243 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1244 SCI_SUCCESS); 1245 1246 sci_base_state_machine_change_state(&sci_req->state_machine, 1247 SCI_BASE_REQUEST_STATE_COMPLETED); 1248 break; 1249 default: 1250 /* All other completion status cause the IO to be 1251 * complete. If a NAK was received, then it is up to 1252 * the user to retry the request. 1253 */ 1254 scic_sds_request_set_status(sci_req, 1255 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1256 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1257 1258 sci_base_state_machine_change_state(&sci_req->state_machine, 1259 SCI_BASE_REQUEST_STATE_COMPLETED); 1260 break; 1261 } 1262 1263 return SCI_SUCCESS; 1264} 1265 1266void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, 1267 u16 ncq_tag) 1268{ 1269 /** 1270 * @note This could be made to return an error to the user if the user 1271 * attempts to set the NCQ tag in the wrong state. 
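 *
 * Added clarification: the tag is written straight into the request's
 * task context buffer (see the assignment below), so callers are expected
 * to set it while the request is still being constructed, before the TC
 * is posted to the hardware.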
1272 */ 1273 req->task_context_buffer->type.stp.ncq_tag = ncq_tag; 1274} 1275 1276/** 1277 * 1278 * @sci_req: 1279 * 1280 * Get the next SGL element from the request. - Check on which SGL element pair 1281 * we are working - if working on SLG pair element A - advance to element B - 1282 * else - check to see if there are more SGL element pairs for this IO request 1283 * - if there are more SGL element pairs - advance to the next pair and return 1284 * element A struct scu_sgl_element* 1285 */ 1286static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) 1287{ 1288 struct scu_sgl_element *current_sgl; 1289 struct scic_sds_request *sci_req = to_sci_req(stp_req); 1290 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; 1291 1292 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { 1293 if (pio_sgl->sgl_pair->B.address_lower == 0 && 1294 pio_sgl->sgl_pair->B.address_upper == 0) { 1295 current_sgl = NULL; 1296 } else { 1297 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; 1298 current_sgl = &pio_sgl->sgl_pair->B; 1299 } 1300 } else { 1301 if (pio_sgl->sgl_pair->next_pair_lower == 0 && 1302 pio_sgl->sgl_pair->next_pair_upper == 0) { 1303 current_sgl = NULL; 1304 } else { 1305 u64 phys_addr; 1306 1307 phys_addr = pio_sgl->sgl_pair->next_pair_upper; 1308 phys_addr <<= 32; 1309 phys_addr |= pio_sgl->sgl_pair->next_pair_lower; 1310 1311 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); 1312 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; 1313 current_sgl = &pio_sgl->sgl_pair->A; 1314 } 1315 } 1316 1317 return current_sgl; 1318} 1319 1320static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, 1321 u32 completion_code) 1322{ 1323 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1324 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1325 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1326 SCI_SUCCESS); 1327 1328 sci_base_state_machine_change_state(&sci_req->state_machine, 1329 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE); 1330 break; 1331 1332 default: 1333 /* All other completion status cause the IO to be 1334 * complete. If a NAK was received, then it is up to 1335 * the user to retry the request. 1336 */ 1337 scic_sds_request_set_status(sci_req, 1338 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1339 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1340 1341 sci_base_state_machine_change_state(&sci_req->state_machine, 1342 SCI_BASE_REQUEST_STATE_COMPLETED); 1343 break; 1344 } 1345 1346 return SCI_SUCCESS; 1347} 1348 1349#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1350 1351/* transmit DATA_FIS from (current sgl + offset) for input 1352 * parameter length. 
current sgl and offset is alreay stored in the IO request 1353 */ 1354static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( 1355 struct scic_sds_request *sci_req, 1356 u32 length) 1357{ 1358 struct scic_sds_controller *scic = sci_req->owning_controller; 1359 struct scic_sds_stp_request *stp_req = &sci_req->stp.req; 1360 struct scu_task_context *task_context; 1361 struct scu_sgl_element *current_sgl; 1362 1363 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1364 * for the data from current_sgl+offset for the input length 1365 */ 1366 task_context = scic_sds_controller_get_task_context_buffer(scic, 1367 sci_req->io_tag); 1368 1369 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) 1370 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; 1371 else 1372 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; 1373 1374 /* update the TC */ 1375 task_context->command_iu_upper = current_sgl->address_upper; 1376 task_context->command_iu_lower = current_sgl->address_lower; 1377 task_context->transfer_length_bytes = length; 1378 task_context->type.stp.fis_type = FIS_DATA; 1379 1380 /* send the new TC out. */ 1381 return scic_controller_continue_io(sci_req); 1382} 1383 1384static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) 1385{ 1386 1387 struct scu_sgl_element *current_sgl; 1388 u32 sgl_offset; 1389 u32 remaining_bytes_in_current_sgl = 0; 1390 enum sci_status status = SCI_SUCCESS; 1391 struct scic_sds_stp_request *stp_req = &sci_req->stp.req; 1392 1393 sgl_offset = stp_req->type.pio.request_current.sgl_offset; 1394 1395 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { 1396 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); 1397 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; 1398 } else { 1399 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); 1400 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; 1401 } 1402 1403 1404 if (stp_req->type.pio.pio_transfer_bytes > 0) { 1405 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { 1406 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ 1407 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl); 1408 if (status == SCI_SUCCESS) { 1409 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; 1410 1411 /* update the current sgl, sgl_offset and save for future */ 1412 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); 1413 sgl_offset = 0; 1414 } 1415 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { 1416 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */ 1417 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes); 1418 1419 if (status == SCI_SUCCESS) { 1420 /* Sgl offset will be adjusted and saved for future */ 1421 sgl_offset += stp_req->type.pio.pio_transfer_bytes; 1422 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes; 1423 stp_req->type.pio.pio_transfer_bytes = 0; 1424 } 1425 } 1426 } 1427 1428 if (status == SCI_SUCCESS) { 1429 stp_req->type.pio.request_current.sgl_offset = sgl_offset; 1430 } 1431 1432 return status; 1433} 1434 1435/** 1436 
* 1437 * @stp_request: The request that is used for the SGL processing. 1438 * @data_buffer: The buffer of data to be copied. 1439 * @length: The length of the data transfer. 1440 * 1441 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1442 * specified data region. enum sci_status 1443 */ 1444static enum sci_status 1445scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req, 1446 u8 *data_buf, u32 len) 1447{ 1448 struct scic_sds_request *sci_req; 1449 struct isci_request *ireq; 1450 u8 *src_addr; 1451 int copy_len; 1452 struct sas_task *task; 1453 struct scatterlist *sg; 1454 void *kaddr; 1455 int total_len = len; 1456 1457 sci_req = to_sci_req(stp_req); 1458 ireq = sci_req_to_ireq(sci_req); 1459 task = isci_request_access_task(ireq); 1460 src_addr = data_buf; 1461 1462 if (task->num_scatter > 0) { 1463 sg = task->scatter; 1464 1465 while (total_len > 0) { 1466 struct page *page = sg_page(sg); 1467 1468 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1469 kaddr = kmap_atomic(page, KM_IRQ0); 1470 memcpy(kaddr + sg->offset, src_addr, copy_len); 1471 kunmap_atomic(kaddr, KM_IRQ0); 1472 total_len -= copy_len; 1473 src_addr += copy_len; 1474 sg = sg_next(sg); 1475 } 1476 } else { 1477 BUG_ON(task->total_xfer_len < total_len); 1478 memcpy(task->scatter, src_addr, total_len); 1479 } 1480 1481 return SCI_SUCCESS; 1482} 1483 1484/** 1485 * 1486 * @sci_req: The PIO DATA IN request that is to receive the data. 1487 * @data_buffer: The buffer to copy from. 1488 * 1489 * Copy the data buffer to the io request data region. enum sci_status 1490 */ 1491static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( 1492 struct scic_sds_stp_request *sci_req, 1493 u8 *data_buffer) 1494{ 1495 enum sci_status status; 1496 1497 /* 1498 * If there is less than 1K remaining in the transfer request 1499 * copy just the data for the transfer */ 1500 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) { 1501 status = scic_sds_stp_request_pio_data_in_copy_data_buffer( 1502 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes); 1503 1504 if (status == SCI_SUCCESS) 1505 sci_req->type.pio.pio_transfer_bytes = 0; 1506 } else { 1507 /* We are transfering the whole frame so copy */ 1508 status = scic_sds_stp_request_pio_data_in_copy_data_buffer( 1509 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1510 1511 if (status == SCI_SUCCESS) 1512 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE; 1513 } 1514 1515 return status; 1516} 1517 1518static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, 1519 u32 completion_code) 1520{ 1521 enum sci_status status = SCI_SUCCESS; 1522 1523 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1524 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1525 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS); 1526 1527 sci_base_state_machine_change_state(&sci_req->state_machine, 1528 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); 1529 break; 1530 1531 default: 1532 /* All other completion status cause the IO to be 1533 * complete. If a NAK was received, then it is up to 1534 * the user to retry the request. 
1535 */ 1536 scic_sds_request_set_status(sci_req, 1537 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1538 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1539 1540 sci_base_state_machine_change_state(&sci_req->state_machine, 1541 SCI_BASE_REQUEST_STATE_COMPLETED); 1542 break; 1543 } 1544 1545 return status; 1546} 1547 1548static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, 1549 u32 completion_code) 1550{ 1551 enum sci_status status = SCI_SUCCESS; 1552 bool all_frames_transferred = false; 1553 struct scic_sds_stp_request *stp_req = &sci_req->stp.req; 1554 1555 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1556 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1557 /* Transmit data */ 1558 if (stp_req->type.pio.pio_transfer_bytes != 0) { 1559 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); 1560 if (status == SCI_SUCCESS) { 1561 if (stp_req->type.pio.pio_transfer_bytes == 0) 1562 all_frames_transferred = true; 1563 } 1564 } else if (stp_req->type.pio.pio_transfer_bytes == 0) { 1565 /* 1566 * this will happen if the all data is written at the 1567 * first time after the pio setup fis is received 1568 */ 1569 all_frames_transferred = true; 1570 } 1571 1572 /* all data transferred. */ 1573 if (all_frames_transferred) { 1574 /* 1575 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE 1576 * and wait for PIO_SETUP fis / or D2H REg fis. */ 1577 sci_base_state_machine_change_state( 1578 &sci_req->state_machine, 1579 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE 1580 ); 1581 } 1582 break; 1583 default: 1584 /* 1585 * All other completion status cause the IO to be complete. If a NAK 1586 * was received, then it is up to the user to retry the request. */ 1587 scic_sds_request_set_status( 1588 sci_req, 1589 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1590 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR 1591 ); 1592 1593 sci_base_state_machine_change_state( 1594 &sci_req->state_machine, 1595 SCI_BASE_REQUEST_STATE_COMPLETED 1596 ); 1597 break; 1598 } 1599 1600 return status; 1601} 1602 1603static void scic_sds_stp_request_udma_complete_request( 1604 struct scic_sds_request *request, 1605 u32 scu_status, 1606 enum sci_status sci_status) 1607{ 1608 scic_sds_request_set_status(request, scu_status, sci_status); 1609 sci_base_state_machine_change_state(&request->state_machine, 1610 SCI_BASE_REQUEST_STATE_COMPLETED); 1611} 1612 1613static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, 1614 u32 frame_index) 1615{ 1616 struct scic_sds_controller *scic = sci_req->owning_controller; 1617 struct dev_to_host_fis *frame_header; 1618 enum sci_status status; 1619 u32 *frame_buffer; 1620 1621 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1622 frame_index, 1623 (void **)&frame_header); 1624 1625 if ((status == SCI_SUCCESS) && 1626 (frame_header->fis_type == FIS_REGD2H)) { 1627 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1628 frame_index, 1629 (void **)&frame_buffer); 1630 1631 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1632 frame_header, 1633 frame_buffer); 1634 } 1635 1636 scic_sds_controller_release_frame(scic, frame_index); 1637 1638 return status; 1639} 1640 1641enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, 1642 u32 frame_index) 1643{ 1644 struct scic_sds_controller *scic = sci_req->owning_controller; 1645 struct scic_sds_stp_request *stp_req = &sci_req->stp.req; 
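	/*
	 * Added overview: this handler dispatches on the request's current
	 * state, byte-swaps any SSP/SMP headers it needs via sci_swab32_cpy()
	 * and, once the payload has been copied out, returns the unsolicited
	 * frame to the controller with scic_sds_controller_release_frame().
	 */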
1646 enum sci_base_request_states state; 1647 enum sci_status status; 1648 ssize_t word_cnt; 1649 1650 state = sci_req->state_machine.current_state_id; 1651 switch (state) { 1652 case SCI_BASE_REQUEST_STATE_STARTED: { 1653 struct ssp_frame_hdr ssp_hdr; 1654 void *frame_header; 1655 1656 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1657 frame_index, 1658 &frame_header); 1659 1660 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1661 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1662 1663 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1664 struct ssp_response_iu *resp_iu; 1665 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1666 1667 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1668 frame_index, 1669 (void **)&resp_iu); 1670 1671 sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt); 1672 1673 resp_iu = &sci_req->ssp.rsp; 1674 1675 if (resp_iu->datapres == 0x01 || 1676 resp_iu->datapres == 0x02) { 1677 scic_sds_request_set_status(sci_req, 1678 SCU_TASK_DONE_CHECK_RESPONSE, 1679 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1680 } else 1681 scic_sds_request_set_status(sci_req, 1682 SCU_TASK_DONE_GOOD, 1683 SCI_SUCCESS); 1684 } else { 1685 /* not a response frame, why did it get forwarded? */ 1686 dev_err(scic_to_dev(scic), 1687 "%s: SCIC IO Request 0x%p received unexpected " 1688 "frame %d type 0x%02x\n", __func__, sci_req, 1689 frame_index, ssp_hdr.frame_type); 1690 } 1691 1692 /* 1693 * In any case we are done with this frame buffer return it to the 1694 * controller 1695 */ 1696 scic_sds_controller_release_frame(scic, frame_index); 1697 1698 return SCI_SUCCESS; 1699 } 1700 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE: 1701 scic_sds_io_request_copy_response(sci_req); 1702 sci_base_state_machine_change_state(&sci_req->state_machine, 1703 SCI_BASE_REQUEST_STATE_COMPLETED); 1704 scic_sds_controller_release_frame(scic,frame_index); 1705 return SCI_SUCCESS; 1706 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: { 1707 struct smp_resp *rsp_hdr = &sci_req->smp.rsp; 1708 void *frame_header; 1709 1710 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1711 frame_index, 1712 &frame_header); 1713 1714 /* byte swap the header. */ 1715 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); 1716 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); 1717 1718 if (rsp_hdr->frame_type == SMP_RESPONSE) { 1719 void *smp_resp; 1720 1721 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1722 frame_index, 1723 &smp_resp); 1724 1725 word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) / 1726 sizeof(u32); 1727 1728 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, 1729 smp_resp, word_cnt); 1730 1731 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1732 SCI_SUCCESS); 1733 1734 sci_base_state_machine_change_state(&sci_req->state_machine, 1735 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION); 1736 } else { 1737 /* This was not a response frame why did it get forwarded? 
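			 * The request is failed below with
			 * SCU_TASK_DONE_SMP_FRM_TYPE_ERR, and the frame is
			 * still released back to the controller.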
*/ 1738 dev_err(scic_to_dev(scic), 1739 "%s: SCIC SMP Request 0x%p received unexpected frame " 1740 "%d type 0x%02x\n", __func__, sci_req, 1741 frame_index, rsp_hdr->frame_type); 1742 1743 scic_sds_request_set_status(sci_req, 1744 SCU_TASK_DONE_SMP_FRM_TYPE_ERR, 1745 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1746 1747 sci_base_state_machine_change_state(&sci_req->state_machine, 1748 SCI_BASE_REQUEST_STATE_COMPLETED); 1749 } 1750 1751 scic_sds_controller_release_frame(scic, frame_index); 1752 1753 return SCI_SUCCESS; 1754 } 1755 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: 1756 return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); 1757 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE: 1758 /* Use the general frame handler to copy the resposne data */ 1759 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index); 1760 1761 if (status != SCI_SUCCESS) 1762 return status; 1763 1764 scic_sds_stp_request_udma_complete_request(sci_req, 1765 SCU_TASK_DONE_CHECK_RESPONSE, 1766 SCI_FAILURE_IO_RESPONSE_VALID); 1767 return SCI_SUCCESS; 1768 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: { 1769 struct dev_to_host_fis *frame_header; 1770 u32 *frame_buffer; 1771 1772 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1773 frame_index, 1774 (void **)&frame_header); 1775 1776 if (status != SCI_SUCCESS) { 1777 dev_err(scic_to_dev(scic), 1778 "%s: SCIC IO Request 0x%p could not get frame header " 1779 "for frame index %d, status %x\n", 1780 __func__, stp_req, frame_index, status); 1781 1782 return status; 1783 } 1784 1785 switch (frame_header->fis_type) { 1786 case FIS_REGD2H: 1787 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1788 frame_index, 1789 (void **)&frame_buffer); 1790 1791 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1792 frame_header, 1793 frame_buffer); 1794 1795 /* The command has completed with error */ 1796 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, 1797 SCI_FAILURE_IO_RESPONSE_VALID); 1798 break; 1799 1800 default: 1801 dev_warn(scic_to_dev(scic), 1802 "%s: IO Request:0x%p Frame Id:%d protocol " 1803 "violation occurred\n", __func__, stp_req, 1804 frame_index); 1805 1806 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, 1807 SCI_FAILURE_PROTOCOL_VIOLATION); 1808 break; 1809 } 1810 1811 sci_base_state_machine_change_state(&sci_req->state_machine, 1812 SCI_BASE_REQUEST_STATE_COMPLETED); 1813 1814 /* Frame has been decoded return it to the controller */ 1815 scic_sds_controller_release_frame(scic, frame_index); 1816 1817 return status; 1818 } 1819 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: { 1820 struct isci_request *ireq = sci_req_to_ireq(sci_req); 1821 struct sas_task *task = isci_request_access_task(ireq); 1822 struct dev_to_host_fis *frame_header; 1823 u32 *frame_buffer; 1824 1825 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1826 frame_index, 1827 (void **)&frame_header); 1828 1829 if (status != SCI_SUCCESS) { 1830 dev_err(scic_to_dev(scic), 1831 "%s: SCIC IO Request 0x%p could not get frame header " 1832 "for frame index %d, status %x\n", 1833 __func__, stp_req, frame_index, status); 1834 return status; 1835 } 1836 1837 switch (frame_header->fis_type) { 1838 case FIS_PIO_SETUP: 1839 /* Get from the frame buffer the PIO Setup Data */ 1840 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1841 frame_index, 1842 (void **)&frame_buffer); 1843 
1844 /* Get the data from the PIO Setup. The SCU Hardware returns
1845 * the first word in the frame_header and the rest of the data is in
1846 * the frame buffer, so we need to back up one dword
1847 */
1848
1849 /* transfer_count: first 16bits in the 4th dword */
1850 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
1851
1852 /* ending_status: 4th byte in the 3rd dword */
1853 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
1854
1855 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1856 frame_header,
1857 frame_buffer);
1858
1859 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
1860
1861 /* The next state is dependent on whether the
1862 * request was PIO Data-in or Data-out
1863 */
1864 if (task->data_dir == DMA_FROM_DEVICE) {
1865 sci_base_state_machine_change_state(&sci_req->state_machine,
1866 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
1867 } else if (task->data_dir == DMA_TO_DEVICE) {
1868 /* Transmit data */
1869 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1870 if (status != SCI_SUCCESS)
1871 break;
1872 sci_base_state_machine_change_state(&sci_req->state_machine,
1873 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
1874 }
1875 break;
1876 case FIS_SETDEVBITS:
1877 sci_base_state_machine_change_state(&sci_req->state_machine,
1878 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
1879 break;
1880 case FIS_REGD2H:
1881 if (frame_header->status & ATA_BUSY) {
1882 /* Now why is the drive sending a D2H Register FIS when
1883 * it is still busy? Do nothing since we are still in
1884 * the right state.
1885 */
1886 dev_dbg(scic_to_dev(scic),
1887 "%s: SCIC PIO Request 0x%p received "
1888 "D2H Register FIS with BSY status "
1889 "0x%x\n", __func__, stp_req,
1890 frame_header->status);
1891 break;
1892 }
1893
1894 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1895 frame_index,
1896 (void **)&frame_buffer);
1897
1898 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1899 frame_header,
1900 frame_buffer);
1901
1902 scic_sds_request_set_status(sci_req,
1903 SCU_TASK_DONE_CHECK_RESPONSE,
1904 SCI_FAILURE_IO_RESPONSE_VALID);
1905
1906 sci_base_state_machine_change_state(&sci_req->state_machine,
1907 SCI_BASE_REQUEST_STATE_COMPLETED);
1908 break;
1909 default:
1910 /* FIXME: what do we do here?
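			 * For now the unexpected FIS is simply ignored: the frame
			 * is released back to the controller below and the request
			 * stays in the PIO await-frame substate.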
*/ 1911 break; 1912 } 1913 1914 /* Frame is decoded return it to the controller */ 1915 scic_sds_controller_release_frame(scic, frame_index); 1916 1917 return status; 1918 } 1919 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: { 1920 struct dev_to_host_fis *frame_header; 1921 struct sata_fis_data *frame_buffer; 1922 1923 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1924 frame_index, 1925 (void **)&frame_header); 1926 1927 if (status != SCI_SUCCESS) { 1928 dev_err(scic_to_dev(scic), 1929 "%s: SCIC IO Request 0x%p could not get frame header " 1930 "for frame index %d, status %x\n", 1931 __func__, stp_req, frame_index, status); 1932 return status; 1933 } 1934 1935 if (frame_header->fis_type != FIS_DATA) { 1936 dev_err(scic_to_dev(scic), 1937 "%s: SCIC PIO Request 0x%p received frame %d " 1938 "with fis type 0x%02x when expecting a data " 1939 "fis.\n", __func__, stp_req, frame_index, 1940 frame_header->fis_type); 1941 1942 scic_sds_request_set_status(sci_req, 1943 SCU_TASK_DONE_GOOD, 1944 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); 1945 1946 sci_base_state_machine_change_state(&sci_req->state_machine, 1947 SCI_BASE_REQUEST_STATE_COMPLETED); 1948 1949 /* Frame is decoded return it to the controller */ 1950 scic_sds_controller_release_frame(scic, frame_index); 1951 return status; 1952 } 1953 1954 if (stp_req->type.pio.request_current.sgl_pair == NULL) { 1955 sci_req->saved_rx_frame_index = frame_index; 1956 stp_req->type.pio.pio_transfer_bytes = 0; 1957 } else { 1958 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1959 frame_index, 1960 (void **)&frame_buffer); 1961 1962 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, 1963 (u8 *)frame_buffer); 1964 1965 /* Frame is decoded return it to the controller */ 1966 scic_sds_controller_release_frame(scic, frame_index); 1967 } 1968 1969 /* Check for the end of the transfer, are there more 1970 * bytes remaining for this data transfer 1971 */ 1972 if (status != SCI_SUCCESS || 1973 stp_req->type.pio.pio_transfer_bytes != 0) 1974 return status; 1975 1976 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) { 1977 scic_sds_request_set_status(sci_req, 1978 SCU_TASK_DONE_CHECK_RESPONSE, 1979 SCI_FAILURE_IO_RESPONSE_VALID); 1980 1981 sci_base_state_machine_change_state(&sci_req->state_machine, 1982 SCI_BASE_REQUEST_STATE_COMPLETED); 1983 } else { 1984 sci_base_state_machine_change_state(&sci_req->state_machine, 1985 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE); 1986 } 1987 return status; 1988 } 1989 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: { 1990 struct dev_to_host_fis *frame_header; 1991 u32 *frame_buffer; 1992 1993 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1994 frame_index, 1995 (void **)&frame_header); 1996 if (status != SCI_SUCCESS) { 1997 dev_err(scic_to_dev(scic), 1998 "%s: SCIC IO Request 0x%p could not get frame header " 1999 "for frame index %d, status %x\n", 2000 __func__, stp_req, frame_index, status); 2001 return status; 2002 } 2003 2004 switch (frame_header->fis_type) { 2005 case FIS_REGD2H: 2006 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 2007 frame_index, 2008 (void **)&frame_buffer); 2009 2010 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 2011 frame_header, 2012 frame_buffer); 2013 2014 /* The command has completed with error */ 2015 scic_sds_request_set_status(sci_req, 2016 SCU_TASK_DONE_CHECK_RESPONSE, 2017 SCI_FAILURE_IO_RESPONSE_VALID); 2018 break; 2019 
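		/* Any other FIS type is treated as a protocol violation. */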
default: 2020 dev_warn(scic_to_dev(scic), 2021 "%s: IO Request:0x%p Frame Id:%d protocol " 2022 "violation occurred\n", __func__, stp_req, 2023 frame_index); 2024 2025 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, 2026 SCI_FAILURE_PROTOCOL_VIOLATION); 2027 break; 2028 } 2029 2030 sci_base_state_machine_change_state(&sci_req->state_machine, 2031 SCI_BASE_REQUEST_STATE_COMPLETED); 2032 2033 /* Frame has been decoded return it to the controller */ 2034 scic_sds_controller_release_frame(scic, frame_index); 2035 2036 return status; 2037 } 2038 case SCI_BASE_REQUEST_STATE_ABORTING: 2039 /* TODO: Is it even possible to get an unsolicited frame in the 2040 * aborting state? 2041 */ 2042 scic_sds_controller_release_frame(scic, frame_index); 2043 return SCI_SUCCESS; 2044 default: 2045 dev_warn(scic_to_dev(scic), 2046 "%s: SCIC IO Request given unexpected frame %x while in " 2047 "state %d\n", __func__, frame_index, state); 2048 2049 scic_sds_controller_release_frame(scic, frame_index); 2050 return SCI_FAILURE_INVALID_STATE; 2051 } 2052} 2053 2054static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req, 2055 u32 completion_code) 2056{ 2057 enum sci_status status = SCI_SUCCESS; 2058 2059 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2060 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2061 scic_sds_stp_request_udma_complete_request(sci_req, 2062 SCU_TASK_DONE_GOOD, 2063 SCI_SUCCESS); 2064 break; 2065 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 2066 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 2067 /* We must check ther response buffer to see if the D2H 2068 * Register FIS was received before we got the TC 2069 * completion. 2070 */ 2071 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { 2072 scic_sds_remote_device_suspend(sci_req->target_device, 2073 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2074 2075 scic_sds_stp_request_udma_complete_request(sci_req, 2076 SCU_TASK_DONE_CHECK_RESPONSE, 2077 SCI_FAILURE_IO_RESPONSE_VALID); 2078 } else { 2079 /* If we have an error completion status for the 2080 * TC then we can expect a D2H register FIS from 2081 * the device so we must change state to wait 2082 * for it 2083 */ 2084 sci_base_state_machine_change_state(&sci_req->state_machine, 2085 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE); 2086 } 2087 break; 2088 2089 /* TODO Check to see if any of these completion status need to 2090 * wait for the device to host register fis. 2091 */ 2092 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2093 * - this comes only for B0 2094 */ 2095 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): 2096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 2097 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 2098 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 2099 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): 2100 scic_sds_remote_device_suspend(sci_req->target_device, 2101 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2102 /* Fall through to the default case */ 2103 default: 2104 /* All other completion status cause the IO to be complete. 
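		 * The request is completed below with the normalized SCU
		 * completion status.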
*/ 2105 scic_sds_stp_request_udma_complete_request(sci_req, 2106 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2107 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2108 break; 2109 } 2110 2111 return status; 2112} 2113 2114static enum sci_status stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, 2115 u32 completion_code) 2116{ 2117 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2118 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2119 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 2120 SCI_SUCCESS); 2121 2122 sci_base_state_machine_change_state(&sci_req->state_machine, 2123 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE); 2124 break; 2125 2126 default: 2127 /* 2128 * All other completion status cause the IO to be complete. If a NAK 2129 * was received, then it is up to the user to retry the request. */ 2130 scic_sds_request_set_status(sci_req, 2131 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2132 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2133 2134 sci_base_state_machine_change_state(&sci_req->state_machine, 2135 SCI_BASE_REQUEST_STATE_COMPLETED); 2136 break; 2137 } 2138 2139 return SCI_SUCCESS; 2140} 2141 2142static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event( 2143 struct scic_sds_request *sci_req, 2144 u32 completion_code) 2145{ 2146 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2147 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2148 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 2149 SCI_SUCCESS); 2150 2151 sci_base_state_machine_change_state(&sci_req->state_machine, 2152 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE); 2153 break; 2154 2155 default: 2156 /* All other completion status cause the IO to be complete. If 2157 * a NAK was received, then it is up to the user to retry the 2158 * request. 
2159 */ 2160 scic_sds_request_set_status(sci_req, 2161 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2162 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2163 2164 sci_base_state_machine_change_state(&sci_req->state_machine, 2165 SCI_BASE_REQUEST_STATE_COMPLETED); 2166 break; 2167 } 2168 2169 return SCI_SUCCESS; 2170} 2171 2172enum sci_status 2173scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completion_code) 2174{ 2175 enum sci_base_request_states state; 2176 struct scic_sds_controller *scic = sci_req->owning_controller; 2177 2178 state = sci_req->state_machine.current_state_id; 2179 2180 switch (state) { 2181 case SCI_BASE_REQUEST_STATE_STARTED: 2182 return request_started_state_tc_event(sci_req, completion_code); 2183 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION: 2184 return ssp_task_request_await_tc_event(sci_req, completion_code); 2185 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: 2186 return smp_request_await_response_tc_event(sci_req, completion_code); 2187 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION: 2188 return smp_request_await_tc_event(sci_req, completion_code); 2189 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE: 2190 return stp_request_udma_await_tc_event(sci_req, completion_code); 2191 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE: 2192 return stp_request_non_data_await_h2d_tc_event(sci_req, completion_code); 2193 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE: 2194 return stp_request_pio_await_h2d_completion_tc_event(sci_req, completion_code); 2195 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE: 2196 return pio_data_out_tx_done_tc_event(sci_req, completion_code); 2197 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE: 2198 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, completion_code); 2199 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE: 2200 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, completion_code); 2201 case SCI_BASE_REQUEST_STATE_ABORTING: 2202 return request_aborting_state_tc_event(sci_req, completion_code); 2203 default: 2204 dev_warn(scic_to_dev(scic), 2205 "%s: SCIC IO Request given task completion notification %x " 2206 "while in wrong state %d\n", __func__, completion_code, 2207 state); 2208 return SCI_FAILURE_INVALID_STATE; 2209 } 2210} 2211 2212/** 2213 * isci_request_process_response_iu() - This function sets the status and 2214 * response iu, in the task struct, from the request object for the upper 2215 * layer driver. 2216 * @sas_task: This parameter is the task struct from the upper layer driver. 2217 * @resp_iu: This parameter points to the response iu of the completed request. 2218 * @dev: This parameter specifies the linux device struct. 2219 * 2220 * none. 2221 */ 2222static void isci_request_process_response_iu( 2223 struct sas_task *task, 2224 struct ssp_response_iu *resp_iu, 2225 struct device *dev) 2226{ 2227 dev_dbg(dev, 2228 "%s: resp_iu = %p " 2229 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " 2230 "resp_iu->response_data_len = %x, " 2231 "resp_iu->sense_data_len = %x\nrepsonse data: ", 2232 __func__, 2233 resp_iu, 2234 resp_iu->status, 2235 resp_iu->datapres, 2236 resp_iu->response_data_len, 2237 resp_iu->sense_data_len); 2238 2239 task->task_status.stat = resp_iu->status; 2240 2241 /* libsas updates the task status fields based on the response iu. 
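	 * (sas_ssp_task_response() decodes any sense or response data
	 * indicated by the datapres field).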
*/ 2242 sas_ssp_task_response(dev, task, resp_iu); 2243} 2244 2245/** 2246 * isci_request_set_open_reject_status() - This function prepares the I/O 2247 * completion for OPEN_REJECT conditions. 2248 * @request: This parameter is the completed isci_request object. 2249 * @response_ptr: This parameter specifies the service response for the I/O. 2250 * @status_ptr: This parameter specifies the exec status for the I/O. 2251 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2252 * the LLDD with respect to completing this request or forcing an abort 2253 * condition on the I/O. 2254 * @open_rej_reason: This parameter specifies the encoded reason for the 2255 * abandon-class reject. 2256 * 2257 * none. 2258 */ 2259static void isci_request_set_open_reject_status( 2260 struct isci_request *request, 2261 struct sas_task *task, 2262 enum service_response *response_ptr, 2263 enum exec_status *status_ptr, 2264 enum isci_completion_selection *complete_to_host_ptr, 2265 enum sas_open_rej_reason open_rej_reason) 2266{ 2267 /* Task in the target is done. */ 2268 request->complete_in_target = true; 2269 *response_ptr = SAS_TASK_UNDELIVERED; 2270 *status_ptr = SAS_OPEN_REJECT; 2271 *complete_to_host_ptr = isci_perform_normal_io_completion; 2272 task->task_status.open_rej_reason = open_rej_reason; 2273} 2274 2275/** 2276 * isci_request_handle_controller_specific_errors() - This function decodes 2277 * controller-specific I/O completion error conditions. 2278 * @request: This parameter is the completed isci_request object. 2279 * @response_ptr: This parameter specifies the service response for the I/O. 2280 * @status_ptr: This parameter specifies the exec status for the I/O. 2281 * @complete_to_host_ptr: This parameter specifies the action to be taken by 2282 * the LLDD with respect to completing this request or forcing an abort 2283 * condition on the I/O. 2284 * 2285 * none. 2286 */ 2287static void isci_request_handle_controller_specific_errors( 2288 struct isci_remote_device *isci_device, 2289 struct isci_request *request, 2290 struct sas_task *task, 2291 enum service_response *response_ptr, 2292 enum exec_status *status_ptr, 2293 enum isci_completion_selection *complete_to_host_ptr) 2294{ 2295 unsigned int cstatus; 2296 2297 cstatus = request->sci.scu_status; 2298 2299 dev_dbg(&request->isci_host->pdev->dev, 2300 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2301 "- controller status = 0x%x\n", 2302 __func__, request, cstatus); 2303 2304 /* Decode the controller-specific errors; most 2305 * important is to recognize those conditions in which 2306 * the target may still have a task outstanding that 2307 * must be aborted. 2308 * 2309 * Note that there are SCU completion codes being 2310 * named in the decode below for which SCIC has already 2311 * done work to handle them in a way other than as 2312 * a controller-specific completion code; these are left 2313 * in the decode below for completeness sake. 2314 */ 2315 switch (cstatus) { 2316 case SCU_TASK_DONE_DMASETUP_DIRERR: 2317 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ 2318 case SCU_TASK_DONE_XFERCNT_ERR: 2319 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ 2320 if (task->task_proto == SAS_PROTOCOL_SMP) { 2321 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ 2322 *response_ptr = SAS_TASK_COMPLETE; 2323 2324 /* See if the device has been/is being stopped. Note 2325 * that we ignore the quiesce state, since we are 2326 * concerned about the actual device state. 
2327 */ 2328 if ((isci_device->status == isci_stopping) || 2329 (isci_device->status == isci_stopped)) 2330 *status_ptr = SAS_DEVICE_UNKNOWN; 2331 else 2332 *status_ptr = SAS_ABORTED_TASK; 2333 2334 request->complete_in_target = true; 2335 2336 *complete_to_host_ptr = 2337 isci_perform_normal_io_completion; 2338 } else { 2339 /* Task in the target is not done. */ 2340 *response_ptr = SAS_TASK_UNDELIVERED; 2341 2342 if ((isci_device->status == isci_stopping) || 2343 (isci_device->status == isci_stopped)) 2344 *status_ptr = SAS_DEVICE_UNKNOWN; 2345 else 2346 *status_ptr = SAM_STAT_TASK_ABORTED; 2347 2348 request->complete_in_target = false; 2349 2350 *complete_to_host_ptr = 2351 isci_perform_error_io_completion; 2352 } 2353 2354 break; 2355 2356 case SCU_TASK_DONE_CRC_ERR: 2357 case SCU_TASK_DONE_NAK_CMD_ERR: 2358 case SCU_TASK_DONE_EXCESS_DATA: 2359 case SCU_TASK_DONE_UNEXP_FIS: 2360 /* Also SCU_TASK_DONE_UNEXP_RESP: */ 2361 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ 2362 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ 2363 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ 2364 /* These are conditions in which the target 2365 * has completed the task, so that no cleanup 2366 * is necessary. 2367 */ 2368 *response_ptr = SAS_TASK_COMPLETE; 2369 2370 /* See if the device has been/is being stopped. Note 2371 * that we ignore the quiesce state, since we are 2372 * concerned about the actual device state. 2373 */ 2374 if ((isci_device->status == isci_stopping) || 2375 (isci_device->status == isci_stopped)) 2376 *status_ptr = SAS_DEVICE_UNKNOWN; 2377 else 2378 *status_ptr = SAS_ABORTED_TASK; 2379 2380 request->complete_in_target = true; 2381 2382 *complete_to_host_ptr = isci_perform_normal_io_completion; 2383 break; 2384 2385 2386 /* Note that the only open reject completion codes seen here will be 2387 * abandon-class codes; all others are automatically retried in the SCU. 2388 */ 2389 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: 2390 2391 isci_request_set_open_reject_status( 2392 request, task, response_ptr, status_ptr, 2393 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2394 break; 2395 2396 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2397 2398 /* Note - the return of AB0 will change when 2399 * libsas implements detection of zone violations. 
2400 */ 2401 isci_request_set_open_reject_status( 2402 request, task, response_ptr, status_ptr, 2403 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2404 break; 2405 2406 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2407 2408 isci_request_set_open_reject_status( 2409 request, task, response_ptr, status_ptr, 2410 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2411 break; 2412 2413 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2414 2415 isci_request_set_open_reject_status( 2416 request, task, response_ptr, status_ptr, 2417 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2418 break; 2419 2420 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2421 2422 isci_request_set_open_reject_status( 2423 request, task, response_ptr, status_ptr, 2424 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2425 break; 2426 2427 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2428 2429 isci_request_set_open_reject_status( 2430 request, task, response_ptr, status_ptr, 2431 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2432 break; 2433 2434 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2435 2436 isci_request_set_open_reject_status( 2437 request, task, response_ptr, status_ptr, 2438 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2439 break; 2440 2441 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2442 2443 isci_request_set_open_reject_status( 2444 request, task, response_ptr, status_ptr, 2445 complete_to_host_ptr, SAS_OREJ_EPROTO); 2446 break; 2447 2448 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2449 2450 isci_request_set_open_reject_status( 2451 request, task, response_ptr, status_ptr, 2452 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2453 break; 2454 2455 case SCU_TASK_DONE_LL_R_ERR: 2456 /* Also SCU_TASK_DONE_ACK_NAK_TO: */ 2457 case SCU_TASK_DONE_LL_PERR: 2458 case SCU_TASK_DONE_LL_SY_TERM: 2459 /* Also SCU_TASK_DONE_NAK_ERR:*/ 2460 case SCU_TASK_DONE_LL_LF_TERM: 2461 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ 2462 case SCU_TASK_DONE_LL_ABORT_ERR: 2463 case SCU_TASK_DONE_SEQ_INV_TYPE: 2464 /* Also SCU_TASK_DONE_UNEXP_XR: */ 2465 case SCU_TASK_DONE_XR_IU_LEN_ERR: 2466 case SCU_TASK_DONE_INV_FIS_LEN: 2467 /* Also SCU_TASK_DONE_XR_WD_LEN: */ 2468 case SCU_TASK_DONE_SDMA_ERR: 2469 case SCU_TASK_DONE_OFFSET_ERR: 2470 case SCU_TASK_DONE_MAX_PLD_ERR: 2471 case SCU_TASK_DONE_LF_ERR: 2472 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ 2473 case SCU_TASK_DONE_SMP_LL_RX_ERR: 2474 case SCU_TASK_DONE_UNEXP_DATA: 2475 case SCU_TASK_DONE_UNEXP_SDBFIS: 2476 case SCU_TASK_DONE_REG_ERR: 2477 case SCU_TASK_DONE_SDB_ERR: 2478 case SCU_TASK_DONE_TASK_ABORT: 2479 default: 2480 /* Task in the target is not done. */ 2481 *response_ptr = SAS_TASK_UNDELIVERED; 2482 *status_ptr = SAM_STAT_TASK_ABORTED; 2483 request->complete_in_target = false; 2484 2485 *complete_to_host_ptr = isci_perform_error_io_completion; 2486 break; 2487 } 2488} 2489 2490/** 2491 * isci_task_save_for_upper_layer_completion() - This function saves the 2492 * request for later completion to the upper layer driver. 2493 * @host: This parameter is a pointer to the host on which the the request 2494 * should be queued (either as an error or success). 2495 * @request: This parameter is the completed request. 2496 * @response: This parameter is the response code for the completed task. 2497 * @status: This parameter is the status code for the completed task. 2498 * 2499 * none. 
2500 */ 2501static void isci_task_save_for_upper_layer_completion( 2502 struct isci_host *host, 2503 struct isci_request *request, 2504 enum service_response response, 2505 enum exec_status status, 2506 enum isci_completion_selection task_notification_selection) 2507{ 2508 struct sas_task *task = isci_request_access_task(request); 2509 2510 task_notification_selection 2511 = isci_task_set_completion_status(task, response, status, 2512 task_notification_selection); 2513 2514 /* Tasks aborted specifically by a call to the lldd_abort_task 2515 * function should not be completed to the host in the regular path. 2516 */ 2517 switch (task_notification_selection) { 2518 2519 case isci_perform_normal_io_completion: 2520 2521 /* Normal notification (task_done) */ 2522 dev_dbg(&host->pdev->dev, 2523 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", 2524 __func__, 2525 task, 2526 task->task_status.resp, response, 2527 task->task_status.stat, status); 2528 /* Add to the completed list. */ 2529 list_add(&request->completed_node, 2530 &host->requests_to_complete); 2531 2532 /* Take the request off the device's pending request list. */ 2533 list_del_init(&request->dev_node); 2534 break; 2535 2536 case isci_perform_aborted_io_completion: 2537 /* No notification to libsas because this request is 2538 * already in the abort path. 2539 */ 2540 dev_warn(&host->pdev->dev, 2541 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", 2542 __func__, 2543 task, 2544 task->task_status.resp, response, 2545 task->task_status.stat, status); 2546 2547 /* Wake up whatever process was waiting for this 2548 * request to complete. 2549 */ 2550 WARN_ON(request->io_request_completion == NULL); 2551 2552 if (request->io_request_completion != NULL) { 2553 2554 /* Signal whoever is waiting that this 2555 * request is complete. 2556 */ 2557 complete(request->io_request_completion); 2558 } 2559 break; 2560 2561 case isci_perform_error_io_completion: 2562 /* Use sas_task_abort */ 2563 dev_warn(&host->pdev->dev, 2564 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", 2565 __func__, 2566 task, 2567 task->task_status.resp, response, 2568 task->task_status.stat, status); 2569 /* Add to the aborted list. */ 2570 list_add(&request->completed_node, 2571 &host->requests_to_errorback); 2572 break; 2573 2574 default: 2575 dev_warn(&host->pdev->dev, 2576 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", 2577 __func__, 2578 task, 2579 task->task_status.resp, response, 2580 task->task_status.stat, status); 2581 2582 /* Add to the error to libsas list. 
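		 * As with the explicit error case above, requests on this
		 * list are errored back to libsas through the sas_task_abort()
		 * path.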
*/ 2583 list_add(&request->completed_node, 2584 &host->requests_to_errorback); 2585 break; 2586 } 2587} 2588 2589static void isci_request_io_request_complete(struct isci_host *isci_host, 2590 struct isci_request *request, 2591 enum sci_io_status completion_status) 2592{ 2593 struct sas_task *task = isci_request_access_task(request); 2594 struct ssp_response_iu *resp_iu; 2595 void *resp_buf; 2596 unsigned long task_flags; 2597 struct isci_remote_device *isci_device = request->isci_device; 2598 enum service_response response = SAS_TASK_UNDELIVERED; 2599 enum exec_status status = SAS_ABORTED_TASK; 2600 enum isci_request_status request_status; 2601 enum isci_completion_selection complete_to_host 2602 = isci_perform_normal_io_completion; 2603 2604 dev_dbg(&isci_host->pdev->dev, 2605 "%s: request = %p, task = %p,\n" 2606 "task->data_dir = %d completion_status = 0x%x\n", 2607 __func__, 2608 request, 2609 task, 2610 task->data_dir, 2611 completion_status); 2612 2613 spin_lock(&request->state_lock); 2614 request_status = isci_request_get_state(request); 2615 2616 /* Decode the request status. Note that if the request has been 2617 * aborted by a task management function, we don't care 2618 * what the status is. 2619 */ 2620 switch (request_status) { 2621 2622 case aborted: 2623 /* "aborted" indicates that the request was aborted by a task 2624 * management function, since once a task management request is 2625 * perfomed by the device, the request only completes because 2626 * of the subsequent driver terminate. 2627 * 2628 * Aborted also means an external thread is explicitly managing 2629 * this request, so that we do not complete it up the stack. 2630 * 2631 * The target is still there (since the TMF was successful). 2632 */ 2633 request->complete_in_target = true; 2634 response = SAS_TASK_COMPLETE; 2635 2636 /* See if the device has been/is being stopped. Note 2637 * that we ignore the quiesce state, since we are 2638 * concerned about the actual device state. 2639 */ 2640 if ((isci_device->status == isci_stopping) 2641 || (isci_device->status == isci_stopped) 2642 ) 2643 status = SAS_DEVICE_UNKNOWN; 2644 else 2645 status = SAS_ABORTED_TASK; 2646 2647 complete_to_host = isci_perform_aborted_io_completion; 2648 /* This was an aborted request. */ 2649 2650 spin_unlock(&request->state_lock); 2651 break; 2652 2653 case aborting: 2654 /* aborting means that the task management function tried and 2655 * failed to abort the request. We need to note the request 2656 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2657 * target as down. 2658 * 2659 * Aborting also means an external thread is explicitly managing 2660 * this request, so that we do not complete it up the stack. 2661 */ 2662 request->complete_in_target = true; 2663 response = SAS_TASK_UNDELIVERED; 2664 2665 if ((isci_device->status == isci_stopping) || 2666 (isci_device->status == isci_stopped)) 2667 /* The device has been /is being stopped. Note that 2668 * we ignore the quiesce state, since we are 2669 * concerned about the actual device state. 2670 */ 2671 status = SAS_DEVICE_UNKNOWN; 2672 else 2673 status = SAS_PHY_DOWN; 2674 2675 complete_to_host = isci_perform_aborted_io_completion; 2676 2677 /* This was an aborted request. */ 2678 2679 spin_unlock(&request->state_lock); 2680 break; 2681 2682 case terminating: 2683 2684 /* This was an terminated request. 
This happens when 2685 * the I/O is being terminated because of an action on 2686 * the device (reset, tear down, etc.), and the I/O needs 2687 * to be completed up the stack. 2688 */ 2689 request->complete_in_target = true; 2690 response = SAS_TASK_UNDELIVERED; 2691 2692 /* See if the device has been/is being stopped. Note 2693 * that we ignore the quiesce state, since we are 2694 * concerned about the actual device state. 2695 */ 2696 if ((isci_device->status == isci_stopping) || 2697 (isci_device->status == isci_stopped)) 2698 status = SAS_DEVICE_UNKNOWN; 2699 else 2700 status = SAS_ABORTED_TASK; 2701 2702 complete_to_host = isci_perform_aborted_io_completion; 2703 2704 /* This was a terminated request. */ 2705 2706 spin_unlock(&request->state_lock); 2707 break; 2708 2709 default: 2710 2711 /* The request is done from an SCU HW perspective. */ 2712 request->status = completed; 2713 2714 spin_unlock(&request->state_lock); 2715 2716 /* This is an active request being completed from the core. */ 2717 switch (completion_status) { 2718 2719 case SCI_IO_FAILURE_RESPONSE_VALID: 2720 dev_dbg(&isci_host->pdev->dev, 2721 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", 2722 __func__, 2723 request, 2724 task); 2725 2726 if (sas_protocol_ata(task->task_proto)) { 2727 resp_buf = &request->sci.stp.rsp; 2728 isci_request_process_stp_response(task, 2729 resp_buf); 2730 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2731 2732 /* crack the iu response buffer. */ 2733 resp_iu = &request->sci.ssp.rsp; 2734 isci_request_process_response_iu(task, resp_iu, 2735 &isci_host->pdev->dev); 2736 2737 } else if (SAS_PROTOCOL_SMP == task->task_proto) { 2738 2739 dev_err(&isci_host->pdev->dev, 2740 "%s: SCI_IO_FAILURE_RESPONSE_VALID: " 2741 "SAS_PROTOCOL_SMP protocol\n", 2742 __func__); 2743 2744 } else 2745 dev_err(&isci_host->pdev->dev, 2746 "%s: unknown protocol\n", __func__); 2747 2748 /* use the task status set in the task struct by the 2749 * isci_request_process_response_iu call. 2750 */ 2751 request->complete_in_target = true; 2752 response = task->task_status.resp; 2753 status = task->task_status.stat; 2754 break; 2755 2756 case SCI_IO_SUCCESS: 2757 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2758 2759 response = SAS_TASK_COMPLETE; 2760 status = SAM_STAT_GOOD; 2761 request->complete_in_target = true; 2762 2763 if (task->task_proto == SAS_PROTOCOL_SMP) { 2764 void *rsp = &request->sci.smp.rsp; 2765 2766 dev_dbg(&isci_host->pdev->dev, 2767 "%s: SMP protocol completion\n", 2768 __func__); 2769 2770 sg_copy_from_buffer( 2771 &task->smp_task.smp_resp, 1, 2772 rsp, sizeof(struct smp_resp)); 2773 } else if (completion_status 2774 == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2775 2776 /* This was an SSP / STP / SATA transfer. 2777 * There is a possibility that less data than 2778 * the maximum was transferred. 2779 */ 2780 u32 transferred_length = sci_req_tx_bytes(&request->sci); 2781 2782 task->task_status.residual 2783 = task->total_xfer_len - transferred_length; 2784 2785 /* If there were residual bytes, call this an 2786 * underrun. 
2787 */ 2788 if (task->task_status.residual != 0) 2789 status = SAS_DATA_UNDERRUN; 2790 2791 dev_dbg(&isci_host->pdev->dev, 2792 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 2793 __func__, 2794 status); 2795 2796 } else 2797 dev_dbg(&isci_host->pdev->dev, 2798 "%s: SCI_IO_SUCCESS\n", 2799 __func__); 2800 2801 break; 2802 2803 case SCI_IO_FAILURE_TERMINATED: 2804 dev_dbg(&isci_host->pdev->dev, 2805 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2806 __func__, 2807 request, 2808 task); 2809 2810 /* The request was terminated explicitly. No handling 2811 * is needed in the SCSI error handler path. 2812 */ 2813 request->complete_in_target = true; 2814 response = SAS_TASK_UNDELIVERED; 2815 2816 /* See if the device has been/is being stopped. Note 2817 * that we ignore the quiesce state, since we are 2818 * concerned about the actual device state. 2819 */ 2820 if ((isci_device->status == isci_stopping) || 2821 (isci_device->status == isci_stopped)) 2822 status = SAS_DEVICE_UNKNOWN; 2823 else 2824 status = SAS_ABORTED_TASK; 2825 2826 complete_to_host = isci_perform_normal_io_completion; 2827 break; 2828 2829 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 2830 2831 isci_request_handle_controller_specific_errors( 2832 isci_device, request, task, &response, &status, 2833 &complete_to_host); 2834 2835 break; 2836 2837 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 2838 /* This is a special case, in that the I/O completion 2839 * is telling us that the device needs a reset. 2840 * In order for the device reset condition to be 2841 * noticed, the I/O has to be handled in the error 2842 * handler. Set the reset flag and cause the 2843 * SCSI error thread to be scheduled. 2844 */ 2845 spin_lock_irqsave(&task->task_state_lock, task_flags); 2846 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 2847 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 2848 2849 /* Fail the I/O. */ 2850 response = SAS_TASK_UNDELIVERED; 2851 status = SAM_STAT_TASK_ABORTED; 2852 2853 complete_to_host = isci_perform_error_io_completion; 2854 request->complete_in_target = false; 2855 break; 2856 2857 default: 2858 /* Catch any otherwise unhandled error codes here. */ 2859 dev_warn(&isci_host->pdev->dev, 2860 "%s: invalid completion code: 0x%x - " 2861 "isci_request = %p\n", 2862 __func__, completion_status, request); 2863 2864 response = SAS_TASK_UNDELIVERED; 2865 2866 /* See if the device has been/is being stopped. Note 2867 * that we ignore the quiesce state, since we are 2868 * concerned about the actual device state. 2869 */ 2870 if ((isci_device->status == isci_stopping) || 2871 (isci_device->status == isci_stopped)) 2872 status = SAS_DEVICE_UNKNOWN; 2873 else 2874 status = SAS_ABORTED_TASK; 2875 2876 complete_to_host = isci_perform_error_io_completion; 2877 request->complete_in_target = false; 2878 break; 2879 } 2880 break; 2881 } 2882 2883 isci_request_unmap_sgl(request, isci_host->pdev); 2884 2885 /* Put the completed request on the correct list */ 2886 isci_task_save_for_upper_layer_completion(isci_host, request, response, 2887 status, complete_to_host 2888 ); 2889 2890 /* complete the io request to the core. */ 2891 scic_controller_complete_io(&isci_host->sci, 2892 &isci_device->sci, 2893 &request->sci); 2894 /* set terminated handle so it cannot be completed or 2895 * terminated again, and to cause any calls into abort 2896 * task to recognize the already completed case. 
2897 */ 2898 request->terminated = true; 2899 2900 isci_host_can_dequeue(isci_host, 1); 2901} 2902 2903static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) 2904{ 2905 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2906 struct isci_request *ireq = sci_req_to_ireq(sci_req); 2907 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device); 2908 struct sas_task *task; 2909 2910 /* XXX as hch said always creating an internal sas_task for tmf 2911 * requests would simplify the driver 2912 */ 2913 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; 2914 2915 /* all unaccelerated request types (non ssp or ncq) handled with 2916 * substates 2917 */ 2918 if (!task && dev->dev_type == SAS_END_DEV) { 2919 sci_base_state_machine_change_state(sm, 2920 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION); 2921 } else if (!task && 2922 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || 2923 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { 2924 sci_base_state_machine_change_state(sm, 2925 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE); 2926 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2927 sci_base_state_machine_change_state(sm, 2928 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE); 2929 } else if (task && sas_protocol_ata(task->task_proto) && 2930 !task->ata_task.use_ncq) { 2931 u32 state; 2932 2933 if (task->data_dir == DMA_NONE) 2934 state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE; 2935 else if (task->ata_task.dma_xfer) 2936 state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE; 2937 else /* PIO */ 2938 state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE; 2939 2940 sci_base_state_machine_change_state(sm, state); 2941 } 2942} 2943 2944static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) 2945{ 2946 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2947 struct scic_sds_controller *scic = sci_req->owning_controller; 2948 struct isci_host *ihost = scic_to_ihost(scic); 2949 struct isci_request *ireq = sci_req_to_ireq(sci_req); 2950 2951 /* Tell the SCI_USER that the IO request is complete */ 2952 if (sci_req->is_task_management_request == false) 2953 isci_request_io_request_complete(ihost, ireq, 2954 sci_req->sci_status); 2955 else 2956 isci_task_request_complete(ihost, ireq, sci_req->sci_status); 2957} 2958 2959static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) 2960{ 2961 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2962 2963 /* Setting the abort bit in the Task Context is required by the silicon. 
*/ 2964 sci_req->task_context_buffer->abort = 1; 2965} 2966 2967static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2968{ 2969 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2970 2971 scic_sds_remote_device_set_working_request(sci_req->target_device, 2972 sci_req); 2973} 2974 2975static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2976{ 2977 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2978 2979 scic_sds_remote_device_set_working_request(sci_req->target_device, 2980 sci_req); 2981} 2982 2983static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) 2984{ 2985 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2986 2987 scic_sds_remote_device_set_working_request(sci_req->target_device, 2988 sci_req); 2989} 2990 2991static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) 2992{ 2993 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine); 2994 struct scu_task_context *task_context; 2995 struct host_to_dev_fis *h2d_fis; 2996 enum sci_status status; 2997 2998 /* Clear the SRST bit */ 2999 h2d_fis = &sci_req->stp.cmd; 3000 h2d_fis->control = 0; 3001 3002 /* Clear the TC control bit */ 3003 task_context = scic_sds_controller_get_task_context_buffer( 3004 sci_req->owning_controller, sci_req->io_tag); 3005 task_context->control_frame = 0; 3006 3007 status = scic_controller_continue_io(sci_req); 3008 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); 3009} 3010 3011static const struct sci_base_state scic_sds_request_state_table[] = { 3012 [SCI_BASE_REQUEST_STATE_INITIAL] = { }, 3013 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { }, 3014 [SCI_BASE_REQUEST_STATE_STARTED] = { 3015 .enter_state = scic_sds_request_started_state_enter, 3016 }, 3017 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { 3018 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, 3019 }, 3020 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { }, 3021 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { 3022 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, 3023 }, 3024 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { }, 3025 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { }, 3026 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { }, 3027 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { }, 3028 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { }, 3029 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { 3030 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, 3031 }, 3032 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { 3033 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, 3034 }, 3035 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { }, 3036 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { }, 3037 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { }, 3038 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { }, 3039 
[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { }, 3040 [SCI_BASE_REQUEST_STATE_COMPLETED] = { 3041 .enter_state = scic_sds_request_completed_state_enter, 3042 }, 3043 [SCI_BASE_REQUEST_STATE_ABORTING] = { 3044 .enter_state = scic_sds_request_aborting_state_enter, 3045 }, 3046 [SCI_BASE_REQUEST_STATE_FINAL] = { }, 3047}; 3048 3049static void scic_sds_general_request_construct(struct scic_sds_controller *scic, 3050 struct scic_sds_remote_device *sci_dev, 3051 u16 io_tag, struct scic_sds_request *sci_req) 3052{ 3053 sci_base_state_machine_construct(&sci_req->state_machine, 3054 scic_sds_request_state_table, 3055 SCI_BASE_REQUEST_STATE_INITIAL); 3056 sci_base_state_machine_start(&sci_req->state_machine); 3057 3058 sci_req->io_tag = io_tag; 3059 sci_req->owning_controller = scic; 3060 sci_req->target_device = sci_dev; 3061 sci_req->protocol = SCIC_NO_PROTOCOL; 3062 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3063 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev); 3064 3065 sci_req->sci_status = SCI_SUCCESS; 3066 sci_req->scu_status = 0; 3067 sci_req->post_context = 0xFFFFFFFF; 3068 3069 sci_req->is_task_management_request = false; 3070 3071 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 3072 sci_req->was_tag_assigned_by_user = false; 3073 sci_req->task_context_buffer = &sci_req->tc; 3074 } else { 3075 sci_req->was_tag_assigned_by_user = true; 3076 3077 sci_req->task_context_buffer = 3078 scic_sds_controller_get_task_context_buffer(scic, io_tag); 3079 } 3080} 3081 3082static enum sci_status 3083scic_io_request_construct(struct scic_sds_controller *scic, 3084 struct scic_sds_remote_device *sci_dev, 3085 u16 io_tag, struct scic_sds_request *sci_req) 3086{ 3087 struct domain_device *dev = sci_dev_to_domain(sci_dev); 3088 enum sci_status status = SCI_SUCCESS; 3089 3090 /* Build the common part of the request */ 3091 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 3092 3093 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3094 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3095 3096 if (dev->dev_type == SAS_END_DEV) 3097 /* pass */; 3098 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 3099 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); 3100 else if (dev_is_expander(dev)) 3101 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd)); 3102 else 3103 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3104 3105 memset(sci_req->task_context_buffer, 0, 3106 offsetof(struct scu_task_context, sgl_pair_ab)); 3107 3108 return status; 3109} 3110 3111enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, 3112 struct scic_sds_remote_device *sci_dev, 3113 u16 io_tag, struct scic_sds_request *sci_req) 3114{ 3115 struct domain_device *dev = sci_dev_to_domain(sci_dev); 3116 enum sci_status status = SCI_SUCCESS; 3117 3118 /* Build the common part of the request */ 3119 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req); 3120 3121 if (dev->dev_type == SAS_END_DEV || 3122 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 3123 sci_req->is_task_management_request = true; 3124 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); 3125 } else 3126 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3127 3128 return status; 3129} 3130 3131static enum sci_status isci_request_ssp_request_construct( 3132 struct isci_request *request) 3133{ 3134 enum sci_status status; 3135 3136 dev_dbg(&request->isci_host->pdev->dev, 3137 "%s: request = %p\n", 
3138 __func__,
3139 request);
3140 status = scic_io_request_construct_basic_ssp(&request->sci);
3141 return status;
3142}
3143
3144static enum sci_status isci_request_stp_request_construct(
3145 struct isci_request *request)
3146{
3147 struct sas_task *task = isci_request_access_task(request);
3148 enum sci_status status;
3149 struct host_to_dev_fis *register_fis;
3150
3151 dev_dbg(&request->isci_host->pdev->dev,
3152 "%s: request = %p\n",
3153 __func__,
3154 request);
3155
3156 /* Get the host_to_dev_fis from the core and copy
3157 * the fis from the task into it.
3158 */
3159 register_fis = isci_sata_task_to_fis_copy(task);
3160
3161 status = scic_io_request_construct_basic_sata(&request->sci);
3162
3163 /* Set the ncq tag in the fis, from the queue
3164 * command in the task.
3165 */
3166 if (isci_sata_is_task_ncq(task)) {
3167
3168 isci_sata_set_ncq_tag(
3169 register_fis,
3170 task
3171 );
3172 }
3173
3174 return status;
3175}
3176
3177/*
3178 * This function will fill in the SCU Task Context for an SMP request. The
3179 * following important settings are utilized: -# task_type ==
3180 * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type
3181 * (i.e. non-raw frame) is being utilized to perform task management. -#
3182 * control_frame == 1. This ensures that the proper endianness is set so
3183 * that the bytes are transmitted in the right order for an SMP request frame.
3184 * @sci_req: This parameter specifies the smp request object being
3185 * constructed.
3186 *
3187 */
3188static void
3189scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
3190 struct smp_req *smp_req)
3191{
3192 dma_addr_t dma_addr;
3193 struct scic_sds_controller *scic;
3194 struct scic_sds_remote_device *sci_dev;
3195 struct scic_sds_port *sci_port;
3196 struct scu_task_context *task_context;
3197 ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
3198
3199 /* byte swap the smp request.
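	 * (sci_swab32_cpy() converts the command dword-by-dword so the bytes
	 * go out on the wire in the right order; see the control_frame note
	 * above).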
*/ 3200 sci_swab32_cpy(&sci_req->smp.cmd, smp_req, 3201 word_cnt); 3202 3203 task_context = scic_sds_request_get_task_context(sci_req); 3204 3205 scic = scic_sds_request_get_controller(sci_req); 3206 sci_dev = scic_sds_request_get_device(sci_req); 3207 sci_port = scic_sds_request_get_port(sci_req); 3208 3209 /* 3210 * Fill in the TC with the its required data 3211 * 00h 3212 */ 3213 task_context->priority = 0; 3214 task_context->initiator_request = 1; 3215 task_context->connection_rate = sci_dev->connection_rate; 3216 task_context->protocol_engine_index = 3217 scic_sds_controller_get_protocol_engine_group(scic); 3218 task_context->logical_port_index = scic_sds_port_get_index(sci_port); 3219 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3220 task_context->abort = 0; 3221 task_context->valid = SCU_TASK_CONTEXT_VALID; 3222 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3223 3224 /* 04h */ 3225 task_context->remote_node_index = sci_dev->rnc.remote_node_index; 3226 task_context->command_code = 0; 3227 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3228 3229 /* 08h */ 3230 task_context->link_layer_control = 0; 3231 task_context->do_not_dma_ssp_good_response = 1; 3232 task_context->strict_ordering = 0; 3233 task_context->control_frame = 1; 3234 task_context->timeout_enable = 0; 3235 task_context->block_guard_enable = 0; 3236 3237 /* 0ch */ 3238 task_context->address_modifier = 0; 3239 3240 /* 10h */ 3241 task_context->ssp_command_iu_length = smp_req->req_len; 3242 3243 /* 14h */ 3244 task_context->transfer_length_bytes = 0; 3245 3246 /* 3247 * 18h ~ 30h, protocol specific 3248 * since commandIU has been build by framework at this point, we just 3249 * copy the frist DWord from command IU to this location. */ 3250 memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32)); 3251 3252 /* 3253 * 40h 3254 * "For SMP you could program it to zero. We would prefer that way 3255 * so that done code will be consistent." - Venki 3256 */ 3257 task_context->task_phase = 0; 3258 3259 if (sci_req->was_tag_assigned_by_user) { 3260 /* 3261 * Build the task context now since we have already read 3262 * the data 3263 */ 3264 sci_req->post_context = 3265 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3266 (scic_sds_controller_get_protocol_engine_group(scic) << 3267 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3268 (scic_sds_port_get_index(sci_port) << 3269 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3270 scic_sds_io_tag_get_index(sci_req->io_tag)); 3271 } else { 3272 /* 3273 * Build the task context now since we have already read 3274 * the data. 3275 * I/O tag index is not assigned because we have to wait 3276 * until we get a TCi. 3277 */ 3278 sci_req->post_context = 3279 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3280 (scic_sds_controller_get_protocol_engine_group(scic) << 3281 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3282 (scic_sds_port_get_index(sci_port) << 3283 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); 3284 } 3285 3286 /* 3287 * Copy the physical address for the command buffer to the SCU Task 3288 * Context command buffer should not contain command header. 3289 */ 3290 dma_addr = scic_io_request_get_dma_addr(sci_req, 3291 ((char *) &sci_req->smp.cmd) + 3292 sizeof(u32)); 3293 3294 task_context->command_iu_upper = upper_32_bits(dma_addr); 3295 task_context->command_iu_lower = lower_32_bits(dma_addr); 3296 3297 /* SMP response comes as UF, so no need to set response IU address. 
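	 * ("UF" = unsolicited frame: the response is picked up in
	 * scic_sds_io_request_frame_handler() rather than being written to a
	 * response buffer by the hardware).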
static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
{
	struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);

	if (!smp_req)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	sci_req->protocol = SCIC_SMP_PROTOCOL;

	/* Construct the SMP SCU Task Context */
	memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));

	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
			/* Default - zero is a valid default for 2.0. */
		}
	}

	scu_smp_request_construct_task_context(sci_req, smp_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	kfree(smp_req);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 * request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	enum sci_status status = SCI_FAILURE;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scic_sds_request *sci_req = &ireq->sci;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: request = %p\n", __func__, ireq);

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: smp_req len = %d\n",
		__func__,
		task->smp_task.smp_req.length);

	/* copy the smp request into the core request's command IU */
	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
			  &sci_req->smp.cmd,
			  sizeof(struct smp_req));

	status = scic_io_request_construct_smp(sci_req);
	if (status != SCI_SUCCESS)
		dev_warn(&ireq->isci_host->pdev->dev,
			 "%s: failed with status = %d\n",
			 __func__,
			 status);

	return status;
}

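/*
 * Background note (an assumption about the libsas SMP path, not spelled
 * out in this file): libsas hands the expander request over as a single
 * scatterlist entry in task->smp_task.smp_req, which is why the one-entry
 *
 *	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
 *			  &sci_req->smp.cmd, sizeof(struct smp_req));
 *
 * above is sufficient.  The SMP response arrives from the hardware as an
 * unsolicited frame and is expected to be copied back into
 * task->smp_task.smp_resp when the request completes.
 */
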
/**
 * isci_io_request_build() - This function builds the io request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 * request construct function.
 * @isci_device: This parameter is the handle for the sci core's remote device
 * object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(
	struct isci_host *isci_host,
	struct isci_request *request,
	struct isci_remote_device *isci_device)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);
	struct scic_sds_remote_device *sci_device = &isci_device->sci;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		isci_device,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&isci_host->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	/* build the common request object. For now,
	 * we will let the core allocate the IO tag.
	 */
	status = scic_io_request_construct(&isci_host->sci, sci_device,
					   SCI_CONTROLLER_INVALID_IO_TAG,
					   &request->sci);

	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request construct\n",
			 __func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_warn(&isci_host->pdev->dev,
			 "%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return SCI_SUCCESS;
}

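/*
 * Note on the mapping rule above (a summary plus one assumption): only
 * SSP payloads get dma_map_sg()'d here.  SATA/STP frames arrive already
 * DMA-mapped by libata (as the comment in the function says), and SMP
 * requests are copied into the command IU by isci_smp_request_build(),
 * so neither takes this branch.  The matching dma_unmap_sg() for the SSP
 * case is expected to happen when the request completes, not here.
 */
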
/**
 * isci_request_alloc_core() - This function gets the request object from the
 * isci_host dma cache.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter will contain the pointer to the new
 * isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 * that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or a negative error code.
 */
static int isci_request_alloc_core(
	struct isci_host *isci_host,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int ret = 0;
	dma_addr_t handle;
	struct isci_request *request;


	/* get pointer to dma memory.  This actually points
	 * to both the isci_request object and the core's
	 * request object.  The isci object is at the beginning
	 * of the memory allocated here.
	 */
	request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
	if (!request) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: dma_pool_alloc returned NULL\n", __func__);
		return -ENOMEM;
	}

	/* initialize the request object. */
	spin_lock_init(&request->state_lock);
	request->request_daddr = handle;
	request->isci_host = isci_host;
	request->isci_device = isci_device;
	request->io_request_completion = NULL;
	request->terminated = false;

	request->num_sg_entries = 0;

	request->complete_in_target = false;

	INIT_LIST_HEAD(&request->completed_node);
	INIT_LIST_HEAD(&request->dev_node);

	*isci_request = request;
	isci_request_change_state(request, allocated);

	return ret;
}

static int isci_request_alloc_io(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {
		(*isci_request)->ttype_ptr.io_task_ptr = task;
		(*isci_request)->ttype = io_task;

		task->lldd_task = *isci_request;
	}
	return retval;
}

/**
 * isci_request_alloc_tmf() - This function gets the request object from the
 * isci_host dma cache and initializes it as a task management request.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_tmf: This parameter is the task management request object from this
 * driver.
 * @isci_request: This parameter will contain the pointer to the new
 * isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 * that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or a negative error code.
 */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {

		(*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
		(*isci_request)->ttype = tmf_task;
	}
	return retval;
}

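/*
 * Illustrative sketch (an assumption; the real accessors are defined in
 * request.h): ttype acts as the discriminator for the ttype_ptr union
 * filled in above, with accessors roughly along the lines of
 *
 *	#define isci_request_access_task(req)	((req)->ttype_ptr.io_task_ptr)
 *	#define isci_request_access_tmf(req)	((req)->ttype_ptr.tmf_task_ptr)
 *
 * so callers must check ttype (io_task vs. tmf_task) before deciding
 * which pointer is valid.
 */
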
/**
 * isci_request_execute() - This function allocates the isci_request object,
 * fills in some common fields and starts the request.
 * @isci_host: This parameter specifies the ISCI host object
 * @task: This parameter is the task struct from the upper layer driver.
 * @isci_request: This parameter will contain the pointer to the new
 * isci_request object.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * 0 on successful completion, or specific failure code.
 */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	gfp_t gfp_flags)
{
	int ret = 0;
	struct scic_sds_remote_device *sci_device;
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_remote_device *isci_device;
	struct isci_request *request;
	unsigned long flags;

	isci_device = task->dev->lldd_dev;
	sci_device = &isci_device->sci;

	/* do common allocation and init of request object. */
	ret = isci_request_alloc_io(
		isci_host,
		task,
		&request,
		isci_device,
		gfp_flags
		);

	if (ret)
		goto out;

	status = isci_io_request_build(isci_host, request, isci_device);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		goto out;
	}

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* send the request, let the core assign the IO TAG. */
	status = scic_controller_start_io(&isci_host->sci, sci_device,
					  &request->sci,
					  SCI_CONTROLLER_INVALID_IO_TAG);
	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
		goto out;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update its status and add it to the list in the
	 * remote device object.
	 */
	isci_request_change_state(request, started);
	list_add(&request->dev_node, &isci_device->reqs_in_process);

	if (status == SCI_SUCCESS) {
		/* Save the tag for possible task mgmt later. */
		request->io_tag = request->sci.io_tag;
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		request->terminated = true;
	}
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(isci_host, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

 out:
	if (status != SCI_SUCCESS) {
		/* release dma memory on failure. */
		isci_request_free(isci_host, request);
		request = NULL;
		ret = SCI_FAILURE;
	}

	*isci_request = request;
	return ret;
}
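
/*
 * Minimal caller sketch (an assumption for illustration; the real call
 * site lives in task.c): libsas hands new commands to this driver through
 * its lldd_execute_task entry point, which ends up doing roughly
 *
 *	struct isci_request *request;
 *	int ret;
 *
 *	ret = isci_request_execute(isci_host, task, &request, gfp_flags);
 *	if (ret)
 *		... fail the sas_task and let libsas retry ...
 *
 * On success the request has either been posted to the controller or is
 * being held for the SCSI error handler to issue a device reset.
 */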