request.c revision 12ef65444de9d387a383b9991960848bed5bbe74
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * scic_sds_request_get_sgl_element_pair - return the SGL element pair for
 *    the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index of the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index
	) {
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}
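
/* For illustration: how pair indices map onto storage, as follows directly
 * from the lookup above (the first two pairs are embedded in the task
 * context itself, the remainder spill into the request's sg_table).
 *
 *   sgl_pair_index 0 -> task_context->sgl_pair_ab   (embedded in the TC)
 *   sgl_pair_index 1 -> task_context->sgl_pair_cd   (embedded in the TC)
 *   sgl_pair_index n -> sci_req->sg_table[n - 2]    (external table, n >= 2)
 */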

/**
 * scic_sds_request_build_sgl - build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg   = NULL;
	struct scu_sgl_element_pair *prev_sg  = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
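
/* For illustration: the structure the loop above produces for a four-entry
 * scatterlist (a sketch; element names follow the code above).
 *
 *   pair 0 (TC, sgl_pair_ab): A = sg[0], B = sg[1], next -> pair 1
 *   pair 1 (TC, sgl_pair_cd): A = sg[2], B = sg[3], next -> 0 (end)
 *
 * Each pair's next_pair_{upper,lower} holds the 64-bit DMA address of the
 * following pair; the final pair is zero-terminated by the tail code above.
 */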

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * scu_ssp_reqeust_construct_task_context - fill in the SCU Task Context for
 *    any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_reqeust_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							owning_controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
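
/* For illustration: how the post_context word built above decomposes, as a
 * sketch assuming only the shift macros named in the code (field widths are
 * defined elsewhere in the driver).
 *
 *   post_context = REQUEST_TYPE_POST_TC
 *                | (protocol engine group << PROTOCOL_ENGINE_GROUP_SHIFT)
 *                | (logical port index    << LOGICAL_PORT_SHIFT)
 *                | TCi (task context index; OR'ed in later, at start time,
 *                       when the tag was not assigned by the user)
 */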

/**
 * scu_ssp_io_request_construct_task_context - fill in the SCU Task Context
 *    for an SSP IO request.
 * @sci_req:
 * @dir:
 * @len:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * scu_ssp_task_request_construct_task_context - fill in the SCU Task Context
 *    for an SSP Task request.  The following important settings are utilized:
 *
 *    - priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same Remote
 *      Node.
 *    - task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw
 *      frame is being utilized to perform task management.
 *    - control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame                = 1;
	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes        = 0;
	task_context->type.ssp.frame_type          = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_reqeust_construct_task_context - fill in the SCU Task Context for
 *    any type of SATA request.  This is called from the various SATA
 *    constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment for
 * the command buffer is complete. Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
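
/* For illustration: the H2D register FIS split performed above, as a sketch.
 * The first dword of the FIS travels inside the TC itself via type.words[0];
 * the remaining sizeof(struct host_to_dev_fis) - 4 bytes are fetched by DMA
 * from stp.cmd + 4, which is why the command IU address is offset by
 * sizeof(u32) and ssp_command_iu_length is one dword short of the full FIS.
 */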

/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	task_context->control_frame         = 0;
	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type     = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}

/**
 * scic_sds_stp_optimized_request_construct -
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_reqeust_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values are consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}
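
/* For illustration: the task-type arithmetic above, assuming only the value
 * spacing stated in the in-code comment (read/write task-type pairs differ
 * by the same constant).
 *
 *   dir == DMA_TO_DEVICE, optimized_task_type == SCU_TASK_TYPE_DMA_IN
 *     -> task_type == SCU_TASK_TYPE_DMA_OUT
 *   dir == DMA_TO_DEVICE, optimized_task_type == SCU_TASK_TYPE_FPDMAQ_READ
 *     -> task_type == the corresponding FPDMA write task type
 */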

static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}
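
/* For illustration: the construction decision tree implemented above.
 *
 *   TMF (SATA SRST high/low)   -> raw H2D register frame
 *   non-data (DMA_NONE)        -> raw H2D register frame
 *   NCQ (ata_task.use_ncq)     -> optimized construct, FPDMAQ read type
 *   DMA (ata_task.dma_xfer)    -> optimized construct, DMA IN type
 *   otherwise                  -> PIO construction
 */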

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	bool copy;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = task->data_dir != DMA_NONE;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}
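
/* Worked example (illustrative; values follow the comment above, which gives
 * a 256-byte (0x100) task context and the data_offset field at 0x2C):
 *
 *   TCi == 2  ->  read from BAR1 + 0x200000 + 2 * 0x100 + 0x2C
 *             ==  BAR1 + 0x20022C
 */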

enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scu_task_context *task_context;
	enum sci_base_request_states state;

	if (sci_req->device_sequence !=
	    scic_sds_remote_device_get_sequence(sci_req->target_device))
		return SCI_FAILURE;

	state = sci_req->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(scic_to_dev(scic),
			"%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* If necessary, allocate a TCi for the io request object and then,
	 * if necessary, copy the constructed TC data into the actual TC buffer.
	 * If everything is successful the post context field is updated with
	 * the TCi so the controller can post the request to the hardware.
	 */
	if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		sci_req->io_tag = scic_controller_allocate_io_tag(scic);

	/* Record the IO Tag in the request */
	if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = sci_req->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = sci_req->io_tag;
			task_context->type.ssp.target_port_transfer_tag =
				0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/* STP/SATA Frame
			 * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
			 */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* TODO: When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO
			 * requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (sci_req->was_tag_assigned_by_user == false)
			scic_sds_controller_copy_task_context(scic, sci_req);

		/* Add to the post_context the io tag value */
		sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag);

		/* Everything is good go ahead and change state */
		sci_change_state(&sci_req->sm, SCI_REQ_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;

	state = sci_req->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		scic_sds_request_set_status(sci_req,
			SCU_TASK_DONE_TASK_ABORT,
			SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
		sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 sci_req->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	state = sci_req->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (!sci_req->was_tag_assigned_by_user)
		scic_controller_free_io_tag(scic, sci_req->io_tag);

	if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		scic_sds_controller_release_frame(scic,
						  sci_req->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&sci_req->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	state = sci_req->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(scic_to_dev(scic),
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct scic_sds_request *sci_req,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine the SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS);
		break;
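
	/* Note (illustrative, based on our reading of the SAS SSP response
	 * IU definition; not stated in this file): DATAPRES == 1 indicates
	 * response data is present and DATAPRES == 2 indicates sense data is
	 * present, so either value carries a valid response worth surfacing
	 * as SCI_FAILURE_IO_RESPONSE_VALID above.
	 */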
	/* only the stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp devices get suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(sci_req,
					    SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					    SCU_COMPLETION_TL_STATUS_SHIFT,
					    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct scic_sds_request *sci_req,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, sci_req,
			 completion_code);

		sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen with a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms. This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses. For these types of error,
		 * we ask the scic user to retry the request.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* All other completion statuses cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		scic_sds_request_set_status(sci_req,
					    SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct scic_sds_request *sci_req,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
					    SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
				     u16 ncq_tag)
{
	/* Note: This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}

/**
 * scic_sds_stp_request_pio_get_next_sgl -
 * @stp_req:
 *
 * Get the next SGL element from the request:
 *    - Check on which SGL element pair we are working.
 *    - If working on SGL pair element A, advance to element B.
 *    - Else, check to see if there are more SGL element pairs for this IO
 *      request; if there are, advance to the next pair and return element A.
 *
 * Returns a struct scu_sgl_element *, or NULL when the list is exhausted.
 */
static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
{
	struct scu_sgl_element *current_sgl;
	struct scic_sds_request *sci_req = to_sci_req(stp_req);
	struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;

	if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
		if (pio_sgl->sgl_pair->B.address_lower == 0 &&
		    pio_sgl->sgl_pair->B.address_upper == 0) {
			current_sgl = NULL;
		} else {
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
			current_sgl = &pio_sgl->sgl_pair->B;
		}
	} else {
		if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
		    pio_sgl->sgl_pair->next_pair_upper == 0) {
			current_sgl = NULL;
		} else {
			u64 phys_addr;

			phys_addr = pio_sgl->sgl_pair->next_pair_upper;
			phys_addr <<= 32;
			phys_addr |= pio_sgl->sgl_pair->next_pair_lower;

			pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
			current_sgl = &pio_sgl->sgl_pair->A;
		}
	}

	return current_sgl;
}
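
/* For illustration: traversal order produced by the helper above over the
 * chained-pair layout built by scic_sds_request_build_sgl():
 *
 *   pair0.A -> pair0.B -> pair1.A -> pair1.B -> ... -> NULL
 *
 * A zeroed B element or a zero next-pair address terminates the walk.
 */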

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
					    SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length. current sgl and offset are already stored in the IO request
 */
static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
	struct scic_sds_request *sci_req,
	u32 length)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scu_task_context *task_context;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	task_context = scic_sds_controller_get_task_context_buffer(scic,
								   sci_req->io_tag);

	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
	else
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return scic_controller_continue_io(sci_req);
}
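
/* Note (illustrative): when the TC is recycled above for a DATA FIS, the
 * command IU address fields are repurposed to point at the payload described
 * by the current SGL element, so no separate command buffer is needed for
 * the data phase.
 */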

static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
{
	struct scu_sgl_element *current_sgl;
	u32 sgl_offset;
	u32 remaining_bytes_in_current_sgl = 0;
	enum sci_status status = SCI_SUCCESS;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	sgl_offset = stp_req->type.pio.request_current.sgl_offset;

	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
	} else {
		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
	}

	if (stp_req->type.pio.pio_transfer_bytes > 0) {
		if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
			status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
			if (status == SCI_SUCCESS) {
				stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;

				/* update the current sgl, sgl_offset and save for future */
				current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
				sgl_offset = 0;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
			status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);

			if (status == SCI_SUCCESS) {
				/* Sgl offset will be adjusted and saved for future */
				sgl_offset += stp_req->type.pio.pio_transfer_bytes;
				current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
				stp_req->type.pio.pio_transfer_bytes = 0;
			}
		}
	}

	if (status == SCI_SUCCESS) {
		stp_req->type.pio.request_current.sgl_offset = sgl_offset;
	}

	return status;
}

/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer -
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request SGL
 * specified data region. Returns enum sci_status.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 * scic_sds_stp_request_pio_data_in_copy_data -
 * @sci_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region. Returns enum sci_status.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
	struct scic_sds_stp_request *sci_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
			sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);

		if (status == SCI_SUCCESS)
			sci_req->type.pio.pio_transfer_bytes = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
			sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}
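
/* Worked example (illustrative): with SCU_MAX_FRAME_BUFFER_SIZE == 0x400
 * (1024) and 2600 bytes left to transfer, the helper above consumes two full
 * 1K frames (2600 -> 1576 -> 552) and then a final short copy of 552 bytes,
 * which zeroes pio_transfer_bytes.
 */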

static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
					    SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->type.pio.pio_transfer_bytes != 0) {
			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
			if (status == SCI_SUCCESS) {
				if (stp_req->type.pio.pio_transfer_bytes == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * this will happen if all the data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP FIS or D2H Reg FIS. */
			sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_change_state(&request->sm, SCI_REQ_COMPLETED);
}

static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
								       u32 frame_index)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);
	}

	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1614
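/*
 * Unsolicited frame dispatcher.  How a received frame is decoded depends
 * entirely on the request's current state: SSP response frames are byte
 * swapped into the response IU, SMP responses are copied out of the
 * unsolicited frame buffers, and the STP states decode PIO Setup, Data,
 * and D2H Register FISes.  Most paths release the frame back to the
 * controller before returning; the PIO data-in path may instead stash the
 * frame index in saved_rx_frame_index for later processing.
 */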
1615enum sci_status
1616scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1617				  u32 frame_index)
1618{
1619	struct scic_sds_controller *scic = sci_req->owning_controller;
1620	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1621	enum sci_base_request_states state;
1622	enum sci_status status;
1623	ssize_t word_cnt;
1624
1625	state = sci_req->sm.current_state_id;
1626	switch (state)  {
1627	case SCI_REQ_STARTED: {
1628		struct ssp_frame_hdr ssp_hdr;
1629		void *frame_header;
1630
1631		scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1632							      frame_index,
1633							      &frame_header);
1634
1635		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1636		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1637
1638		if (ssp_hdr.frame_type == SSP_RESPONSE) {
1639			struct ssp_response_iu *resp_iu;
1640			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1641
1642			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1643								      frame_index,
1644								      (void **)&resp_iu);
1645
1646			sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
1647
1648			resp_iu = &sci_req->ssp.rsp;
1649
1650			if (resp_iu->datapres == 0x01 ||
1651			    resp_iu->datapres == 0x02) {
1652				scic_sds_request_set_status(sci_req,
1653							    SCU_TASK_DONE_CHECK_RESPONSE,
1654							    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1655			} else
1656				scic_sds_request_set_status(sci_req,
1657							    SCU_TASK_DONE_GOOD,
1658							    SCI_SUCCESS);
1659		} else {
1660			/* not a response frame, why did it get forwarded? */
1661			dev_err(scic_to_dev(scic),
1662				"%s: SCIC IO Request 0x%p received unexpected "
1663				"frame %d type 0x%02x\n", __func__, sci_req,
1664				frame_index, ssp_hdr.frame_type);
1665		}
1666
1667		/*
1668		 * In any case we are done with this frame buffer; return it to
1669		 * the controller.
1670		 */
1671		scic_sds_controller_release_frame(scic, frame_index);
1672
1673		return SCI_SUCCESS;
1674	}
1675
1676	case SCI_REQ_TASK_WAIT_TC_RESP:
1677		scic_sds_io_request_copy_response(sci_req);
1678		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1679		scic_sds_controller_release_frame(scic, frame_index);
1680		return SCI_SUCCESS;
1681
1682	case SCI_REQ_SMP_WAIT_RESP: {
1683		struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1684		void *frame_header;
1685
1686		scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1687							      frame_index,
1688							      &frame_header);
1689
1690		/* byte swap the header. */
1691		word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1692		sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1693
1694		if (rsp_hdr->frame_type == SMP_RESPONSE) {
1695			void *smp_resp;
1696
1697			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1698								      frame_index,
1699								      &smp_resp);
1700
1701			word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
1702				sizeof(u32);
1703
1704			sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1705				       smp_resp, word_cnt);
1706
1707			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1708						    SCI_SUCCESS);
1709
1710			sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1711		} else {
1712			/*
1713			 * This was not a response frame; why did it get
1714			 * forwarded?
1715			 */
1716			dev_err(scic_to_dev(scic),
1717				"%s: SCIC SMP Request 0x%p received unexpected "
1718				"frame %d type 0x%02x\n",
1719				__func__,
1720				sci_req,
1721				frame_index,
1722				rsp_hdr->frame_type);
1723
1724			scic_sds_request_set_status(sci_req,
1725						    SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1726						    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1727
1728			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1729		}
1730
1731		scic_sds_controller_release_frame(scic, frame_index);
1732
1733		return SCI_SUCCESS;
1734	}
1735
1736	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1737		return scic_sds_stp_request_udma_general_frame_handler(sci_req,
1738								       frame_index);
1739
1740	case SCI_REQ_STP_UDMA_WAIT_D2H:
1741		/* Use the general frame handler to copy the response data */
1742		status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
1743									 frame_index);
1744
1745		if (status != SCI_SUCCESS)
1746			return status;
1747
1748		scic_sds_stp_request_udma_complete_request(sci_req,
1749							   SCU_TASK_DONE_CHECK_RESPONSE,
1750							   SCI_FAILURE_IO_RESPONSE_VALID);
1751
1752		return SCI_SUCCESS;
1753
1754	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1755		struct dev_to_host_fis *frame_header;
1756		u32 *frame_buffer;
1757
1758		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1759								       frame_index,
1760								       (void **)&frame_header);
1761
1762		if (status != SCI_SUCCESS) {
1763			dev_err(scic_to_dev(scic),
1764				"%s: SCIC IO Request 0x%p could not get frame "
1765				"header for frame index %d, status %x\n",
1766				__func__,
1767				stp_req,
1768				frame_index,
1769				status);
1770
1771			return status;
1772		}
1773
1774		switch (frame_header->fis_type) {
1775		case FIS_REGD2H:
1776			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1777								      frame_index,
1778								      (void **)&frame_buffer);
1779
1780			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1781							       frame_header,
1782							       frame_buffer);
1783
1784			/* The command has completed with error */
1785			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1786						    SCI_FAILURE_IO_RESPONSE_VALID);
1787			break;
1788
1789		default:
1790			dev_warn(scic_to_dev(scic),
1791				 "%s: IO Request:0x%p Frame Id:%d protocol "
1792				  "violation occurred\n", __func__, stp_req,
1793				  frame_index);
1794
1795			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1796						    SCI_FAILURE_PROTOCOL_VIOLATION);
1797			break;
1798		}
1799
1800		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1801
1802		/* Frame has been decoded; return it to the controller */
1803		scic_sds_controller_release_frame(scic, frame_index);
1804
1805		return status;
1806	}
1807
1808	case SCI_REQ_STP_PIO_WAIT_FRAME: {
1809		struct isci_request *ireq = sci_req_to_ireq(sci_req);
1810		struct sas_task *task = isci_request_access_task(ireq);
1811		struct dev_to_host_fis *frame_header;
1812		u32 *frame_buffer;
1813
1814		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1815								       frame_index,
1816								       (void **)&frame_header);
1817
1818		if (status != SCI_SUCCESS) {
1819			dev_err(scic_to_dev(scic),
1820				"%s: SCIC IO Request 0x%p could not get frame "
1821				"header for frame index %d, status %x\n",
1822				__func__, stp_req, frame_index, status);
1823			return status;
1824		}
1825
1826		switch (frame_header->fis_type) {
1827		case FIS_PIO_SETUP:
1828			/* Get from the frame buffer the PIO Setup Data */
1829			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1830								      frame_index,
1831								      (void **)&frame_buffer);
1832
1833			/* Get the data from the PIO Setup.  The SCU hardware
1834			 * returns the first word in the frame_header and the
1835			 * rest of the data in the frame buffer, so we need to
1836			 * back up one dword.
1837			 */
1838
1839			/* transfer_count: first 16 bits in the 4th dword */
1840			stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
1841
1842			/* ending_status: 4th byte in the 3rd dword */
1843			stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
1844
1845			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1846							       frame_header,
1847							       frame_buffer);
1848
1849			sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
1850
1851			/* The next state is dependent on whether the
1852			 * request was PIO Data-in or Data-out.
1853			 */
1854			if (task->data_dir == DMA_FROM_DEVICE) {
1855				sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
1856			} else if (task->data_dir == DMA_TO_DEVICE) {
1857				/* Transmit data */
1858				status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1859				if (status != SCI_SUCCESS)
1860					break;
1861				sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
1862			}
1863			break;
1864
1865		case FIS_SETDEVBITS:
1866			sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1867			break;
1868
1869		case FIS_REGD2H:
1870			if (frame_header->status & ATA_BUSY) {
1871				/*
1872				 * Now why is the drive sending a D2H Register
1873				 * FIS when it is still busy?  Do nothing since
1874				 * we are still in the right state.
1875				 */
1876				dev_dbg(scic_to_dev(scic),
1877					"%s: SCIC PIO Request 0x%p received "
1878					"D2H Register FIS with BSY status "
1879					"0x%x\n",
1880					__func__,
1881					stp_req,
1882					frame_header->status);
1883				break;
1884			}
1885
1886			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1887								      frame_index,
1888								      (void **)&frame_buffer);
1889
1890			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1891							       frame_header,
1892							       frame_buffer);
1893
1894			scic_sds_request_set_status(sci_req,
1895						    SCU_TASK_DONE_CHECK_RESPONSE,
1896						    SCI_FAILURE_IO_RESPONSE_VALID);
1897
1898			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1899			break;
1900
1901		default:
1902			/* FIXME: what do we do here? */
1903			break;
1904		}
1905
1906		/* Frame is decoded; return it to the controller */
1907		scic_sds_controller_release_frame(scic, frame_index);
1908
1909		return status;
1910	}
1911
1912	case SCI_REQ_STP_PIO_DATA_IN: {
1913		struct dev_to_host_fis *frame_header;
1914		struct sata_fis_data *frame_buffer;
1915
1916		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1917								       frame_index,
1918								       (void **)&frame_header);
1919
1920		if (status != SCI_SUCCESS) {
1921			dev_err(scic_to_dev(scic),
1922				"%s: SCIC IO Request 0x%p could not get frame "
1923				"header for frame index %d, status %x\n",
1924				__func__,
1925				stp_req,
1926				frame_index,
1927				status);
1928			return status;
1929		}
1930
1931		if (frame_header->fis_type != FIS_DATA) {
1932			dev_err(scic_to_dev(scic),
1933				"%s: SCIC PIO Request 0x%p received frame %d "
1934				"with fis type 0x%02x when expecting a data "
1935				"fis.\n",
1936				__func__,
1937				stp_req,
1938				frame_index,
1939				frame_header->fis_type);
1940
1941			scic_sds_request_set_status(sci_req,
1942						    SCU_TASK_DONE_GOOD,
1943						    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1944
1945			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1946
1947			/* Frame is decoded; return it to the controller */
1948			scic_sds_controller_release_frame(scic, frame_index);
1949			return status;
1950		}
1951
1952		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
1953			sci_req->saved_rx_frame_index = frame_index;
1954			stp_req->type.pio.pio_transfer_bytes = 0;
1955		} else {
1956			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1957								      frame_index,
1958								      (void **)&frame_buffer);
1959
1960			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1961									    (u8 *)frame_buffer);
1962
1963			/* Frame is decoded; return it to the controller */
1964			scic_sds_controller_release_frame(scic, frame_index);
1965		}
1966
1967		/* Check for the end of the transfer, are there more
1968		 * bytes remaining for this data transfer
1969		 */
1970		if (status != SCI_SUCCESS ||
1971		    stp_req->type.pio.pio_transfer_bytes != 0)
1972			return status;
1973
1974		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
1975			scic_sds_request_set_status(sci_req,
1976						    SCU_TASK_DONE_CHECK_RESPONSE,
1977						    SCI_FAILURE_IO_RESPONSE_VALID);
1978
1979			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1980		} else {
1981			sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1982		}
1983		return status;
1984	}
1985
1986	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1987		struct dev_to_host_fis *frame_header;
1988		u32 *frame_buffer;
1989
1990		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1991								       frame_index,
1992								       (void **)&frame_header);
1993		if (status != SCI_SUCCESS) {
1994			dev_err(scic_to_dev(scic),
1995				"%s: SCIC IO Request 0x%p could not get frame "
1996				"header for frame index %d, status %x\n",
1997				__func__,
1998				stp_req,
1999				frame_index,
2000				status);
2001			return status;
2002		}
2003
2004		switch (frame_header->fis_type) {
2005		case FIS_REGD2H:
2006			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2007								      frame_index,
2008								      (void **)&frame_buffer);
2009
2010			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2011							       frame_header,
2012							       frame_buffer);
2013
2014			/* The command has completed with error */
2015			scic_sds_request_set_status(sci_req,
2016						    SCU_TASK_DONE_CHECK_RESPONSE,
2017						    SCI_FAILURE_IO_RESPONSE_VALID);
2018			break;
2019
2020		default:
2021			dev_warn(scic_to_dev(scic),
2022				 "%s: IO Request:0x%p Frame Id:%d protocol "
2023				 "violation occurred\n",
2024				 __func__,
2025				 stp_req,
2026				 frame_index);
2027
2028			scic_sds_request_set_status(sci_req,
2029						    SCU_TASK_DONE_UNEXP_FIS,
2030						    SCI_FAILURE_PROTOCOL_VIOLATION);
2031			break;
2032		}
2033
2034		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2035
2036		/* Frame has been decoded; return it to the controller */
2037		scic_sds_controller_release_frame(scic, frame_index);
2038
2039		return status;
2040	}
2041	case SCI_REQ_ABORTING:
2042		/*
2043		 * TODO: Is it even possible to get an unsolicited frame in the
2044		 * aborting state?
2045		 */
2046		scic_sds_controller_release_frame(scic, frame_index);
2047		return SCI_SUCCESS;
2048
2049	default:
2050		dev_warn(scic_to_dev(scic),
2051			 "%s: SCIC IO Request given unexpected frame %x while "
2052			 "in state %d\n",
2053			 __func__,
2054			 frame_index,
2055			 state);
2056
2057		scic_sds_controller_release_frame(scic, frame_index);
2058		return SCI_FAILURE_INVALID_STATE;
2059	}
2060}
2061
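/*
 * TC completion handler for UDMA STP requests.  Error completions need
 * care: if the D2H Register FIS has already landed in the response buffer
 * the request can be failed immediately, otherwise the state machine must
 * first wait for the device to send it (SCI_REQ_STP_UDMA_WAIT_D2H).
 */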
2062static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
2063						       u32 completion_code)
2064{
2065	enum sci_status status = SCI_SUCCESS;
2066
2067	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2068	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2069		scic_sds_stp_request_udma_complete_request(sci_req,
2070							   SCU_TASK_DONE_GOOD,
2071							   SCI_SUCCESS);
2072		break;
2073	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2074	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2075		/* We must check the response buffer to see if the D2H
2076		 * Register FIS was received before we got the TC
2077		 * completion.
2078		 */
2079		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2080			scic_sds_remote_device_suspend(sci_req->target_device,
2081				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2082
2083			scic_sds_stp_request_udma_complete_request(sci_req,
2084								   SCU_TASK_DONE_CHECK_RESPONSE,
2085								   SCI_FAILURE_IO_RESPONSE_VALID);
2086		} else {
2087			/* If we have an error completion status for the
2088			 * TC then we can expect a D2H register FIS from
2089			 * the device so we must change state to wait
2090			 * for it
2091			 */
2092			sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2093		}
2094		break;
2095
2096	/* TODO Check to see if any of these completion statuses need to
2097	 * wait for the device-to-host register fis.
2098	 */
2099	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2100	 * - this comes only for B0
2101	 */
2102	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2103	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2104	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2105	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2106	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2107		scic_sds_remote_device_suspend(sci_req->target_device,
2108			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2109	/* Fall through to the default case */
2110	default:
2111		/* All other completion statuses cause the IO to be complete. */
2112		scic_sds_stp_request_udma_complete_request(sci_req,
2113					SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2114					SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2115		break;
2116	}
2117
2118	return status;
2119}
2120
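/*
 * SATA soft reset is driven as two H2D Register FISes: one with SRST
 * asserted, then (once that TC completes) one with SRST cleared, after
 * which the request waits for the device's D2H Register FIS.  The two
 * handlers below walk the request through those TC completions.
 */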
2121static enum sci_status
2122stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
2123						   u32 completion_code)
2124{
2125	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2126	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2127		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2128					    SCI_SUCCESS);
2129
2130		sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2131		break;
2132
2133	default:
2134		/*
2135		 * All other completion statuses cause the IO to be complete.
2136		 * If a NAK was received, then it is up to the user to retry
2137		 * the request.
2138		 */
2139		scic_sds_request_set_status(sci_req,
2140					    SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2141					    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2142
2143		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2144		break;
2145	}
2146
2147	return SCI_SUCCESS;
2148}
2149
2150static enum sci_status
2151stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
2152						     u32 completion_code)
2153{
2154	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2155	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2156		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2157					    SCI_SUCCESS);
2158
2159		sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2160		break;
2161
2162	default:
2163		/* All other completion statuses cause the IO to be complete.  If
2164		 * a NAK was received, then it is up to the user to retry the
2165		 * request.
2166		 */
2167		scic_sds_request_set_status(sci_req,
2168			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2169			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2170
2171		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2172		break;
2173	}
2174
2175	return SCI_SUCCESS;
2176}
2177
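/*
 * Task-context completion dispatcher: route the TC completion code to the
 * handler for the request's current state.  A completion that arrives in
 * a state with no handler is logged and rejected as invalid.
 */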
2178enum sci_status
2179scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
2180				  u32 completion_code)
2181{
2182	enum sci_base_request_states state;
2183	struct scic_sds_controller *scic = sci_req->owning_controller;
2184
2185	state = sci_req->sm.current_state_id;
2186
2187	switch (state) {
2188	case SCI_REQ_STARTED:
2189		return request_started_state_tc_event(sci_req, completion_code);
2190
2191	case SCI_REQ_TASK_WAIT_TC_COMP:
2192		return ssp_task_request_await_tc_event(sci_req,
2193						       completion_code);
2194
2195	case SCI_REQ_SMP_WAIT_RESP:
2196		return smp_request_await_response_tc_event(sci_req,
2197							   completion_code);
2198
2199	case SCI_REQ_SMP_WAIT_TC_COMP:
2200		return smp_request_await_tc_event(sci_req, completion_code);
2201
2202	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2203		return stp_request_udma_await_tc_event(sci_req,
2204						       completion_code);
2205
2206	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2207		return stp_request_non_data_await_h2d_tc_event(sci_req,
2208							       completion_code);
2209
2210	case SCI_REQ_STP_PIO_WAIT_H2D:
2211		return stp_request_pio_await_h2d_completion_tc_event(sci_req,
2212								     completion_code);
2213
2214	case SCI_REQ_STP_PIO_DATA_OUT:
2215		return pio_data_out_tx_done_tc_event(sci_req, completion_code);
2216
2217	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2218		return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
2219									  completion_code);
2220
2221	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2222		return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
2223									    completion_code);
2224
2225	case SCI_REQ_ABORTING:
2226		return request_aborting_state_tc_event(sci_req,
2227						       completion_code);
2228
2229	default:
2230		dev_warn(scic_to_dev(scic),
2231			 "%s: SCIC IO Request given task completion "
2232			 "notification %x while in wrong state %d\n",
2233			 __func__,
2234			 completion_code,
2235			 state);
2236		return SCI_FAILURE_INVALID_STATE;
2237	}
2238}
2239
2240/**
2241 * isci_request_process_response_iu() - This function sets the status and
2242 *    response iu, in the task struct, from the request object for the upper
2243 *    layer driver.
2244 * @task: This parameter is the task struct from the upper layer driver.
2245 * @resp_iu: This parameter points to the response iu of the completed request.
2246 * @dev: This parameter specifies the linux device struct.
2247 *
2248 * none.
2249 */
2250static void isci_request_process_response_iu(
2251	struct sas_task *task,
2252	struct ssp_response_iu *resp_iu,
2253	struct device *dev)
2254{
2255	dev_dbg(dev,
2256		"%s: resp_iu = %p "
2257		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2258		"resp_iu->response_data_len = %x, "
2259		"resp_iu->sense_data_len = %x\nresponse data: ",
2260		__func__,
2261		resp_iu,
2262		resp_iu->status,
2263		resp_iu->datapres,
2264		resp_iu->response_data_len,
2265		resp_iu->sense_data_len);
2266
2267	task->task_status.stat = resp_iu->status;
2268
2269	/* libsas updates the task status fields based on the response iu. */
2270	sas_ssp_task_response(dev, task, resp_iu);
2271}
2272
2273/**
2274 * isci_request_set_open_reject_status() - This function prepares the I/O
2275 *    completion for OPEN_REJECT conditions.
2276 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task associated with @request.
2277 * @response_ptr: This parameter specifies the service response for the I/O.
2278 * @status_ptr: This parameter specifies the exec status for the I/O.
2279 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2280 *    the LLDD with respect to completing this request or forcing an abort
2281 *    condition on the I/O.
2282 * @open_rej_reason: This parameter specifies the encoded reason for the
2283 *    abandon-class reject.
2284 *
2285 * none.
2286 */
2287static void isci_request_set_open_reject_status(
2288	struct isci_request *request,
2289	struct sas_task *task,
2290	enum service_response *response_ptr,
2291	enum exec_status *status_ptr,
2292	enum isci_completion_selection *complete_to_host_ptr,
2293	enum sas_open_rej_reason open_rej_reason)
2294{
2295	/* Task in the target is done. */
2296	request->complete_in_target       = true;
2297	*response_ptr                     = SAS_TASK_UNDELIVERED;
2298	*status_ptr                       = SAS_OPEN_REJECT;
2299	*complete_to_host_ptr             = isci_perform_normal_io_completion;
2300	task->task_status.open_rej_reason = open_rej_reason;
2301}
2302
2303/**
2304 * isci_request_handle_controller_specific_errors() - This function decodes
2305 *    controller-specific I/O completion error conditions.
2306 * @isci_device: This parameter is the device on which the I/O completed.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task associated with @request.
2307 * @response_ptr: This parameter specifies the service response for the I/O.
2308 * @status_ptr: This parameter specifies the exec status for the I/O.
2309 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2310 *    the LLDD with respect to completing this request or forcing an abort
2311 *    condition on the I/O.
2312 *
2313 * none.
2314 */
2315static void isci_request_handle_controller_specific_errors(
2316	struct isci_remote_device *isci_device,
2317	struct isci_request *request,
2318	struct sas_task *task,
2319	enum service_response *response_ptr,
2320	enum exec_status *status_ptr,
2321	enum isci_completion_selection *complete_to_host_ptr)
2322{
2323	unsigned int cstatus;
2324
2325	cstatus = request->sci.scu_status;
2326
2327	dev_dbg(&request->isci_host->pdev->dev,
2328		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2329		"- controller status = 0x%x\n",
2330		__func__, request, cstatus);
2331
2332	/* Decode the controller-specific errors; most
2333	 * important is to recognize those conditions in which
2334	 * the target may still have a task outstanding that
2335	 * must be aborted.
2336	 *
2337	 * Note that there are SCU completion codes being
2338	 * named in the decode below for which SCIC has already
2339	 * done work to handle them in a way other than as
2340	 * a controller-specific completion code; these are left
2341	 * in the decode below for completeness sake.
2342	 */
2343	switch (cstatus) {
2344	case SCU_TASK_DONE_DMASETUP_DIRERR:
2345	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2346	case SCU_TASK_DONE_XFERCNT_ERR:
2347		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2348		if (task->task_proto == SAS_PROTOCOL_SMP) {
2349			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2350			*response_ptr = SAS_TASK_COMPLETE;
2351
2352			/* See if the device has been/is being stopped. Note
2353			 * that we ignore the quiesce state, since we are
2354			 * concerned about the actual device state.
2355			 */
2356			if ((isci_device->status == isci_stopping) ||
2357			    (isci_device->status == isci_stopped))
2358				*status_ptr = SAS_DEVICE_UNKNOWN;
2359			else
2360				*status_ptr = SAS_ABORTED_TASK;
2361
2362			request->complete_in_target = true;
2363
2364			*complete_to_host_ptr =
2365				isci_perform_normal_io_completion;
2366		} else {
2367			/* Task in the target is not done. */
2368			*response_ptr = SAS_TASK_UNDELIVERED;
2369
2370			if ((isci_device->status == isci_stopping) ||
2371			    (isci_device->status == isci_stopped))
2372				*status_ptr = SAS_DEVICE_UNKNOWN;
2373			else
2374				*status_ptr = SAM_STAT_TASK_ABORTED;
2375
2376			request->complete_in_target = false;
2377
2378			*complete_to_host_ptr =
2379				isci_perform_error_io_completion;
2380		}
2381
2382		break;
2383
2384	case SCU_TASK_DONE_CRC_ERR:
2385	case SCU_TASK_DONE_NAK_CMD_ERR:
2386	case SCU_TASK_DONE_EXCESS_DATA:
2387	case SCU_TASK_DONE_UNEXP_FIS:
2388	/* Also SCU_TASK_DONE_UNEXP_RESP: */
2389	case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2390	case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2391	case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2392		/* These are conditions in which the target
2393		 * has completed the task, so that no cleanup
2394		 * is necessary.
2395		 */
2396		*response_ptr = SAS_TASK_COMPLETE;
2397
2398		/* See if the device has been/is being stopped. Note
2399		 * that we ignore the quiesce state, since we are
2400		 * concerned about the actual device state.
2401		 */
2402		if ((isci_device->status == isci_stopping) ||
2403		    (isci_device->status == isci_stopped))
2404			*status_ptr = SAS_DEVICE_UNKNOWN;
2405		else
2406			*status_ptr = SAS_ABORTED_TASK;
2407
2408		request->complete_in_target = true;
2409
2410		*complete_to_host_ptr = isci_perform_normal_io_completion;
2411		break;
2412
2413
2414	/* Note that the only open reject completion codes seen here will be
2415	 * abandon-class codes; all others are automatically retried in the SCU.
2416	 */
2417	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2418
2419		isci_request_set_open_reject_status(
2420			request, task, response_ptr, status_ptr,
2421			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2422		break;
2423
2424	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2425
2426		/* Note - the return of AB0 will change when
2427		 * libsas implements detection of zone violations.
2428		 */
2429		isci_request_set_open_reject_status(
2430			request, task, response_ptr, status_ptr,
2431			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2432		break;
2433
2434	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2435
2436		isci_request_set_open_reject_status(
2437			request, task, response_ptr, status_ptr,
2438			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2439		break;
2440
2441	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2442
2443		isci_request_set_open_reject_status(
2444			request, task, response_ptr, status_ptr,
2445			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2446		break;
2447
2448	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2449
2450		isci_request_set_open_reject_status(
2451			request, task, response_ptr, status_ptr,
2452			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2453		break;
2454
2455	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2456
2457		isci_request_set_open_reject_status(
2458			request, task, response_ptr, status_ptr,
2459			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2460		break;
2461
2462	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2463
2464		isci_request_set_open_reject_status(
2465			request, task, response_ptr, status_ptr,
2466			complete_to_host_ptr, SAS_OREJ_STP_NORES);
2467		break;
2468
2469	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2470
2471		isci_request_set_open_reject_status(
2472			request, task, response_ptr, status_ptr,
2473			complete_to_host_ptr, SAS_OREJ_EPROTO);
2474		break;
2475
2476	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2477
2478		isci_request_set_open_reject_status(
2479			request, task, response_ptr, status_ptr,
2480			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2481		break;
2482
2483	case SCU_TASK_DONE_LL_R_ERR:
2484	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
2485	case SCU_TASK_DONE_LL_PERR:
2486	case SCU_TASK_DONE_LL_SY_TERM:
2487	/* Also SCU_TASK_DONE_NAK_ERR:*/
2488	case SCU_TASK_DONE_LL_LF_TERM:
2489	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2490	case SCU_TASK_DONE_LL_ABORT_ERR:
2491	case SCU_TASK_DONE_SEQ_INV_TYPE:
2492	/* Also SCU_TASK_DONE_UNEXP_XR: */
2493	case SCU_TASK_DONE_XR_IU_LEN_ERR:
2494	case SCU_TASK_DONE_INV_FIS_LEN:
2495	/* Also SCU_TASK_DONE_XR_WD_LEN: */
2496	case SCU_TASK_DONE_SDMA_ERR:
2497	case SCU_TASK_DONE_OFFSET_ERR:
2498	case SCU_TASK_DONE_MAX_PLD_ERR:
2499	case SCU_TASK_DONE_LF_ERR:
2500	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2501	case SCU_TASK_DONE_SMP_LL_RX_ERR:
2502	case SCU_TASK_DONE_UNEXP_DATA:
2503	case SCU_TASK_DONE_UNEXP_SDBFIS:
2504	case SCU_TASK_DONE_REG_ERR:
2505	case SCU_TASK_DONE_SDB_ERR:
2506	case SCU_TASK_DONE_TASK_ABORT:
2507	default:
2508		/* Task in the target is not done. */
2509		*response_ptr = SAS_TASK_UNDELIVERED;
2510		*status_ptr = SAM_STAT_TASK_ABORTED;
2511		request->complete_in_target = false;
2512
2513		*complete_to_host_ptr = isci_perform_error_io_completion;
2514		break;
2515	}
2516}
2517
2518/**
2519 * isci_task_save_for_upper_layer_completion() - This function saves the
2520 *    request for later completion to the upper layer driver.
2521 * @host: This parameter is a pointer to the host on which the request
2522 *    should be queued (either as an error or success).
2523 * @request: This parameter is the completed request.
2524 * @response: This parameter is the response code for the completed task.
2525 * @status: This parameter is the status code for the completed task.
2526 *
2527 * none.
2528 */
2529static void isci_task_save_for_upper_layer_completion(
2530	struct isci_host *host,
2531	struct isci_request *request,
2532	enum service_response response,
2533	enum exec_status status,
2534	enum isci_completion_selection task_notification_selection)
2535{
2536	struct sas_task *task = isci_request_access_task(request);
2537
2538	task_notification_selection
2539		= isci_task_set_completion_status(task, response, status,
2540						  task_notification_selection);
2541
2542	/* Tasks aborted specifically by a call to the lldd_abort_task
2543	 * function should not be completed to the host in the regular path.
2544	 */
2545	switch (task_notification_selection) {
2546
2547	case isci_perform_normal_io_completion:
2548
2549		/* Normal notification (task_done) */
2550		dev_dbg(&host->pdev->dev,
2551			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2552			__func__,
2553			task,
2554			task->task_status.resp, response,
2555			task->task_status.stat, status);
2556		/* Add to the completed list. */
2557		list_add(&request->completed_node,
2558			 &host->requests_to_complete);
2559
2560		/* Take the request off the device's pending request list. */
2561		list_del_init(&request->dev_node);
2562		break;
2563
2564	case isci_perform_aborted_io_completion:
2565		/* No notification to libsas because this request is
2566		 * already in the abort path.
2567		 */
2568		dev_warn(&host->pdev->dev,
2569			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2570			 __func__,
2571			 task,
2572			 task->task_status.resp, response,
2573			 task->task_status.stat, status);
2574
2575		/* Wake up whatever process was waiting for this
2576		 * request to complete.
2577		 */
2578		WARN_ON(request->io_request_completion == NULL);
2579
2580		if (request->io_request_completion != NULL) {
2581
2582			/* Signal whoever is waiting that this
2583			 * request is complete.
2584			 */
2585			complete(request->io_request_completion);
2586		}
2587		break;
2588
2589	case isci_perform_error_io_completion:
2590		/* Use sas_task_abort */
2591		dev_warn(&host->pdev->dev,
2592			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2593			 __func__,
2594			 task,
2595			 task->task_status.resp, response,
2596			 task->task_status.stat, status);
2597		/* Add to the aborted list. */
2598		list_add(&request->completed_node,
2599			 &host->requests_to_errorback);
2600		break;
2601
2602	default:
2603		dev_warn(&host->pdev->dev,
2604			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2605			 __func__,
2606			 task,
2607			 task->task_status.resp, response,
2608			 task->task_status.stat, status);
2609
2610		/* Add to the error to libsas list. */
2611		list_add(&request->completed_node,
2612			 &host->requests_to_errorback);
2613		break;
2614	}
2615}
2616
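/*
 * Translate a core (SCI) completion into libsas terms.  The request state
 * decides the overall path: requests in the aborted/aborting/terminating
 * states are completed through the abort machinery, while active requests
 * are decoded per completion_status to produce the response/status pair
 * handed to isci_task_save_for_upper_layer_completion().
 */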
2617static void isci_request_io_request_complete(struct isci_host *isci_host,
2618					     struct isci_request *request,
2619					     enum sci_io_status completion_status)
2620{
2621	struct sas_task *task = isci_request_access_task(request);
2622	struct ssp_response_iu *resp_iu;
2623	void *resp_buf;
2624	unsigned long task_flags;
2625	struct isci_remote_device *isci_device   = request->isci_device;
2626	enum service_response response       = SAS_TASK_UNDELIVERED;
2627	enum exec_status status         = SAS_ABORTED_TASK;
2628	enum isci_request_status request_status;
2629	enum isci_completion_selection complete_to_host
2630		= isci_perform_normal_io_completion;
2631
2632	dev_dbg(&isci_host->pdev->dev,
2633		"%s: request = %p, task = %p,\n"
2634		"task->data_dir = %d completion_status = 0x%x\n",
2635		__func__,
2636		request,
2637		task,
2638		task->data_dir,
2639		completion_status);
2640
2641	spin_lock(&request->state_lock);
2642	request_status = isci_request_get_state(request);
2643
2644	/* Decode the request status.  Note that if the request has been
2645	 * aborted by a task management function, we don't care
2646	 * what the status is.
2647	 */
2648	switch (request_status) {
2649
2650	case aborted:
2651		/* "aborted" indicates that the request was aborted by a task
2652		 * management function, since once a task management request is
2653		 * performed by the device, the request only completes because
2654		 * of the subsequent driver terminate.
2655		 *
2656		 * Aborted also means an external thread is explicitly managing
2657		 * this request, so that we do not complete it up the stack.
2658		 *
2659		 * The target is still there (since the TMF was successful).
2660		 */
2661		request->complete_in_target = true;
2662		response = SAS_TASK_COMPLETE;
2663
2664		/* See if the device has been/is being stopped. Note
2665		 * that we ignore the quiesce state, since we are
2666		 * concerned about the actual device state.
2667		 */
2668		if ((isci_device->status == isci_stopping) ||
2669		    (isci_device->status == isci_stopped))
2671			status = SAS_DEVICE_UNKNOWN;
2672		else
2673			status = SAS_ABORTED_TASK;
2674
2675		complete_to_host = isci_perform_aborted_io_completion;
2676		/* This was an aborted request. */
2677
2678		spin_unlock(&request->state_lock);
2679		break;
2680
2681	case aborting:
2682		/* aborting means that the task management function tried and
2683		 * failed to abort the request. We need to note the request
2684		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2685		 * target as down.
2686		 *
2687		 * Aborting also means an external thread is explicitly managing
2688		 * this request, so that we do not complete it up the stack.
2689		 */
2690		request->complete_in_target = true;
2691		response = SAS_TASK_UNDELIVERED;
2692
2693		if ((isci_device->status == isci_stopping) ||
2694		    (isci_device->status == isci_stopped))
2695			/* The device has been/is being stopped. Note that
2696			 * we ignore the quiesce state, since we are
2697			 * concerned about the actual device state.
2698			 */
2699			status = SAS_DEVICE_UNKNOWN;
2700		else
2701			status = SAS_PHY_DOWN;
2702
2703		complete_to_host = isci_perform_aborted_io_completion;
2704
2705		/* This was an aborted request. */
2706
2707		spin_unlock(&request->state_lock);
2708		break;
2709
2710	case terminating:
2711
2712		/* This was a terminated request.  This happens when
2713		 * the I/O is being terminated because of an action on
2714		 * the device (reset, tear down, etc.), and the I/O needs
2715		 * to be completed up the stack.
2716		 */
2717		request->complete_in_target = true;
2718		response = SAS_TASK_UNDELIVERED;
2719
2720		/* See if the device has been/is being stopped. Note
2721		 * that we ignore the quiesce state, since we are
2722		 * concerned about the actual device state.
2723		 */
2724		if ((isci_device->status == isci_stopping) ||
2725		    (isci_device->status == isci_stopped))
2726			status = SAS_DEVICE_UNKNOWN;
2727		else
2728			status = SAS_ABORTED_TASK;
2729
2730		complete_to_host = isci_perform_aborted_io_completion;
2731
2732		/* This was a terminated request. */
2733
2734		spin_unlock(&request->state_lock);
2735		break;
2736
2737	default:
2738
2739		/* The request is done from an SCU HW perspective. */
2740		request->status = completed;
2741
2742		spin_unlock(&request->state_lock);
2743
2744		/* This is an active request being completed from the core. */
2745		switch (completion_status) {
2746
2747		case SCI_IO_FAILURE_RESPONSE_VALID:
2748			dev_dbg(&isci_host->pdev->dev,
2749				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2750				__func__,
2751				request,
2752				task);
2753
2754			if (sas_protocol_ata(task->task_proto)) {
2755				resp_buf = &request->sci.stp.rsp;
2756				isci_request_process_stp_response(task,
2757								  resp_buf);
2758			} else if (SAS_PROTOCOL_SSP == task->task_proto) {
2759
2760				/* crack the iu response buffer. */
2761				resp_iu = &request->sci.ssp.rsp;
2762				isci_request_process_response_iu(task, resp_iu,
2763								 &isci_host->pdev->dev);
2764
2765			} else if (SAS_PROTOCOL_SMP == task->task_proto) {
2766
2767				dev_err(&isci_host->pdev->dev,
2768					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2769					"SAS_PROTOCOL_SMP protocol\n",
2770					__func__);
2771
2772			} else
2773				dev_err(&isci_host->pdev->dev,
2774					"%s: unknown protocol\n", __func__);
2775
2776			/* use the task status set in the task struct by the
2777			 * isci_request_process_response_iu call.
2778			 */
2779			request->complete_in_target = true;
2780			response = task->task_status.resp;
2781			status = task->task_status.stat;
2782			break;
2783
2784		case SCI_IO_SUCCESS:
2785		case SCI_IO_SUCCESS_IO_DONE_EARLY:
2786
2787			response = SAS_TASK_COMPLETE;
2788			status   = SAM_STAT_GOOD;
2789			request->complete_in_target = true;
2790
2791			if (task->task_proto == SAS_PROTOCOL_SMP) {
2792				void *rsp = &request->sci.smp.rsp;
2793
2794				dev_dbg(&isci_host->pdev->dev,
2795					"%s: SMP protocol completion\n",
2796					__func__);
2797
2798				sg_copy_from_buffer(
2799					&task->smp_task.smp_resp, 1,
2800					rsp, sizeof(struct smp_resp));
2801			} else if (completion_status
2802				   == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2803
2804				/* This was an SSP / STP / SATA transfer.
2805				 * There is a possibility that less data than
2806				 * the maximum was transferred.
2807				 */
2808				u32 transferred_length = sci_req_tx_bytes(&request->sci);
2809
2810				task->task_status.residual
2811					= task->total_xfer_len - transferred_length;
2812
2813				/* If there were residual bytes, call this an
2814				 * underrun.
2815				 */
2816				if (task->task_status.residual != 0)
2817					status = SAS_DATA_UNDERRUN;
2818
2819				dev_dbg(&isci_host->pdev->dev,
2820					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2821					__func__,
2822					status);
2823
2824			} else
2825				dev_dbg(&isci_host->pdev->dev,
2826					"%s: SCI_IO_SUCCESS\n",
2827					__func__);
2828
2829			break;
2830
2831		case SCI_IO_FAILURE_TERMINATED:
2832			dev_dbg(&isci_host->pdev->dev,
2833				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2834				__func__,
2835				request,
2836				task);
2837
2838			/* The request was terminated explicitly.  No handling
2839			 * is needed in the SCSI error handler path.
2840			 */
2841			request->complete_in_target = true;
2842			response = SAS_TASK_UNDELIVERED;
2843
2844			/* See if the device has been/is being stopped. Note
2845			 * that we ignore the quiesce state, since we are
2846			 * concerned about the actual device state.
2847			 */
2848			if ((isci_device->status == isci_stopping) ||
2849			    (isci_device->status == isci_stopped))
2850				status = SAS_DEVICE_UNKNOWN;
2851			else
2852				status = SAS_ABORTED_TASK;
2853
2854			complete_to_host = isci_perform_normal_io_completion;
2855			break;
2856
2857		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2858
2859			isci_request_handle_controller_specific_errors(
2860				isci_device, request, task, &response, &status,
2861				&complete_to_host);
2862
2863			break;
2864
2865		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2866			/* This is a special case, in that the I/O completion
2867			 * is telling us that the device needs a reset.
2868			 * In order for the device reset condition to be
2869			 * noticed, the I/O has to be handled in the error
2870			 * handler.  Set the reset flag and cause the
2871			 * SCSI error thread to be scheduled.
2872			 */
2873			spin_lock_irqsave(&task->task_state_lock, task_flags);
2874			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2875			spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2876
2877			/* Fail the I/O. */
2878			response = SAS_TASK_UNDELIVERED;
2879			status = SAM_STAT_TASK_ABORTED;
2880
2881			complete_to_host = isci_perform_error_io_completion;
2882			request->complete_in_target = false;
2883			break;
2884
2885		default:
2886			/* Catch any otherwise unhandled error codes here. */
2887			dev_warn(&isci_host->pdev->dev,
2888				 "%s: invalid completion code: 0x%x - "
2889				 "isci_request = %p\n",
2890				 __func__, completion_status, request);
2891
2892			response = SAS_TASK_UNDELIVERED;
2893
2894			/* See if the device has been/is being stopped. Note
2895			 * that we ignore the quiesce state, since we are
2896			 * concerned about the actual device state.
2897			 */
2898			if ((isci_device->status == isci_stopping) ||
2899			    (isci_device->status == isci_stopped))
2900				status = SAS_DEVICE_UNKNOWN;
2901			else
2902				status = SAS_ABORTED_TASK;
2903
2904			complete_to_host = isci_perform_error_io_completion;
2905			request->complete_in_target = false;
2906			break;
2907		}
2908		break;
2909	}
2910
2911	isci_request_unmap_sgl(request, isci_host->pdev);
2912
2913	/* Put the completed request on the correct list */
2914	isci_task_save_for_upper_layer_completion(isci_host, request, response,
2915						  status, complete_to_host);
2917
2918	/* complete the io request to the core. */
2919	scic_controller_complete_io(&isci_host->sci,
2920				    &isci_device->sci,
2921				    &request->sci);
2922	/* set terminated handle so it cannot be completed or
2923	 * terminated again, and to cause any calls into abort
2924	 * task to recognize the already completed case.
2925	 */
2926	request->terminated = true;
2927
2928	isci_host_can_dequeue(isci_host, 1);
2929}
2930
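/*
 * Entry handler for SCI_REQ_STARTED: any request that is not a simple
 * accelerated SSP or NCQ command is immediately moved into a
 * protocol-specific substate (task management, SATA soft reset, SMP, or
 * one of the STP non-data/UDMA/PIO flows).
 */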
2931static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2932{
2933	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2934	struct isci_request *ireq = sci_req_to_ireq(sci_req);
2935	struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2936	struct sas_task *task;
2937
2938	/* XXX as hch said always creating an internal sas_task for tmf
2939	 * requests would simplify the driver
2940	 */
2941	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2942
2943	/* all unaccelerated request types (non ssp or ncq) are handled with
2944	 * substates
2945	 */
2946	if (!task && dev->dev_type == SAS_END_DEV) {
2947		sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2948	} else if (!task &&
2949		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2950		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2951		sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2952	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2953		sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2954	} else if (task && sas_protocol_ata(task->task_proto) &&
2955		   !task->ata_task.use_ncq) {
2956		u32 state;
2957
2958		if (task->data_dir == DMA_NONE)
2959			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2960		else if (task->ata_task.dma_xfer)
2961			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2962		else /* PIO */
2963			state = SCI_REQ_STP_PIO_WAIT_H2D;
2964
2965		sci_change_state(sm, state);
2966	}
2967}
2968
2969static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
2970{
2971	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2972	struct scic_sds_controller *scic = sci_req->owning_controller;
2973	struct isci_host *ihost = scic_to_ihost(scic);
2974	struct isci_request *ireq = sci_req_to_ireq(sci_req);
2975
2976	/* Tell the SCI_USER that the IO request is complete */
2977	if (sci_req->is_task_management_request == false)
2978		isci_request_io_request_complete(ihost, ireq,
2979						 sci_req->sci_status);
2980	else
2981		isci_task_request_complete(ihost, ireq, sci_req->sci_status);
2982}
2983
2984static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
2985{
2986	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2987
2988	/* Setting the abort bit in the Task Context is required by the silicon. */
2989	sci_req->task_context_buffer->abort = 1;
2990}
2991
2992static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2993{
2994	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2995
2996	scic_sds_remote_device_set_working_request(sci_req->target_device,
2997						   sci_req);
2998}
2999
3000static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3001{
3002	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3003
3004	scic_sds_remote_device_set_working_request(sci_req->target_device,
3005						   sci_req);
3006}
3007
3008static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3009{
3010	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3011
3012	scic_sds_remote_device_set_working_request(sci_req->target_device,
3013						   sci_req);
3014}
3015
3016static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3017{
3018	struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3019	struct scu_task_context *task_context;
3020	struct host_to_dev_fis *h2d_fis;
3021	enum sci_status status;
3022
3023	/* Clear the SRST bit */
3024	h2d_fis = &sci_req->stp.cmd;
3025	h2d_fis->control = 0;
3026
3027	/* Clear the TC control bit */
3028	task_context = scic_sds_controller_get_task_context_buffer(
3029		sci_req->owning_controller, sci_req->io_tag);
3030	task_context->control_frame = 0;
3031
3032	status = scic_controller_continue_io(sci_req);
3033	WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3034}
3035
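/* Per-state entry handlers for the request state machine. */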
3036static const struct sci_base_state scic_sds_request_state_table[] = {
3037	[SCI_REQ_INIT] = { },
3038	[SCI_REQ_CONSTRUCTED] = { },
3039	[SCI_REQ_STARTED] = {
3040		.enter_state = scic_sds_request_started_state_enter,
3041	},
3042	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3043		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3044	},
3045	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3046	[SCI_REQ_STP_PIO_WAIT_H2D] = {
3047		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3048	},
3049	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3050	[SCI_REQ_STP_PIO_DATA_IN] = { },
3051	[SCI_REQ_STP_PIO_DATA_OUT] = { },
3052	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3053	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3054	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3055		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3056	},
3057	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3058		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3059	},
3060	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3061	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
3062	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
3063	[SCI_REQ_SMP_WAIT_RESP] = { },
3064	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
3065	[SCI_REQ_COMPLETED] = {
3066		.enter_state = scic_sds_request_completed_state_enter,
3067	},
3068	[SCI_REQ_ABORTING] = {
3069		.enter_state = scic_sds_request_aborting_state_enter,
3070	},
3071	[SCI_REQ_FINAL] = { },
3072};
3073
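/*
 * Common construction for both I/O and task management requests: start the
 * state machine in SCI_REQ_INIT and default the status fields.  When the
 * caller supplied a valid io_tag the task context comes from the
 * controller's task context table; otherwise the request's embedded task
 * context buffer is used until a tag is allocated.
 */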
3074static void
3075scic_sds_general_request_construct(struct scic_sds_controller *scic,
3076				   struct scic_sds_remote_device *sci_dev,
3077				   u16 io_tag,
3078				   struct scic_sds_request *sci_req)
3079{
3080	sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT);
3081
3082	sci_req->io_tag = io_tag;
3083	sci_req->owning_controller = scic;
3084	sci_req->target_device = sci_dev;
3085	sci_req->protocol = SCIC_NO_PROTOCOL;
3086	sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3087	sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
3088
3089	sci_req->sci_status   = SCI_SUCCESS;
3090	sci_req->scu_status   = 0;
3091	sci_req->post_context = 0xFFFFFFFF;
3092
3093	sci_req->is_task_management_request = false;
3094
3095	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
3096		sci_req->was_tag_assigned_by_user = false;
3097		sci_req->task_context_buffer = &sci_req->tc;
3098	} else {
3099		sci_req->was_tag_assigned_by_user = true;
3100
3101		sci_req->task_context_buffer =
3102			scic_sds_controller_get_task_context_buffer(scic, io_tag);
3103	}
3104}
3105
3106static enum sci_status
3107scic_io_request_construct(struct scic_sds_controller *scic,
3108			  struct scic_sds_remote_device *sci_dev,
3109			  u16 io_tag, struct scic_sds_request *sci_req)
3110{
3111	struct domain_device *dev = sci_dev_to_domain(sci_dev);
3112	enum sci_status status = SCI_SUCCESS;
3113
3114	/* Build the common part of the request */
3115	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3116
3117	if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3118		return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3119
3120	if (dev->dev_type == SAS_END_DEV)
3121		/* pass */;
3122	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3123		memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3124	else if (dev_is_expander(dev))
3125		memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
3126	else
3127		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3128
3129	memset(sci_req->task_context_buffer, 0,
3130	       offsetof(struct scu_task_context, sgl_pair_ab));
3131
3132	return status;
3133}
3134
3135enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3136					    struct scic_sds_remote_device *sci_dev,
3137					    u16 io_tag, struct scic_sds_request *sci_req)
3138{
3139	struct domain_device *dev = sci_dev_to_domain(sci_dev);
3140	enum sci_status status = SCI_SUCCESS;
3141
3142	/* Build the common part of the request */
3143	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3144
3145	if (dev->dev_type == SAS_END_DEV ||
3146	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3147		sci_req->is_task_management_request = true;
3148		memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
3149	} else
3150		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3151
3152	return status;
3153}
3154
3155static enum sci_status isci_request_ssp_request_construct(
3156	struct isci_request *request)
3157{
3158	enum sci_status status;
3159
3160	dev_dbg(&request->isci_host->pdev->dev,
3161		"%s: request = %p\n",
3162		__func__,
3163		request);
3164	status = scic_io_request_construct_basic_ssp(&request->sci);
3165	return status;
3166}
3167
3168static enum sci_status isci_request_stp_request_construct(
3169	struct isci_request *request)
3170{
3171	struct sas_task *task = isci_request_access_task(request);
3172	enum sci_status status;
3173	struct host_to_dev_fis *register_fis;
3174
3175	dev_dbg(&request->isci_host->pdev->dev,
3176		"%s: request = %p\n",
3177		__func__,
3178		request);
3179
3180	/* Get the host_to_dev_fis from the core and copy
3181	 * the fis from the task into it.
3182	 */
3183	register_fis = isci_sata_task_to_fis_copy(task);
3184
3185	status = scic_io_request_construct_basic_sata(&request->sci);
3186
3187	/* Set the ncq tag in the fis, from the queue
3188	 * command in the task.
3189	 */
3190	if (isci_sata_is_task_ncq(task)) {
3191
3192		isci_sata_set_ncq_tag(register_fis, task);
3196	}
3197
3198	return status;
3199}
3200
3201/*
3202 * This function will fill in the SCU Task Context for a SMP request. The
3203 *    following important settings are utilized:
3204 *    - task_type == SCU_TASK_TYPE_SMP.  This indicates that a normal request
3205 *      type (i.e. non-raw frame) is being utilized to perform task management.
3206 *    - control_frame == 1.  This ensures that the proper endianness is set so
3207 *      that the bytes are transmitted in the right order for a smp request frame.
3208 * @sci_req: This parameter specifies the smp request object being
3209 *    constructed.
3210 *
3211 */
3212static void
3213scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
3214				       ssize_t req_len)
3215{
3216	dma_addr_t dma_addr;
3217	struct scic_sds_remote_device *sci_dev;
3218	struct scic_sds_port *sci_port;
3219	struct scu_task_context *task_context;
3220	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
3221
3222	/* byte swap the smp request. */
3223	sci_swab32_cpy(&sci_req->smp.cmd, &sci_req->smp.cmd,
3224		       word_cnt);
3225
3226	task_context = scic_sds_request_get_task_context(sci_req);
3227
3228	sci_dev = scic_sds_request_get_device(sci_req);
3229	sci_port = scic_sds_request_get_port(sci_req);
3230
3231	/*
3232	 * Fill in the TC with its required data
3233	 * 00h
3234	 */
3235	task_context->priority = 0;
3236	task_context->initiator_request = 1;
3237	task_context->connection_rate = sci_dev->connection_rate;
3238	task_context->protocol_engine_index =
3239		scic_sds_controller_get_protocol_engine_group(scic);
3240	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3241	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3242	task_context->abort = 0;
3243	task_context->valid = SCU_TASK_CONTEXT_VALID;
3244	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3245
3246	/* 04h */
3247	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3248	task_context->command_code = 0;
3249	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3250
3251	/* 08h */
3252	task_context->link_layer_control = 0;
3253	task_context->do_not_dma_ssp_good_response = 1;
3254	task_context->strict_ordering = 0;
3255	task_context->control_frame = 1;
3256	task_context->timeout_enable = 0;
3257	task_context->block_guard_enable = 0;
3258
3259	/* 0ch */
3260	task_context->address_modifier = 0;
3261
3262	/* 10h */
3263	task_context->ssp_command_iu_length = req_len;
3264
3265	/* 14h */
3266	task_context->transfer_length_bytes = 0;
3267
3268	/*
3269	 * 18h ~ 30h, protocol specific
3270	 * since the command IU has been built by the framework at this point,
3271	 * we just copy the first DWord from the command IU to this location. */
3272	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
3273
3274	/*
3275	 * 40h
3276	 * "For SMP you could program it to zero. We would prefer that way
3277	 * so that done code will be consistent." - Venki
3278	 */
3279	task_context->task_phase = 0;
3280
3281	if (sci_req->was_tag_assigned_by_user) {
3282		/*
3283		 * Build the task context now since we have already read
3284		 * the data
3285		 */
3286		sci_req->post_context =
3287			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3288			 (scic_sds_controller_get_protocol_engine_group(scic) <<
3289			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3290			 (scic_sds_port_get_index(sci_port) <<
3291			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3292			 scic_sds_io_tag_get_index(sci_req->io_tag));
3293	} else {
3294		/*
3295		 * Build the task context now since we have already read
3296		 * the data.
3297		 * I/O tag index is not assigned because we have to wait
3298		 * until we get a TCi.
3299		 */
3300		sci_req->post_context =
3301			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3302			 (scic_sds_controller_get_protocol_engine_group(scic) <<
3303			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3304			 (scic_sds_port_get_index(sci_port) <<
3305			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
3306	}
3307
3308	/*
3309	 * Copy the physical address of the command buffer to the SCU Task
3310	 * Context; the command buffer should not contain the SMP header.
3311	 */
3312	dma_addr = scic_io_request_get_dma_addr(sci_req,
3313						((char *) &sci_req->smp.cmd) +
3314						sizeof(u32));
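	/*
	 * A sketch of the virtual-to-DMA translation done above, assuming
	 * the request lives in one contiguous DMA-mapped allocation
	 * (illustrative, not a verbatim quote of the helper):
	 *
	 *	offset   = (char *)virt_addr - (char *)request_base;
	 *	dma_addr = request_base_daddr + offset;
	 */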
3315
3316	task_context->command_iu_upper = upper_32_bits(dma_addr);
3317	task_context->command_iu_lower = lower_32_bits(dma_addr);
3318
3319	/* SMP response comes as UF, so no need to set response IU address. */
3320	task_context->response_iu_upper = 0;
3321	task_context->response_iu_lower = 0;
3322}
3323
3324static enum sci_status
3325scic_io_request_construct_smp(struct scic_sds_request *sci_req)
3326{
3327	struct smp_req *smp_req = &sci_req->smp.cmd;
3328
3329	sci_req->protocol = SCIC_SMP_PROTOCOL;
3330
3331	/*
3332	 * Look at the SMP request's header fields; under SAS 2.0, a zero
3333	 * request length for certain SAS 1.x SMP functions really indicates
3334	 * a non-zero default length.
3335	 */
3336	if (smp_req->req_len == 0) {
3337		switch (smp_req->func) {
3338		case SMP_DISCOVER:
3339		case SMP_REPORT_PHY_ERR_LOG:
3340		case SMP_REPORT_PHY_SATA:
3341		case SMP_REPORT_ROUTE_INFO:
3342			smp_req->req_len = 2;
3343			break;
3344		case SMP_CONF_ROUTE_INFO:
3345		case SMP_PHY_CONTROL:
3346		case SMP_PHY_TEST_FUNCTION:
3347			smp_req->req_len = 9;
3348			break;
3349		/* default: zero is a valid default length for SAS 2.0. */
3350		}
3351	}
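
	/*
	 * For reference, the SMP request header layout per the SAS
	 * specification (a sketch; the driver's struct smp_req mirrors it):
	 *
	 *	byte 0: frame type (0x40 == SMP_REQUEST)
	 *	byte 1: function
	 *	byte 2: allocated response length, in dwords (SAS 2.0)
	 *	byte 3: request length, in dwords
	 */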
3352
3353	scu_smp_request_construct_task_context(sci_req, smp_req->req_len);
3354
3355	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
3356
3357	return SCI_SUCCESS;
3358}
3359
3360/*
3361 * isci_smp_request_build() - This function builds the SMP request.
3362 * @ireq: This parameter points to the isci_request allocated in the
3363 *    request construct function.
3364 *
3365 * SCI_SUCCESS on successful completion, or specific failure code.
3366 */
3367static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3368{
3369	enum sci_status status = SCI_FAILURE;
3370	struct sas_task *task = isci_request_access_task(ireq);
3371	struct scic_sds_request *sci_req = &ireq->sci;
3372
3373	dev_dbg(&ireq->isci_host->pdev->dev,
3374		"%s: request = %p\n", __func__, ireq);
3375
3376	dev_dbg(&ireq->isci_host->pdev->dev,
3377		"%s: smp_req len = %d\n",
3378		__func__,
3379		task->smp_task.smp_req.length);
3380
3381	/* copy the smp_command to the address; */
3382	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
3383			  &sci_req->smp.cmd,
3384			  sizeof(struct smp_req));
3385
3386	status = scic_io_request_construct_smp(sci_req);
3387	if (status != SCI_SUCCESS)
3388		dev_warn(&ireq->isci_host->pdev->dev,
3389			 "%s: failed with status = %d\n",
3390			 __func__,
3391			 status);
3392
3393	return status;
3394}
3395
3396/**
3397 * isci_io_request_build() - This function builds the io request object.
3398 * @isci_host: This parameter specifies the ISCI host object
3399 * @request: This parameter points to the isci_request object allocated in the
3400 *    request construct function.
3401 * @isci_device: This parameter is the isci remote device object that is
3402 *    the destination for this request.
3403 *
3404 * SCI_SUCCESS on successful completion, or specific failure code.
3405 */
3406static enum sci_status isci_io_request_build(
3407	struct isci_host *isci_host,
3408	struct isci_request *request,
3409	struct isci_remote_device *isci_device)
3410{
3411	enum sci_status status = SCI_SUCCESS;
3412	struct sas_task *task = isci_request_access_task(request);
3413	struct scic_sds_remote_device *sci_device = &isci_device->sci;
3414
3415	dev_dbg(&isci_host->pdev->dev,
3416		"%s: isci_device = %p; request = %p, "
3417		"num_scatter = %d\n",
3418		__func__,
3419		isci_device,
3420		request,
3421		task->num_scatter);
3422
3423	/* map the sgl addresses, if present.
3424	 * libata does the mapping for sata devices
3425	 * before we get the request.
3426	 */
3427	if (task->num_scatter &&
3428	    !sas_protocol_ata(task->task_proto) &&
3429	    !(SAS_PROTOCOL_SMP & task->task_proto)) {
3430
3431		request->num_sg_entries = dma_map_sg(
3432			&isci_host->pdev->dev,
3433			task->scatter,
3434			task->num_scatter,
3435			task->data_dir
3436			);
3437
3438		if (request->num_sg_entries == 0)
3439			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3440	}
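
	/*
	 * The completion path is expected to undo this mapping with the
	 * symmetric DMA API call (a sketch of the standard pairing, not a
	 * verbatim quote of this driver's completion code):
	 *
	 *	dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
	 *		     task->num_scatter, task->data_dir);
	 */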
3441
3442	/* build the common request object. For now,
3443	 * we will let the core allocate the IO tag.
3444	 */
3445	status = scic_io_request_construct(&isci_host->sci, sci_device,
3446					   SCI_CONTROLLER_INVALID_IO_TAG,
3447					   &request->sci);
3448
3449	if (status != SCI_SUCCESS) {
3450		dev_warn(&isci_host->pdev->dev,
3451			 "%s: failed request construct\n",
3452			 __func__);
3453		return SCI_FAILURE;
3454	}
3455
3456	switch (task->task_proto) {
3457	case SAS_PROTOCOL_SMP:
3458		status = isci_smp_request_build(request);
3459		break;
3460	case SAS_PROTOCOL_SSP:
3461		status = isci_request_ssp_request_construct(request);
3462		break;
3463	case SAS_PROTOCOL_SATA:
3464	case SAS_PROTOCOL_STP:
3465	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3466		status = isci_request_stp_request_construct(request);
3467		break;
3468	default:
3469		dev_warn(&isci_host->pdev->dev,
3470			 "%s: unknown protocol\n", __func__);
3471		return SCI_FAILURE;
3472	}
3473
3474	return status;
3475}
3476
3477/**
3478 * isci_request_alloc_core() - This function gets the request object from the
3479 *    isci_host dma cache.
3480 * @isci_host: This parameter specifies the ISCI host object
3481 * @isci_request: This parameter will contain the pointer to the new
3482 *    isci_request object.
3483 * @isci_device: This parameter is the pointer to the isci remote device object
3484 *    that is the destination for this request.
3485 * @gfp_flags: This parameter specifies the os allocation flags.
3486 *
3487 * Returns 0 on success, or -ENOMEM if the dma pool allocation fails.
3488 */
3489static int isci_request_alloc_core(
3490	struct isci_host *isci_host,
3491	struct isci_request **isci_request,
3492	struct isci_remote_device *isci_device,
3493	gfp_t gfp_flags)
3494{
3495	int ret = 0;
3496	dma_addr_t handle;
3497	struct isci_request *request;
3498
3499
3500	/* get pointer to dma memory. This actually points
3501	 * to both the isci_request object and the
3502	 * core's request object. The isci object is at the
3503	 * beginning of the memory allocated here.
3504	 */
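	/*
	 * Element layout, as assumed above (a sketch):
	 *
	 *	handle -> +---------------------------+
	 *	          | struct isci_request       |
	 *	          |   .sci (core request)     |
	 *	          |   protocol-specific data  |
	 *	          +---------------------------+
	 */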
3505	request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
3506	if (!request) {
3507		dev_warn(&isci_host->pdev->dev,
3508			 "%s: dma_pool_alloc returned NULL\n", __func__);
3509		return -ENOMEM;
3510	}
3511
3512	/* initialize the request object. */
3513	spin_lock_init(&request->state_lock);
3514	request->request_daddr = handle;
3515	request->isci_host = isci_host;
3516	request->isci_device = isci_device;
3517	request->io_request_completion = NULL;
3518	request->terminated = false;
3519
3520	request->num_sg_entries = 0;
3521
3522	request->complete_in_target = false;
3523
3524	INIT_LIST_HEAD(&request->completed_node);
3525	INIT_LIST_HEAD(&request->dev_node);
3526
3527	*isci_request = request;
3528	isci_request_change_state(request, allocated);
3529
3530	return ret;
3531}
3532
3533static int isci_request_alloc_io(
3534	struct isci_host *isci_host,
3535	struct sas_task *task,
3536	struct isci_request **isci_request,
3537	struct isci_remote_device *isci_device,
3538	gfp_t gfp_flags)
3539{
3540	int retval = isci_request_alloc_core(isci_host, isci_request,
3541					     isci_device, gfp_flags);
3542
3543	if (!retval) {
3544		(*isci_request)->ttype_ptr.io_task_ptr = task;
3545		(*isci_request)->ttype                 = io_task;
3546
3547		task->lldd_task = *isci_request;
3548	}
3549	return retval;
3550}
3551
3552/**
3553 * isci_request_alloc_tmf() - This function gets the request object from the
3554 *    isci_host dma cache and initializes the relevant TMF fields.
3555 * @isci_host: This parameter specifies the ISCI host object
3556 * @isci_tmf: This parameter is the task management request to be executed.
3557 * @isci_request: This parameter will contain the pointer to the new
3558 *    isci_request object.
3559 * @isci_device: This parameter is the pointer to the isci remote device object
3560 *    that is the destination for this request.
3561 * @gfp_flags: This parameter specifies the os allocation flags.
3562 *
3563 * Returns 0 on success, or -ENOMEM on allocation failure.
3564 */
3565int isci_request_alloc_tmf(
3566	struct isci_host *isci_host,
3567	struct isci_tmf *isci_tmf,
3568	struct isci_request **isci_request,
3569	struct isci_remote_device *isci_device,
3570	gfp_t gfp_flags)
3571{
3572	int retval = isci_request_alloc_core(isci_host, isci_request,
3573					     isci_device, gfp_flags);
3574
3575	if (!retval) {
3576
3577		(*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
3578		(*isci_request)->ttype = tmf_task;
3579	}
3580	return retval;
3581}
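
/*
 * Typical use from the task management path (a sketch; the variable names
 * here are illustrative, not taken from a specific call site):
 *
 *	struct isci_request *ireq;
 *	struct isci_tmf tmf;
 *
 *	...fill in the tmf fields...
 *	if (isci_request_alloc_tmf(ihost, &tmf, &ireq, idev, GFP_ATOMIC))
 *		...handle the allocation failure...
 */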
3582
3583/**
3584 * isci_request_execute() - This function allocates the isci_request object,
3585 *    fills in some common fields, builds the request, and starts the I/O.
3586 * @isci_host: This parameter specifies the ISCI host object
3587 * @task: This parameter is the task struct from the upper layer driver.
3588 * @isci_request: This parameter will contain the pointer to the new
3589 *    isci_request object.
3590 * @gfp_flags: This parameter specifies the os allocation flags.
3591 *
3592 * Returns 0 on success, or a specific failure code.
3593 */
3594int isci_request_execute(
3595	struct isci_host *isci_host,
3596	struct sas_task *task,
3597	struct isci_request **isci_request,
3598	gfp_t gfp_flags)
3599{
3600	int ret = 0;
3601	struct scic_sds_remote_device *sci_device;
3602	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3603	struct isci_remote_device *isci_device;
3604	struct isci_request *request;
3605	unsigned long flags;
3606
3607	isci_device = task->dev->lldd_dev;
3608	sci_device = &isci_device->sci;
3609
3610	/* do common allocation and init of request object. */
3611	ret = isci_request_alloc_io(
3612		isci_host,
3613		task,
3614		&request,
3615		isci_device,
3616		gfp_flags
3617		);
3618
3619	if (ret)
3620		goto out;
3621
3622	status = isci_io_request_build(isci_host, request, isci_device);
3623	if (status != SCI_SUCCESS) {
3624		dev_warn(&isci_host->pdev->dev,
3625			 "%s: request_construct failed - status = 0x%x\n",
3626			 __func__,
3627			 status);
3628		goto out;
3629	}
3630
3631	spin_lock_irqsave(&isci_host->scic_lock, flags);
3632
3633	/* send the request, let the core assign the IO TAG.	*/
3634	status = scic_controller_start_io(&isci_host->sci, sci_device,
3635					  &request->sci,
3636					  SCI_CONTROLLER_INVALID_IO_TAG);
3637	if (status != SCI_SUCCESS &&
3638	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3639		dev_warn(&isci_host->pdev->dev,
3640			 "%s: failed request start (0x%x)\n",
3641			 __func__, status);
3642		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
3643		goto out;
3644	}
3645
3646	/* Either I/O started OK, or the core has signaled that
3647	 * the device needs a target reset.
3648	 *
3649	 * In either case, hold onto the I/O for later.
3650	 *
3651	 * Update its status and add it to the list in the
3652	 * remote device object.
3653	 */
3654	isci_request_change_state(request, started);
3655	list_add(&request->dev_node, &isci_device->reqs_in_process);
3656
3657	if (status == SCI_SUCCESS) {
3658		/* Save the tag for possible task mgmt later. */
3659		request->io_tag = request->sci.io_tag;
3660	} else {
3661		/* The request did not really start in the
3662		 * hardware, so mark it terminated here so
3663		 * that no termination will be attempted.
3664		 */
3665		request->terminated = true;
3666	}
3667	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
3668
3669	if (status ==
3670	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3671		/* Signal libsas that we need the SCSI error
3672		 * handler thread to work on this I/O and that
3673		 * we want a device reset.
3674		 */
3675		spin_lock_irqsave(&task->task_state_lock, flags);
3676		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3677		spin_unlock_irqrestore(&task->task_state_lock, flags);
3678
3679		/* Cause this task to be scheduled in the SCSI error
3680		 * handler thread.
3681		 */
3682		isci_execpath_callback(isci_host, task,
3683				       sas_task_abort);
3684
3685		/* Change the status, since we are holding
3686		 * the I/O until it is managed by the SCSI
3687		 * error handler.
3688		 */
3689		status = SCI_SUCCESS;
3690	}
3691
3692 out:
3693	if (status != SCI_SUCCESS) {
3694		/* release dma memory on failure. */
3695		isci_request_free(isci_host, request);
3696		request = NULL;
3697		ret = SCI_FAILURE;
3698	}
3699
3700	*isci_request = request;
3701	return ret;
3702}
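
/*
 * For context, the usual path into isci_request_execute() (a sketch of the
 * call chain; the intermediate function lives in this driver's task layer
 * and its exact shape may differ by revision):
 *
 *	libsas ->lldd_execute_task()
 *	    -> isci_task_execute_task()
 *	        -> isci_request_execute(ihost, task, &ireq, gfp_flags);
 */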
3703