request.c revision 5dec6f4e41340196d223caf922578c44dfe2295a
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index)
{
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg   = NULL;
	struct scu_sgl_element_pair *prev_sg  = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

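		/*
		 * Each scu_sgl_element_pair carries two SGL elements (A and
		 * B).  The first two pairs live in the task context itself
		 * (sgl_pair_ab/sgl_pair_cd); later pairs come from sg_table
		 * and are chained together through the next_pair_{upper,lower}
		 * physical addresses written below.
		 */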
		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

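/*
 * Fill in the SSP COMMAND information unit for @sci_req: the LUN, the task
 * attribute/priority, and the CDB (byte swapped into the dword ordering the
 * hardware expects).
 */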
static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

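/*
 * Fill in the SSP TASK information unit for a task management request: the
 * LUN, the task management function code, and the tag of the I/O being
 * managed (or SCI_CONTROLLER_INVALID_IO_TAG when none applies).
 */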
static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

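	/*
	 * post_context is the value later written to the hardware's post
	 * queue: it encodes the request type, protocol engine group, and
	 * logical port.  If the user already assigned an io_tag, the task
	 * context index (TCi) can be folded in now; otherwise it is OR'd in
	 * by the start handler once a TCi is allocated.
	 */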
	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.  The
 *    following important settings are utilized: -# priority ==
 *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
 *    ahead of other tasks destined for the same Remote Node. -# task_type ==
 *    SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw task frame is being
 *    utilized to perform task management. -#
 *    control_frame == 1.  This ensures that the proper endianness is set so
 *    that the bytes are transmitted in the right order for a task frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame                = 1;
	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes        = 0;
	task_context->type.ssp.frame_type          = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment for
 * the command buffer is complete. Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;
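
	/*
	 * The H2D register FIS is five dwords.  The first dword is carried
	 * in the task context itself (type.words[0] below), which is why the
	 * command IU length and the command buffer DMA address exclude it.
	 */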
	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_request_construct_task_context(sci_req, task_context);

	task_context->control_frame         = 0;
	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type     = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

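	/*
	 * For PIO, copy_rx_frame selects whether received data is copied out
	 * to the user's buffers (so an SGL is built and request_current
	 * tracks our position in it) or simply discarded.
	 */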
	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values is consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}

static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

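	/*
	 * Dispatch on the flavor of SATA transfer: SRST task management and
	 * non-data commands use a raw H2D register FIS; NCQ and DMA commands
	 * use the silicon-optimized path; everything else falls back to PIO.
	 */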
	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	bool copy;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir != DMA_NONE);

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}

enum sci_status
scic_sds_request_start(struct scic_sds_request *request)
{
	if (request->device_sequence !=
	    scic_sds_remote_device_get_sequence(request->target_device))
		return SCI_FAILURE;

	if (request->state_handlers->start_handler)
		return request->state_handlers->start_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request requested to start while in wrong "
		 "state %d\n",
		 __func__,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *request)
{
	if (request->state_handlers->abort_handler)
		return request->state_handlers->abort_handler(request);

	dev_warn(scic_to_dev(request->owning_controller),
		"%s: SCIC IO Request requested to abort while in wrong "
		"state %d\n",
		__func__,
		sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/**
 *
 * @request: The SCIC_SDS_IO_REQUEST_T object for which the frame handler
 *    is to be invoked.
 * @frame_index: The frame index returned by the hardware for the request
 *    object.
 *
 * This method invokes the core state frame handler for the
 * SCIC_SDS_IO_REQUEST_T object. Returns enum sci_status.
 */
enum sci_status scic_sds_io_request_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	if (request->state_handlers->frame_handler)
		return request->state_handlers->frame_handler(request, frame_index);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given unexpected frame %x while in "
		 "state %d\n",
		 __func__,
		 frame_index,
		 sci_base_state_machine_get_state(&request->state_machine));

	scic_sds_controller_release_frame(request->owning_controller, frame_index);
	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

/*
 * This method implements the action taken when a constructed
 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
 * This method will, if necessary, allocate a TCi for the io request object and
 * then will, if necessary, copy the constructed TC data into the actual TC
 * buffer.  If everything is successful the post context field is updated with
 * the TCi so the controller can post the request to the hardware. Returns
 * SCI_SUCCESS or SCI_FAILURE_INSUFFICIENT_RESOURCES.
 */
static enum sci_status scic_sds_request_constructed_state_start_handler(
	struct scic_sds_request *request)
{
	struct scu_task_context *task_context;

	if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		request->io_tag =
			scic_controller_allocate_io_tag(request->owning_controller);
	}

	/* Record the IO Tag in the request */
	if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = request->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = request->io_tag;
			task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/*
			 * STP/SATA Frame
			 * task_context->type.stp.ncq_tag = request->ncq_tag; */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (request->was_tag_assigned_by_user == false) {
			scic_sds_controller_copy_task_context(
				request->owning_controller, request);
		}

		/* Add the io tag value to the post_context */
		request->post_context |= scic_sds_io_tag_get_index(request->io_tag);

		/* Everything is good, go ahead and change state */
		sci_base_state_machine_change_state(&request->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request. Since the request
 * has not yet been posted to the hardware the request transitions to the
 * completed state. Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_constructed_state_abort_handler(
	struct scic_sds_request *request)
{
	/*
	 * This request has been terminated by the user; make sure that the
	 * correct status code is returned */
	scic_sds_request_set_status(request,
		SCU_TASK_DONE_TASK_ABORT,
		SCI_FAILURE_IO_TERMINATED);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req)
{
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	return SCI_SUCCESS;
}

/*
 * scic_sds_request_started_state_tc_completion_handler() - This method
 *    processes TC (task context) completions for normal IO requests (i.e.
 *    Task/Abort Completions of type 0).  This method will update the
 *    SCIC_SDS_IO_REQUEST_T::status field.
 * @sci_req: This parameter specifies the request for which a completion
 *    occurred.
 * @completion_code: This parameter specifies the completion code received from
 *    the SCU.
 *
 */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code other than 0 is bad:
	 *       decode 0x003C0000 to determine SDMA status
	 */
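	/*
	 * SCU_MAKE_COMPLETION_STATUS(x) is the task-done code shifted into
	 * the TL status field (compare the open-coded cases in the
	 * aborting-state handler below, which use
	 * SCU_COMPLETION_TL_STATUS_SHIFT directly), so each case here
	 * matches against the masked TL status.
	 */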
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
	{
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
	}
	break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
	{
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * @todo With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;
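
		/*
		 * Per the SSP response IU definition, DATAPRES 0x01 means
		 * response data is present and 0x02 means sense data is
		 * present; either way the IU carries more than bare status.
		 */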
		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
{
	if (request->state_handlers->tc_completion_handler)
		return request->state_handlers->tc_completion_handler(request, completion_code);

	dev_warn(scic_to_dev(request->owning_controller),
		"%s: SCIC IO Request given task completion notification %x "
		"while in wrong state %d\n",
		__func__,
		completion_code,
		sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request. This method
 * first determines the frame type received.  If this is a response frame then
 * the response data is copied to the io request response buffer for processing
 * at completion time. If the frame type is not a response buffer an error is
 * logged. Returns SCI_SUCCESS or SCI_FAILURE_INVALID_PARAMETER_VALUE.
 */
static enum sci_status
scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
					     u32 frame_index)
{
	enum sci_status status;
	u32 *frame_header;
	struct ssp_frame_hdr ssp_hdr;
	ssize_t word_cnt;

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		(void **)&frame_header);

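	/*
	 * The frame header arrives in SAS (big-endian) byte order; swab each
	 * dword into CPU order before examining the frame type.
	 */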
	word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
	sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

	if (ssp_hdr.frame_type == SSP_RESPONSE) {
		struct ssp_response_iu *resp_iu;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			(void **)&resp_iu);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       resp_iu, word_cnt);

		resp_iu = &sci_req->ssp.rsp;

		if ((resp_iu->datapres == 0x01) ||
		    (resp_iu->datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
	} else {
		/* This was not a response frame, why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p received unexpected "
			"frame %d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			ssp_hdr.frame_type);
	}

	/*
	 * In any case we are done with this frame buffer; return it to the
	 * controller
	 */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  COMPLETED STATE HANDLERS
 * ***************************************************************************** */


/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request. This method frees up
 * any io request resources that have been allocated and transitions the
 * request to its final state. TODO: consider stopping the state machine
 * instead of transitioning to the final state. Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (!request->was_tag_assigned_by_user) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

/*
 * *****************************************************************************
 * *  ABORTING STATE HANDLERS
 * ***************************************************************************** */

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_terminate() request. This method is the
 * io request aborting state abort handler.  On receipt of multiple terminate
 * requests the io request will transition to the completed state.  This
 * should not happen in normal operation. Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_task_completion() request. This method
 * decodes the completion type while waiting for the abort task complete
 * notification. When the abort task complete is received the io request
 * transitions to the completed state. Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion? */
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_frame_handler() request. This method
 * discards the unsolicited frame since we are waiting for the abort task
 * completion. Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/* TODO: Is it even possible to get an unsolicited frame in the aborting state? */

	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}

/**
 * This method processes the completion's transport layer (TL) status to
 *    determine if the RAW task management frame was sent successfully. If the
 *    raw frame was sent successfully, then the state for the task request
 *    transitions to waiting for a response frame.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the tc completion handler was successful. Currently this
 * method always returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/*
		 * Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again. */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n",
			 __func__,
			 sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/**
 * This method is responsible for processing a terminate/abort request for this
 *    TC while the request is waiting for the task management response
 *    unsolicited frame.
 * @request: This parameter specifies the request for which the
 *    termination was requested.
 *
 * This method returns an indication as to whether the abort request was
 * successfully handled. TODO: update to ensure the received UF doesn't cause
 * damage to subsequent requests (i.e. put the extended tag in a holding
 * pattern for this particular device).
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_abort_handler(
	struct scic_sds_request *request)
{
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/**
 * This method processes an unsolicited frame while the task mgmt request is
 *    waiting for a response frame.  It will copy the response data, release
 *    the unsolicited frame, and transition the request to the
 *    SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @request: This parameter specifies the request for which the
 *    unsolicited frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This method returns an indication of whether the TC response frame was
 * handled successfully or not. Currently SCI_SUCCESS is always returned,
 * indicating successful processing of the TC response. TODO: update to check
 * the frame type and make sure it is a response frame.
 */
static enum sci_status scic_sds_ssp_task_request_await_tc_response_frame_handler(
	struct scic_sds_request *request,
	u32 frame_index)
{
	scic_sds_io_request_copy_response(request);

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	scic_sds_controller_release_frame(request->owning_controller,
			frame_index);
	return SCI_SUCCESS;
}

/**
 * This method processes an abnormal TC completion while the SMP request is
 *    waiting for a response frame.  It decides what happened to the IO based
 *    on TC completion status.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicates if the tc completion handler was successful. Currently this
 * method always returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_smp_request_await_response_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/*
		 * In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we complete
		 * the IO anyway. */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/*
		 * These statuses have been seen in a specific LSI expander,
		 * which sometimes is not able to send an SMP response within
		 * 2 ms.  This causes our hardware to break the connection and
		 * set the TC completion with one of these SMP_XXX_XX_ERR
		 * statuses.  For this type of error, we ask the scic user to
		 * retry the request. */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

/*
 * This function processes an unsolicited frame while the SMP request is
 *    waiting for a response frame.  It will copy the response data, release
 *    the unsolicited frame, and transition the request to the
 *    SCI_BASE_REQUEST_STATE_COMPLETED state.
 * @sci_req: This parameter specifies the request for which the
 *    unsolicited frame was received.
 * @frame_index: This parameter indicates the unsolicited frame index that
 *    should contain the response.
 *
 * This function returns an indication of whether the response frame was
 * handled successfully or not. Currently SCI_SUCCESS is always returned,
 * indicating successful processing of the TC response.
 */
static enum sci_status
scic_sds_smp_request_await_response_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index)
{
	enum sci_status status;
	void *frame_header;
	struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
	ssize_t word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);

	status = scic_sds_unsolicited_frame_control_get_header(
		&(scic_sds_request_get_controller(sci_req)->uf_control),
		frame_index,
		&frame_header);

	/* byte swap the header. */
	sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);

	if (rsp_hdr->frame_type == SMP_RESPONSE) {
		void *smp_resp;

		status = scic_sds_unsolicited_frame_control_get_buffer(
			&(scic_sds_request_get_controller(sci_req)->uf_control),
			frame_index,
			&smp_resp);

		word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
			sizeof(u32);

		sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
			       smp_resp, word_cnt);

		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
	} else {
		/* This was not a response frame, why did it get forwarded? */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC SMP Request 0x%p received unexpected frame "
			"%d type 0x%02x\n",
			__func__,
			sci_req,
			frame_index,
			rsp_hdr->frame_type);

		scic_sds_request_set_status(
			sci_req,
			SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
	}

	scic_sds_controller_release_frame(sci_req->owning_controller,
					  frame_index);

	return SCI_SUCCESS;
}
1567
1568/**
 * This method processes the completion's transport layer (TL) status to
 *    determine if the SMP request was sent successfully.  If so, the
 *    request transitions to the completed state.
 * @sci_req: This parameter specifies the request for which the TC
 *    completion was received.
 * @completion_code: This parameter indicates the completion status information
 *    for the TC.
 *
 * Indicate if the TC completion was handled successfully.  SCI_SUCCESS is
 * currently always returned.
1580 */
1581static enum sci_status scic_sds_smp_request_await_tc_completion_tc_completion_handler(
1582	struct scic_sds_request *sci_req,
1583	u32 completion_code)
1584{
1585	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1586	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1587		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1588					    SCI_SUCCESS);
1589
1590		sci_base_state_machine_change_state(&sci_req->state_machine,
1591						    SCI_BASE_REQUEST_STATE_COMPLETED);
1592		break;
1593
1594	default:
1595		/*
1596		 * All other completion status cause the IO to be complete.  If a NAK
1597		 * was received, then it is up to the user to retry the request. */
1598		scic_sds_request_set_status(
1599			sci_req,
1600			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1601			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1602			);
1603
1604		sci_base_state_machine_change_state(
1605			&sci_req->state_machine,
1606			SCI_BASE_REQUEST_STATE_COMPLETED);
1607		break;
1608	}
1609
1610	return SCI_SUCCESS;
1611}
1612
1613void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1614				     u16 ncq_tag)
1615{
1616	/**
1617	 * @note This could be made to return an error to the user if the user
1618	 *       attempts to set the NCQ tag in the wrong state.
1619	 */
1620	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
1621}
1622
1623/**
1624 *
1625 * @sci_req:
1626 *
1627 * Get the next SGL element from the request. - Check on which SGL element pair
1628 * we are working - if working on SLG pair element A - advance to element B -
1629 * else - check to see if there are more SGL element pairs for this IO request
1630 * - if there are more SGL element pairs - advance to the next pair and return
1631 * element A struct scu_sgl_element*
1632 */
1633static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1634{
1635	struct scu_sgl_element *current_sgl;
1636	struct scic_sds_request *sci_req = to_sci_req(stp_req);
1637	struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1638
1639	if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1640		if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1641		    pio_sgl->sgl_pair->B.address_upper == 0) {
1642			current_sgl = NULL;
1643		} else {
1644			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1645			current_sgl = &pio_sgl->sgl_pair->B;
1646		}
1647	} else {
1648		if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1649		    pio_sgl->sgl_pair->next_pair_upper == 0) {
1650			current_sgl = NULL;
1651		} else {
1652			u64 phys_addr;
1653
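			/* The next pair's physical address is stored as two
			 * 32-bit halves; combine them and map the result back
			 * to a CPU virtual address. */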
1654			phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1655			phys_addr <<= 32;
1656			phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1657
1658			pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1659			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1660			current_sgl = &pio_sgl->sgl_pair->A;
1661		}
1662	}
1663
1664	return current_sgl;
1665}
1666
1667/**
1668 *
1669 * @sci_req:
1670 * @completion_code:
1671 *
1672 * This method processes a TC completion.  The expected TC completion is for
1673 * the transmission of the H2D register FIS containing the SATA/STP non-data
 * request.  This method always successfully processes the TC completion;
 * SCI_SUCCESS is always returned.
1676 */
1677static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
1678	struct scic_sds_request *sci_req,
1679	u32 completion_code)
1680{
1681	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1682	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1683		scic_sds_request_set_status(
1684			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1685			);
1686
1687		sci_base_state_machine_change_state(
1688			&sci_req->state_machine,
1689			SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
1690			);
1691		break;
1692
1693	default:
1694		/*
1695		 * All other completion status cause the IO to be complete.  If a NAK
1696		 * was received, then it is up to the user to retry the request. */
1697		scic_sds_request_set_status(
1698			sci_req,
1699			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1700			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1701			);
1702
1703		sci_base_state_machine_change_state(
1704			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1705		break;
1706	}
1707
1708	return SCI_SUCCESS;
1709}
1710
1711/**
1712 *
1713 * @request: This parameter specifies the request for which a frame has been
1714 *    received.
1715 * @frame_index: This parameter specifies the index of the frame that has been
1716 *    received.
1717 *
1718 * This method processes frames received from the target while waiting for a
1719 * device to host register FIS.  If a non-register FIS is received during this
1720 * time, it is treated as a protocol violation from an IO perspective. Indicate
1721 * if the received frame was processed successfully.
1722 */
1723static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
1724	struct scic_sds_request *sci_req,
1725	u32 frame_index)
1726{
1727	enum sci_status status;
1728	struct dev_to_host_fis *frame_header;
1729	u32 *frame_buffer;
1730	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1731	struct scic_sds_controller *scic = sci_req->owning_controller;
1732
1733	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1734							       frame_index,
1735							       (void **)&frame_header);
1736
1737	if (status != SCI_SUCCESS) {
1738		dev_err(scic_to_dev(sci_req->owning_controller),
1739			"%s: SCIC IO Request 0x%p could not get frame header "
1740			"for frame index %d, status %x\n",
1741			__func__, stp_req, frame_index, status);
1742
1743		return status;
1744	}
1745
1746	switch (frame_header->fis_type) {
1747	case FIS_REGD2H:
1748		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1749							      frame_index,
1750							      (void **)&frame_buffer);
1751
1752		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1753						       frame_header,
1754						       frame_buffer);
1755
1756		/* The command has completed with error */
1757		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1758					    SCI_FAILURE_IO_RESPONSE_VALID);
1759		break;
1760
1761	default:
1762		dev_warn(scic_to_dev(scic),
1763			 "%s: IO Request:0x%p Frame Id:%d protocol "
1764			  "violation occurred\n", __func__, stp_req,
1765			  frame_index);
1766
1767		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1768					    SCI_FAILURE_PROTOCOL_VIOLATION);
1769		break;
1770	}
1771
1772	sci_base_state_machine_change_state(&sci_req->state_machine,
1773					    SCI_BASE_REQUEST_STATE_COMPLETED);
1774
1775	/* Frame has been decoded return it to the controller */
1776	scic_sds_controller_release_frame(scic, frame_index);
1777
1778	return status;
1779}
1780
1781#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
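/* Data-in PIO receives at most one frame's worth (1KB) of payload per DATA
 * FIS; scic_sds_stp_request_pio_data_in_copy_data() copies in chunks of up
 * to this size until pio_transfer_bytes reaches zero.
 */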
1782
/* Transmit a DATA FIS from (current sgl + offset) for the input parameter
 * length.  The current sgl and offset are already stored in the IO request.
 */
static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data_frame(
	struct scic_sds_request *sci_req,
	u32 length)
1789{
1790	struct scic_sds_controller *scic = sci_req->owning_controller;
1791	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1792	struct scu_task_context *task_context;
1793	struct scu_sgl_element *current_sgl;
1794
	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
	 * the data from current_sgl + offset for the input length.
	 */
1798	task_context = scic_sds_controller_get_task_context_buffer(scic,
1799								   sci_req->io_tag);
1800
1801	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1802		current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1803	else
1804		current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1805
1806	/* update the TC */
1807	task_context->command_iu_upper = current_sgl->address_upper;
1808	task_context->command_iu_lower = current_sgl->address_lower;
1809	task_context->transfer_length_bytes = length;
1810	task_context->type.stp.fis_type = FIS_DATA;
1811
1812	/* send the new TC out. */
1813	return scic_controller_continue_io(sci_req);
1814}
1815
1816static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1817{
1818
1819	struct scu_sgl_element *current_sgl;
1820	u32 sgl_offset;
1821	u32 remaining_bytes_in_current_sgl = 0;
1822	enum sci_status status = SCI_SUCCESS;
1823	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1824
1825	sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1826
1827	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1828		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1829		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1830	} else {
1831		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1832		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1833	}
1834
1835
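	/* Two cases: either the remaining PIO payload covers the rest of the
	 * current SGL element, in which case the remainder of the element is
	 * sent and we advance to the next element at offset zero; or the
	 * payload ends within the element, in which case only the remaining
	 * payload is sent and the saved offset is advanced past it. */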
1836	if (stp_req->type.pio.pio_transfer_bytes > 0) {
1837		if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1838			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1840			if (status == SCI_SUCCESS) {
1841				stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1842
1843				/* update the current sgl, sgl_offset and save for future */
1844				current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1845				sgl_offset = 0;
1846			}
1847		} else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1848			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1850
1851			if (status == SCI_SUCCESS) {
1852				/* Sgl offset will be adjusted and saved for future */
1853				sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1854				current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1855				stp_req->type.pio.pio_transfer_bytes = 0;
1856			}
1857		}
1858	}
1859
	if (status == SCI_SUCCESS)
		stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1863
1864	return status;
1865}
1866
1867/**
1868 *
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the specified length to the IO request's
 * SGL-specified data region.  Returns an enum sci_status.
1875 */
1876static enum sci_status
1877scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1878						  u8 *data_buf, u32 len)
1879{
1880	struct scic_sds_request *sci_req;
1881	struct isci_request *ireq;
1882	u8 *src_addr;
1883	int copy_len;
1884	struct sas_task *task;
1885	struct scatterlist *sg;
1886	void *kaddr;
1887	int total_len = len;
1888
1889	sci_req = to_sci_req(stp_req);
1890	ireq = sci_req_to_ireq(sci_req);
1891	task = isci_request_access_task(ireq);
1892	src_addr = data_buf;
1893
1894	if (task->num_scatter > 0) {
1895		sg = task->scatter;
1896
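		/* Walk the scatterlist one element at a time, mapping each
		 * page with kmap_atomic() (this path may run in atomic
		 * context) and copying up to the element's length before
		 * advancing to the next element. */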
1897		while (total_len > 0) {
1898			struct page *page = sg_page(sg);
1899
1900			copy_len = min_t(int, total_len, sg_dma_len(sg));
1901			kaddr = kmap_atomic(page, KM_IRQ0);
1902			memcpy(kaddr + sg->offset, src_addr, copy_len);
1903			kunmap_atomic(kaddr, KM_IRQ0);
1904			total_len -= copy_len;
1905			src_addr += copy_len;
1906			sg = sg_next(sg);
1907		}
1908	} else {
1909		BUG_ON(task->total_xfer_len < total_len);
1910		memcpy(task->scatter, src_addr, total_len);
1911	}
1912
1913	return SCI_SUCCESS;
1914}
1915
1916/**
1917 *
1918 * @sci_req: The PIO DATA IN request that is to receive the data.
1919 * @data_buffer: The buffer to copy from.
1920 *
 * Copy the data buffer to the IO request's data region.  Returns an enum
 * sci_status.
1922 */
1923static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1924	struct scic_sds_stp_request *sci_req,
1925	u8 *data_buffer)
1926{
1927	enum sci_status status;
1928
1929	/*
1930	 * If there is less than 1K remaining in the transfer request
1931	 * copy just the data for the transfer */
1932	if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1933		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1934			sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1935
1936		if (status == SCI_SUCCESS)
1937			sci_req->type.pio.pio_transfer_bytes = 0;
1938	} else {
		/* We are transferring the whole frame, so copy it all. */
1940		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1941			sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1942
1943		if (status == SCI_SUCCESS)
1944			sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1945	}
1946
1947	return status;
1948}
1949
1950/**
1951 *
1952 * @sci_req:
1953 * @completion_code:
1954 *
 * Returns an enum sci_status.
1956 */
1957static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
1958	struct scic_sds_request *sci_req,
1959	u32 completion_code)
1960{
1961	enum sci_status status = SCI_SUCCESS;
1962
1963	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1964	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1965		scic_sds_request_set_status(
1966			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1967			);
1968
1969		sci_base_state_machine_change_state(
1970			&sci_req->state_machine,
1971			SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1972			);
1973		break;
1974
1975	default:
1976		/*
1977		 * All other completion status cause the IO to be complete.  If a NAK
1978		 * was received, then it is up to the user to retry the request. */
1979		scic_sds_request_set_status(
1980			sci_req,
1981			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1982			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1983			);
1984
1985		sci_base_state_machine_change_state(
1986			&sci_req->state_machine,
1987			SCI_BASE_REQUEST_STATE_COMPLETED
1988			);
1989		break;
1990	}
1991
1992	return status;
1993}
1994
1995static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
1996									  u32 frame_index)
1997{
1998	struct scic_sds_controller *scic = sci_req->owning_controller;
1999	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2000	struct isci_request *ireq = sci_req_to_ireq(sci_req);
2001	struct sas_task *task = isci_request_access_task(ireq);
2002	struct dev_to_host_fis *frame_header;
2003	enum sci_status status;
2004	u32 *frame_buffer;
2005
2006	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2007							       frame_index,
2008							       (void **)&frame_header);
2009
2010	if (status != SCI_SUCCESS) {
2011		dev_err(scic_to_dev(scic),
2012			"%s: SCIC IO Request 0x%p could not get frame header "
2013			"for frame index %d, status %x\n",
2014			__func__, stp_req, frame_index, status);
2015		return status;
2016	}
2017
2018	switch (frame_header->fis_type) {
2019	case FIS_PIO_SETUP:
2020		/* Get from the frame buffer the PIO Setup Data */
2021		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2022							      frame_index,
2023							      (void **)&frame_buffer);
2024
		/* Get the data from the PIO Setup.  The SCU hardware returns
		 * the first word in the frame_header and the rest of the data
		 * in the frame buffer, so we need to back up one dword.
		 */
2029
		/* transfer_count: first 16 bits in the 4th dword */
2031		stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
2032
2033		/* ending_status: 4th byte in the 3rd dword */
2034		stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
2035
2036		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2037						       frame_header,
2038						       frame_buffer);
2039
2040		sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
2041
2042		/* The next state is dependent on whether the
2043		 * request was PIO Data-in or Data out
2044		 */
2045		if (task->data_dir == DMA_FROM_DEVICE) {
2046			sci_base_state_machine_change_state(&sci_req->state_machine,
2047							    SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
2048		} else if (task->data_dir == DMA_TO_DEVICE) {
2049			/* Transmit data */
2050			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2051			if (status != SCI_SUCCESS)
2052				break;
2053			sci_base_state_machine_change_state(&sci_req->state_machine,
2054							    SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
2055		}
2056		break;
2057	case FIS_SETDEVBITS:
2058		sci_base_state_machine_change_state(&sci_req->state_machine,
2059						    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2060		break;
2061	case FIS_REGD2H:
2062		if (frame_header->status & ATA_BUSY) {
2063			/* Now why is the drive sending a D2H Register FIS when
2064			 * it is still busy?  Do nothing since we are still in
2065			 * the right state.
2066			 */
2067			dev_dbg(scic_to_dev(scic),
2068				"%s: SCIC PIO Request 0x%p received "
2069				"D2H Register FIS with BSY status "
2070				"0x%x\n", __func__, stp_req,
2071				frame_header->status);
2072			break;
2073		}
2074
2075		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2076							      frame_index,
2077							      (void **)&frame_buffer);
2078
		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2080						       frame_header,
2081						       frame_buffer);
2082
2083		scic_sds_request_set_status(sci_req,
2084					    SCU_TASK_DONE_CHECK_RESPONSE,
2085					    SCI_FAILURE_IO_RESPONSE_VALID);
2086
2087		sci_base_state_machine_change_state(&sci_req->state_machine,
2088						    SCI_BASE_REQUEST_STATE_COMPLETED);
2089		break;
2090	default:
2091		/* FIXME: what do we do here? */
2092		break;
2093	}
2094
2095	/* Frame is decoded return it to the controller */
2096	scic_sds_controller_release_frame(scic, frame_index);
2097
2098	return status;
2099}
2100
2101static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
2102										 u32 frame_index)
2103{
2104	enum sci_status status;
2105	struct dev_to_host_fis *frame_header;
2106	struct sata_fis_data *frame_buffer;
2107	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2108	struct scic_sds_controller *scic = sci_req->owning_controller;
2109
2110	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2111							       frame_index,
2112							       (void **)&frame_header);
2113
2114	if (status != SCI_SUCCESS) {
2115		dev_err(scic_to_dev(scic),
2116			"%s: SCIC IO Request 0x%p could not get frame header "
2117			"for frame index %d, status %x\n",
2118			__func__, stp_req, frame_index, status);
2119		return status;
2120	}
2121
2122	if (frame_header->fis_type == FIS_DATA) {
2123		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
2124			sci_req->saved_rx_frame_index = frame_index;
2125			stp_req->type.pio.pio_transfer_bytes = 0;
2126		} else {
2127			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2128								      frame_index,
2129								      (void **)&frame_buffer);
2130
2131			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
2132									    (u8 *)frame_buffer);
2133
2134			/* Frame is decoded return it to the controller */
2135			scic_sds_controller_release_frame(scic, frame_index);
2136		}
2137
2138		/* Check for the end of the transfer, are there more
2139		 * bytes remaining for this data transfer
2140		 */
2141		if (status != SCI_SUCCESS ||
2142		    stp_req->type.pio.pio_transfer_bytes != 0)
2143			return status;
2144
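		/* If the E_Status from the PIO Setup FIS no longer has BSY
		 * set, the device considers the command complete; otherwise
		 * wait for the next PIO Setup or D2H Register FIS. */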
2145		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
2146			scic_sds_request_set_status(sci_req,
2147						    SCU_TASK_DONE_CHECK_RESPONSE,
2148						    SCI_FAILURE_IO_RESPONSE_VALID);
2149
2150			sci_base_state_machine_change_state(&sci_req->state_machine,
2151							    SCI_BASE_REQUEST_STATE_COMPLETED);
2152		} else {
2153			sci_base_state_machine_change_state(&sci_req->state_machine,
2154							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2155		}
2156	} else {
2157		dev_err(scic_to_dev(scic),
2158			"%s: SCIC PIO Request 0x%p received frame %d "
2159			"with fis type 0x%02x when expecting a data "
2160			"fis.\n", __func__, stp_req, frame_index,
2161			frame_header->fis_type);
2162
2163		scic_sds_request_set_status(sci_req,
2164					    SCU_TASK_DONE_GOOD,
2165					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
2166
2167		sci_base_state_machine_change_state(&sci_req->state_machine,
2168						    SCI_BASE_REQUEST_STATE_COMPLETED);
2169
2170		/* Frame is decoded return it to the controller */
2171		scic_sds_controller_release_frame(scic, frame_index);
2172	}
2173
2174	return status;
2175}
2176
2177
2178/**
2179 *
2180 * @sci_req:
2181 * @completion_code:
2182 *
 * Returns an enum sci_status.
2184 */
static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
2189{
2190	enum sci_status status = SCI_SUCCESS;
2191	bool all_frames_transferred = false;
2192	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2193
2194	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2195	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2196		/* Transmit data */
2197		if (stp_req->type.pio.pio_transfer_bytes != 0) {
2198			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
2199			if (status == SCI_SUCCESS) {
2200				if (stp_req->type.pio.pio_transfer_bytes == 0)
2201					all_frames_transferred = true;
2202			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * This happens when all of the data is written on the
			 * first pass after the PIO Setup FIS is received.
			 */
			all_frames_transferred = true;
2209		}
2210
2211		/* all data transferred. */
2212		if (all_frames_transferred) {
2213			/*
2214			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
2215			 * and wait for PIO_SETUP fis / or D2H REg fis. */
2216			sci_base_state_machine_change_state(
2217				&sci_req->state_machine,
2218				SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2219				);
2220		}
2221		break;
2222
2223	default:
2224		/*
2225		 * All other completion status cause the IO to be complete.  If a NAK
2226		 * was received, then it is up to the user to retry the request. */
2227		scic_sds_request_set_status(
2228			sci_req,
2229			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2230			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2231			);
2232
2233		sci_base_state_machine_change_state(
2234			&sci_req->state_machine,
2235			SCI_BASE_REQUEST_STATE_COMPLETED
2236			);
2237		break;
2238	}
2239
2240	return status;
2241}
2242
2243/**
2244 *
2245 * @request: This is the request which is receiving the event.
 * @event_code: This is the event code on which the request is expected to
 *    take action.
2248 *
2249 * This method will handle any link layer events while waiting for the data
 * frame.  Returns an enum sci_status: SCI_SUCCESS or SCI_FAILURE.
2251 */
2252static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
2253	struct scic_sds_request *request,
2254	u32 event_code)
2255{
2256	enum sci_status status;
2257
2258	switch (scu_get_event_specifier(event_code)) {
2259	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
2260		/*
2261		 * We are waiting for data and the SCU has R_ERR the data frame.
2262		 * Go back to waiting for the D2H Register FIS */
2263		sci_base_state_machine_change_state(
2264			&request->state_machine,
2265			SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
2266			);
2267
2268		status = SCI_SUCCESS;
2269		break;
2270
2271	default:
2272		dev_err(scic_to_dev(request->owning_controller),
2273			"%s: SCIC PIO Request 0x%p received unexpected "
2274			"event 0x%08x\n",
2275			__func__, request, event_code);
2276
		/* TODO: should we fail the PIO request when we get an unexpected event? */
2278		status = SCI_FAILURE;
2279		break;
2280	}
2281
2282	return status;
2283}
2284
2285static void scic_sds_stp_request_udma_complete_request(
2286	struct scic_sds_request *request,
2287	u32 scu_status,
2288	enum sci_status sci_status)
2289{
2290	scic_sds_request_set_status(request, scu_status, sci_status);
2291	sci_base_state_machine_change_state(&request->state_machine,
2292		SCI_BASE_REQUEST_STATE_COMPLETED);
2293}
2294
2295static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
2296								       u32 frame_index)
2297{
2298	struct scic_sds_controller *scic = sci_req->owning_controller;
2299	struct dev_to_host_fis *frame_header;
2300	enum sci_status status;
2301	u32 *frame_buffer;
2302
2303	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2304							       frame_index,
2305							       (void **)&frame_header);
2306
2307	if ((status == SCI_SUCCESS) &&
2308	    (frame_header->fis_type == FIS_REGD2H)) {
2309		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2310							      frame_index,
2311							      (void **)&frame_buffer);
2312
2313		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2314						       frame_header,
2315						       frame_buffer);
2316	}
2317
2318	scic_sds_controller_release_frame(scic, frame_index);
2319
2320	return status;
2321}
2322
2323static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
2324	struct scic_sds_request *sci_req,
2325	u32 completion_code)
2326{
2327	enum sci_status status = SCI_SUCCESS;
2328
2329	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2330	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2331		scic_sds_stp_request_udma_complete_request(sci_req,
2332							   SCU_TASK_DONE_GOOD,
2333							   SCI_SUCCESS);
2334		break;
2335	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2336	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2337		/*
2338		 * We must check ther response buffer to see if the D2H Register FIS was
2339		 * received before we got the TC completion. */
2340		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2341			scic_sds_remote_device_suspend(sci_req->target_device,
2342				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2343
2344			scic_sds_stp_request_udma_complete_request(sci_req,
2345								   SCU_TASK_DONE_CHECK_RESPONSE,
2346								   SCI_FAILURE_IO_RESPONSE_VALID);
2347		} else {
2348			/*
2349			 * If we have an error completion status for the TC then we can expect a
2350			 * D2H register FIS from the device so we must change state to wait for it */
2351			sci_base_state_machine_change_state(&sci_req->state_machine,
2352				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
2353		}
2354		break;
2355
2356	/*
2357	 * / @todo Check to see if any of these completion status need to wait for
2358	 * /       the device to host register fis. */
2359	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
2360	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2361	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2362	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2363	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2364	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2365		scic_sds_remote_device_suspend(sci_req->target_device,
2366			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2367	/* Fall through to the default case */
2368	default:
2369		/* All other completion status cause the IO to be complete. */
2370		scic_sds_stp_request_udma_complete_request(sci_req,
2371					SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2372					SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2373		break;
2374	}
2375
2376	return status;
2377}
2378
2379static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
2380	struct scic_sds_request *sci_req,
2381	u32 frame_index)
2382{
2383	enum sci_status status;
2384
	/* Use the general frame handler to copy the response data */
2386	status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
2387
2388	if (status != SCI_SUCCESS)
2389		return status;
2390
2391	scic_sds_stp_request_udma_complete_request(sci_req,
2392						   SCU_TASK_DONE_CHECK_RESPONSE,
2393						   SCI_FAILURE_IO_RESPONSE_VALID);
2394
2395	return status;
2396}
2397
2398enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
2399						    u32 len,
2400						    enum dma_data_direction dir)
2401{
2402	return SCI_SUCCESS;
2403}
2404
2405/**
2406 *
2407 * @sci_req:
2408 * @completion_code:
2409 *
 * This method processes a TC completion.  The expected TC completion is for
 * the transmission of the H2D register FIS with the soft reset (SRST) bit
 * asserted.  This method always successfully processes the TC completion;
 * SCI_SUCCESS is always returned.
2414 */
2415static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
2416	struct scic_sds_request *sci_req,
2417	u32 completion_code)
2418{
2419	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2420	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2421		scic_sds_request_set_status(
2422			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
2423			);
2424
2425		sci_base_state_machine_change_state(
2426			&sci_req->state_machine,
2427			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
2428			);
2429		break;
2430
2431	default:
2432		/*
2433		 * All other completion status cause the IO to be complete.  If a NAK
2434		 * was received, then it is up to the user to retry the request. */
2435		scic_sds_request_set_status(
2436			sci_req,
2437			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2438			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2439			);
2440
2441		sci_base_state_machine_change_state(
2442			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
2443		break;
2444	}
2445
2446	return SCI_SUCCESS;
2447}
2448
2449/**
2450 *
2451 * @sci_req:
2452 * @completion_code:
2453 *
 * This method processes a TC completion.  The expected TC completion is for
 * the transmission of the H2D register FIS with the soft reset (SRST) bit
 * cleared (the diagnostic FIS).  This method always successfully processes
 * the TC completion; SCI_SUCCESS is always returned.
2458 */
2459static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
2460	struct scic_sds_request *sci_req,
2461	u32 completion_code)
2462{
2463	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2464	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2465		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2466					    SCI_SUCCESS);
2467
2468		sci_base_state_machine_change_state(&sci_req->state_machine,
2469			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
2470		break;
2471
2472	default:
2473		/*
2474		 * All other completion status cause the IO to be complete.  If a NAK
2475		 * was received, then it is up to the user to retry the request. */
2476		scic_sds_request_set_status(
2477			sci_req,
2478			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2479			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
2480			);
2481
2482		sci_base_state_machine_change_state(&sci_req->state_machine,
2483				SCI_BASE_REQUEST_STATE_COMPLETED);
2484		break;
2485	}
2486
2487	return SCI_SUCCESS;
2488}
2489
2490/**
2491 *
2492 * @request: This parameter specifies the request for which a frame has been
2493 *    received.
2494 * @frame_index: This parameter specifies the index of the frame that has been
2495 *    received.
2496 *
2497 * This method processes frames received from the target while waiting for a
2498 * device to host register FIS.  If a non-register FIS is received during this
2499 * time, it is treated as a protocol violation from an IO perspective. Indicate
2500 * if the received frame was processed successfully.
2501 */
2502static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
2503	struct scic_sds_request *sci_req,
2504	u32 frame_index)
2505{
2506	enum sci_status status;
2507	struct dev_to_host_fis *frame_header;
2508	u32 *frame_buffer;
2509	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
2510	struct scic_sds_controller *scic = sci_req->owning_controller;
2511
2512	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2513							       frame_index,
2514							       (void **)&frame_header);
2515	if (status != SCI_SUCCESS) {
2516		dev_err(scic_to_dev(scic),
2517			"%s: SCIC IO Request 0x%p could not get frame header "
2518			"for frame index %d, status %x\n",
2519			__func__, stp_req, frame_index, status);
2520		return status;
2521	}
2522
2523	switch (frame_header->fis_type) {
2524	case FIS_REGD2H:
2525		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2526							      frame_index,
2527							      (void **)&frame_buffer);
2528
2529		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2530						       frame_header,
2531						       frame_buffer);
2532
2533		/* The command has completed with error */
2534		scic_sds_request_set_status(sci_req,
2535					    SCU_TASK_DONE_CHECK_RESPONSE,
2536					    SCI_FAILURE_IO_RESPONSE_VALID);
2537		break;
2538
2539	default:
2540		dev_warn(scic_to_dev(scic),
2541			 "%s: IO Request:0x%p Frame Id:%d protocol "
2542			 "violation occurred\n", __func__, stp_req,
2543			 frame_index);
2544
2545		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
2546					    SCI_FAILURE_PROTOCOL_VIOLATION);
2547		break;
2548	}
2549
2550	sci_base_state_machine_change_state(&sci_req->state_machine,
2551					    SCI_BASE_REQUEST_STATE_COMPLETED);
2552
2553	/* Frame has been decoded return it to the controller */
2554	scic_sds_controller_release_frame(scic, frame_index);
2555
2556	return status;
2557}
2558
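/* Per-state handler dispatch table.  Each base request state and
 * protocol-specific substate installs only the handlers that are legal while
 * in that state; an event type with no handler installed is not expected to
 * occur in that state.
 */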
2559static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
2560	[SCI_BASE_REQUEST_STATE_INITIAL] = { },
2561	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
2562		.start_handler		= scic_sds_request_constructed_state_start_handler,
2563		.abort_handler		= scic_sds_request_constructed_state_abort_handler,
2564	},
2565	[SCI_BASE_REQUEST_STATE_STARTED] = {
2566		.abort_handler		= scic_sds_request_started_state_abort_handler,
2567		.tc_completion_handler	= scic_sds_request_started_state_tc_completion_handler,
2568		.frame_handler		= scic_sds_request_started_state_frame_handler,
2569	},
2570	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
2571		.abort_handler		= scic_sds_request_started_state_abort_handler,
2572		.tc_completion_handler	= scic_sds_ssp_task_request_await_tc_completion_tc_completion_handler,
2573	},
2574	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
2575		.abort_handler		= scic_sds_ssp_task_request_await_tc_response_abort_handler,
2576		.frame_handler		= scic_sds_ssp_task_request_await_tc_response_frame_handler,
2577	},
2578	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
2579		.abort_handler		= scic_sds_request_started_state_abort_handler,
2580		.tc_completion_handler	= scic_sds_smp_request_await_response_tc_completion_handler,
2581		.frame_handler		= scic_sds_smp_request_await_response_frame_handler,
2582	},
2583	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
2584		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_smp_request_await_tc_completion_tc_completion_handler,
2586	},
2587	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
2588		.abort_handler		= scic_sds_request_started_state_abort_handler,
2589		.tc_completion_handler	= scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
2590		.frame_handler		= scic_sds_stp_request_udma_general_frame_handler,
2591	},
2592	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
2593		.abort_handler		= scic_sds_request_started_state_abort_handler,
2594		.frame_handler		= scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
2595	},
2596	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2597		.abort_handler		= scic_sds_request_started_state_abort_handler,
2598		.tc_completion_handler	= scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
2599	},
2600	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
2601		.abort_handler		= scic_sds_request_started_state_abort_handler,
2602		.frame_handler		= scic_sds_stp_request_non_data_await_d2h_frame_handler,
2603	},
2604	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
2605		.abort_handler		= scic_sds_request_started_state_abort_handler,
2606		.tc_completion_handler	= scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
2607	},
2608	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
2609		.abort_handler		= scic_sds_request_started_state_abort_handler,
2610		.frame_handler		= scic_sds_stp_request_pio_await_frame_frame_handler
2611	},
2612	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
2613		.abort_handler		= scic_sds_request_started_state_abort_handler,
2614		.event_handler		= scic_sds_stp_request_pio_data_in_await_data_event_handler,
2615		.frame_handler		= scic_sds_stp_request_pio_data_in_await_data_frame_handler
2616	},
2617	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
2618		.abort_handler		= scic_sds_request_started_state_abort_handler,
2619		.tc_completion_handler	= scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
2620	},
2621	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
2622		.abort_handler		= scic_sds_request_started_state_abort_handler,
2623		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
2624	},
2625	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
2626		.abort_handler		= scic_sds_request_started_state_abort_handler,
2627		.tc_completion_handler	= scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
2628	},
2629	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
2630		.abort_handler		= scic_sds_request_started_state_abort_handler,
2631		.frame_handler		= scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
2632	},
2633	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
2634		.complete_handler	= scic_sds_request_completed_state_complete_handler,
2635	},
2636	[SCI_BASE_REQUEST_STATE_ABORTING] = {
2637		.abort_handler		= scic_sds_request_aborting_state_abort_handler,
2638		.tc_completion_handler	= scic_sds_request_aborting_state_tc_completion_handler,
2639		.frame_handler		= scic_sds_request_aborting_state_frame_handler,
2640	},
2641	[SCI_BASE_REQUEST_STATE_FINAL] = { },
2642};
2643
2644
2645/**
2646 * isci_request_process_response_iu() - This function sets the status and
2647 *    response iu, in the task struct, from the request object for the upper
2648 *    layer driver.
 * @task: This parameter is the task struct from the upper layer driver.
2650 * @resp_iu: This parameter points to the response iu of the completed request.
2651 * @dev: This parameter specifies the linux device struct.
2652 *
2653 * none.
2654 */
2655static void isci_request_process_response_iu(
2656	struct sas_task *task,
2657	struct ssp_response_iu *resp_iu,
2658	struct device *dev)
2659{
2660	dev_dbg(dev,
2661		"%s: resp_iu = %p "
2662		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2663		"resp_iu->response_data_len = %x, "
2664		"resp_iu->sense_data_len = %x\nrepsonse data: ",
2665		__func__,
2666		resp_iu,
2667		resp_iu->status,
2668		resp_iu->datapres,
2669		resp_iu->response_data_len,
2670		resp_iu->sense_data_len);
2671
2672	task->task_status.stat = resp_iu->status;
2673
2674	/* libsas updates the task status fields based on the response iu. */
2675	sas_ssp_task_response(dev, task, resp_iu);
2676}
2677
2678/**
2679 * isci_request_set_open_reject_status() - This function prepares the I/O
2680 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task associated with the request.
 * @response_ptr: This parameter specifies the service response for the I/O.
2683 * @status_ptr: This parameter specifies the exec status for the I/O.
2684 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2685 *    the LLDD with respect to completing this request or forcing an abort
2686 *    condition on the I/O.
2687 * @open_rej_reason: This parameter specifies the encoded reason for the
2688 *    abandon-class reject.
2689 *
2690 * none.
2691 */
2692static void isci_request_set_open_reject_status(
2693	struct isci_request *request,
2694	struct sas_task *task,
2695	enum service_response *response_ptr,
2696	enum exec_status *status_ptr,
2697	enum isci_completion_selection *complete_to_host_ptr,
2698	enum sas_open_rej_reason open_rej_reason)
2699{
2700	/* Task in the target is done. */
2701	request->complete_in_target       = true;
2702	*response_ptr                     = SAS_TASK_UNDELIVERED;
2703	*status_ptr                       = SAS_OPEN_REJECT;
2704	*complete_to_host_ptr             = isci_perform_normal_io_completion;
2705	task->task_status.open_rej_reason = open_rej_reason;
2706}
2707
2708/**
2709 * isci_request_handle_controller_specific_errors() - This function decodes
2710 *    controller-specific I/O completion error conditions.
 * @isci_device: This parameter is the remote device associated with the I/O.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task associated with the request.
2712 * @response_ptr: This parameter specifies the service response for the I/O.
2713 * @status_ptr: This parameter specifies the exec status for the I/O.
2714 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2715 *    the LLDD with respect to completing this request or forcing an abort
2716 *    condition on the I/O.
2717 *
2718 * none.
2719 */
2720static void isci_request_handle_controller_specific_errors(
2721	struct isci_remote_device *isci_device,
2722	struct isci_request *request,
2723	struct sas_task *task,
2724	enum service_response *response_ptr,
2725	enum exec_status *status_ptr,
2726	enum isci_completion_selection *complete_to_host_ptr)
2727{
2728	unsigned int cstatus;
2729
2730	cstatus = request->sci.scu_status;
2731
2732	dev_dbg(&request->isci_host->pdev->dev,
2733		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2734		"- controller status = 0x%x\n",
2735		__func__, request, cstatus);
2736
2737	/* Decode the controller-specific errors; most
2738	 * important is to recognize those conditions in which
2739	 * the target may still have a task outstanding that
2740	 * must be aborted.
2741	 *
2742	 * Note that there are SCU completion codes being
2743	 * named in the decode below for which SCIC has already
2744	 * done work to handle them in a way other than as
2745	 * a controller-specific completion code; these are left
2746	 * in the decode below for completeness sake.
2747	 */
2748	switch (cstatus) {
2749	case SCU_TASK_DONE_DMASETUP_DIRERR:
2750	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2751	case SCU_TASK_DONE_XFERCNT_ERR:
2752		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2753		if (task->task_proto == SAS_PROTOCOL_SMP) {
2754			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2755			*response_ptr = SAS_TASK_COMPLETE;
2756
2757			/* See if the device has been/is being stopped. Note
2758			 * that we ignore the quiesce state, since we are
2759			 * concerned about the actual device state.
2760			 */
2761			if ((isci_device->status == isci_stopping) ||
2762			    (isci_device->status == isci_stopped))
2763				*status_ptr = SAS_DEVICE_UNKNOWN;
2764			else
2765				*status_ptr = SAS_ABORTED_TASK;
2766
2767			request->complete_in_target = true;
2768
2769			*complete_to_host_ptr =
2770				isci_perform_normal_io_completion;
2771		} else {
2772			/* Task in the target is not done. */
2773			*response_ptr = SAS_TASK_UNDELIVERED;
2774
2775			if ((isci_device->status == isci_stopping) ||
2776			    (isci_device->status == isci_stopped))
2777				*status_ptr = SAS_DEVICE_UNKNOWN;
2778			else
2779				*status_ptr = SAM_STAT_TASK_ABORTED;
2780
2781			request->complete_in_target = false;
2782
2783			*complete_to_host_ptr =
2784				isci_perform_error_io_completion;
2785		}
2786
2787		break;
2788
2789	case SCU_TASK_DONE_CRC_ERR:
2790	case SCU_TASK_DONE_NAK_CMD_ERR:
2791	case SCU_TASK_DONE_EXCESS_DATA:
2792	case SCU_TASK_DONE_UNEXP_FIS:
2793	/* Also SCU_TASK_DONE_UNEXP_RESP: */
2794	case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2795	case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2796	case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2797		/* These are conditions in which the target
2798		 * has completed the task, so that no cleanup
2799		 * is necessary.
2800		 */
2801		*response_ptr = SAS_TASK_COMPLETE;
2802
2803		/* See if the device has been/is being stopped. Note
2804		 * that we ignore the quiesce state, since we are
2805		 * concerned about the actual device state.
2806		 */
2807		if ((isci_device->status == isci_stopping) ||
2808		    (isci_device->status == isci_stopped))
2809			*status_ptr = SAS_DEVICE_UNKNOWN;
2810		else
2811			*status_ptr = SAS_ABORTED_TASK;
2812
2813		request->complete_in_target = true;
2814
2815		*complete_to_host_ptr = isci_perform_normal_io_completion;
2816		break;
2817
2818
2819	/* Note that the only open reject completion codes seen here will be
2820	 * abandon-class codes; all others are automatically retried in the SCU.
2821	 */
2822	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2823
2824		isci_request_set_open_reject_status(
2825			request, task, response_ptr, status_ptr,
2826			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2827		break;
2828
2829	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2830
2831		/* Note - the return of AB0 will change when
2832		 * libsas implements detection of zone violations.
2833		 */
2834		isci_request_set_open_reject_status(
2835			request, task, response_ptr, status_ptr,
2836			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2837		break;
2838
2839	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2840
2841		isci_request_set_open_reject_status(
2842			request, task, response_ptr, status_ptr,
2843			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2844		break;
2845
2846	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2847
2848		isci_request_set_open_reject_status(
2849			request, task, response_ptr, status_ptr,
2850			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2851		break;
2852
2853	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2854
2855		isci_request_set_open_reject_status(
2856			request, task, response_ptr, status_ptr,
2857			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2858		break;
2859
2860	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2861
2862		isci_request_set_open_reject_status(
2863			request, task, response_ptr, status_ptr,
2864			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2865		break;
2866
2867	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2868
2869		isci_request_set_open_reject_status(
2870			request, task, response_ptr, status_ptr,
2871			complete_to_host_ptr, SAS_OREJ_STP_NORES);
2872		break;
2873
2874	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2875
2876		isci_request_set_open_reject_status(
2877			request, task, response_ptr, status_ptr,
2878			complete_to_host_ptr, SAS_OREJ_EPROTO);
2879		break;
2880
2881	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2882
2883		isci_request_set_open_reject_status(
2884			request, task, response_ptr, status_ptr,
2885			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2886		break;
2887
2888	case SCU_TASK_DONE_LL_R_ERR:
2889	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
2890	case SCU_TASK_DONE_LL_PERR:
2891	case SCU_TASK_DONE_LL_SY_TERM:
2892	/* Also SCU_TASK_DONE_NAK_ERR:*/
2893	case SCU_TASK_DONE_LL_LF_TERM:
2894	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2895	case SCU_TASK_DONE_LL_ABORT_ERR:
2896	case SCU_TASK_DONE_SEQ_INV_TYPE:
2897	/* Also SCU_TASK_DONE_UNEXP_XR: */
2898	case SCU_TASK_DONE_XR_IU_LEN_ERR:
2899	case SCU_TASK_DONE_INV_FIS_LEN:
2900	/* Also SCU_TASK_DONE_XR_WD_LEN: */
2901	case SCU_TASK_DONE_SDMA_ERR:
2902	case SCU_TASK_DONE_OFFSET_ERR:
2903	case SCU_TASK_DONE_MAX_PLD_ERR:
2904	case SCU_TASK_DONE_LF_ERR:
2905	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2906	case SCU_TASK_DONE_SMP_LL_RX_ERR:
2907	case SCU_TASK_DONE_UNEXP_DATA:
2908	case SCU_TASK_DONE_UNEXP_SDBFIS:
2909	case SCU_TASK_DONE_REG_ERR:
2910	case SCU_TASK_DONE_SDB_ERR:
2911	case SCU_TASK_DONE_TASK_ABORT:
2912	default:
2913		/* Task in the target is not done. */
2914		*response_ptr = SAS_TASK_UNDELIVERED;
2915		*status_ptr = SAM_STAT_TASK_ABORTED;
2916		request->complete_in_target = false;
2917
2918		*complete_to_host_ptr = isci_perform_error_io_completion;
2919		break;
2920	}
2921}
2922
2923/**
2924 * isci_task_save_for_upper_layer_completion() - This function saves the
2925 *    request for later completion to the upper layer driver.
 * @host: This parameter is a pointer to the host on which the request
2927 *    should be queued (either as an error or success).
2928 * @request: This parameter is the completed request.
2929 * @response: This parameter is the response code for the completed task.
2930 * @status: This parameter is the status code for the completed task.
2931 *
2932 * none.
2933 */
2934static void isci_task_save_for_upper_layer_completion(
2935	struct isci_host *host,
2936	struct isci_request *request,
2937	enum service_response response,
2938	enum exec_status status,
2939	enum isci_completion_selection task_notification_selection)
2940{
2941	struct sas_task *task = isci_request_access_task(request);
2942
2943	task_notification_selection
2944		= isci_task_set_completion_status(task, response, status,
2945						  task_notification_selection);
2946
2947	/* Tasks aborted specifically by a call to the lldd_abort_task
2948	 * function should not be completed to the host in the regular path.
2949	 */
2950	switch (task_notification_selection) {
2951
2952	case isci_perform_normal_io_completion:
2953
2954		/* Normal notification (task_done) */
2955		dev_dbg(&host->pdev->dev,
2956			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2957			__func__,
2958			task,
2959			task->task_status.resp, response,
2960			task->task_status.stat, status);
2961		/* Add to the completed list. */
2962		list_add(&request->completed_node,
2963			 &host->requests_to_complete);
2964
2965		/* Take the request off the device's pending request list. */
2966		list_del_init(&request->dev_node);
2967		break;
2968
2969	case isci_perform_aborted_io_completion:
2970		/* No notification to libsas because this request is
2971		 * already in the abort path.
2972		 */
2973		dev_warn(&host->pdev->dev,
2974			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2975			 __func__,
2976			 task,
2977			 task->task_status.resp, response,
2978			 task->task_status.stat, status);
2979
2980		/* Wake up whatever process was waiting for this
2981		 * request to complete.
2982		 */
2983		WARN_ON(request->io_request_completion == NULL);
2984
2985		if (request->io_request_completion != NULL) {
2986
2987			/* Signal whoever is waiting that this
2988			* request is complete.
2989			*/
2990			complete(request->io_request_completion);
2991		}
2992		break;
2993
2994	case isci_perform_error_io_completion:
2995		/* Use sas_task_abort */
2996		dev_warn(&host->pdev->dev,
2997			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2998			 __func__,
2999			 task,
3000			 task->task_status.resp, response,
3001			 task->task_status.stat, status);
3002		/* Add to the aborted list. */
3003		list_add(&request->completed_node,
3004			 &host->requests_to_errorback);
3005		break;
3006
3007	default:
3008		dev_warn(&host->pdev->dev,
3009			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
3010			 __func__,
3011			 task,
3012			 task->task_status.resp, response,
3013			 task->task_status.stat, status);
3014
3015		/* Add to the error to libsas list. */
3016		list_add(&request->completed_node,
3017			 &host->requests_to_errorback);
3018		break;
3019	}
3020}
3021
3022static void isci_request_io_request_complete(struct isci_host *isci_host,
3023					     struct isci_request *request,
3024					     enum sci_io_status completion_status)
3025{
3026	struct sas_task *task = isci_request_access_task(request);
3027	struct ssp_response_iu *resp_iu;
3028	void *resp_buf;
3029	unsigned long task_flags;
3030	struct isci_remote_device *isci_device   = request->isci_device;
3031	enum service_response response       = SAS_TASK_UNDELIVERED;
3032	enum exec_status status         = SAS_ABORTED_TASK;
3033	enum isci_request_status request_status;
3034	enum isci_completion_selection complete_to_host
3035		= isci_perform_normal_io_completion;
3036
3037	dev_dbg(&isci_host->pdev->dev,
3038		"%s: request = %p, task = %p,\n"
3039		"task->data_dir = %d completion_status = 0x%x\n",
3040		__func__,
3041		request,
3042		task,
3043		task->data_dir,
3044		completion_status);
3045
3046	spin_lock(&request->state_lock);
3047	request_status = isci_request_get_state(request);
3048
3049	/* Decode the request status.  Note that if the request has been
3050	 * aborted by a task management function, we don't care
3051	 * what the status is.
3052	 */
3053	switch (request_status) {
3054
3055	case aborted:
3056		/* "aborted" indicates that the request was aborted by a task
3057		 * management function, since once a task management request is
3058		 * perfomed by the device, the request only completes because
3059		 * of the subsequent driver terminate.
3060		 *
3061		 * Aborted also means an external thread is explicitly managing
3062		 * this request, so that we do not complete it up the stack.
3063		 *
3064		 * The target is still there (since the TMF was successful).
3065		 */
3066		request->complete_in_target = true;
3067		response = SAS_TASK_COMPLETE;
3068
3069		/* See if the device has been/is being stopped. Note
3070		 * that we ignore the quiesce state, since we are
3071		 * concerned about the actual device state.
3072		 */
		if ((isci_device->status == isci_stopping) ||
		    (isci_device->status == isci_stopped))
3076			status = SAS_DEVICE_UNKNOWN;
3077		else
3078			status = SAS_ABORTED_TASK;
3079
3080		complete_to_host = isci_perform_aborted_io_completion;
3081		/* This was an aborted request. */
3082
3083		spin_unlock(&request->state_lock);
3084		break;
3085
3086	case aborting:
3087		/* aborting means that the task management function tried and
3088		 * failed to abort the request. We need to note the request
3089		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
3090		 * target as down.
3091		 *
3092		 * Aborting also means an external thread is explicitly managing
3093		 * this request, so that we do not complete it up the stack.
3094		 */
3095		request->complete_in_target = true;
3096		response = SAS_TASK_UNDELIVERED;
3097
3098		if ((isci_device->status == isci_stopping) ||
3099		    (isci_device->status == isci_stopped))
			/* The device has been/is being stopped. Note that
3101			 * we ignore the quiesce state, since we are
3102			 * concerned about the actual device state.
3103			 */
3104			status = SAS_DEVICE_UNKNOWN;
3105		else
3106			status = SAS_PHY_DOWN;
3107
3108		complete_to_host = isci_perform_aborted_io_completion;
3109
3110		/* This was an aborted request. */
3111
3112		spin_unlock(&request->state_lock);
3113		break;
3114
3115	case terminating:
3116
		/* This was a terminated request.  This happens when
3118		 * the I/O is being terminated because of an action on
3119		 * the device (reset, tear down, etc.), and the I/O needs
3120		 * to be completed up the stack.
3121		 */
3122		request->complete_in_target = true;
3123		response = SAS_TASK_UNDELIVERED;
3124
3125		/* See if the device has been/is being stopped. Note
3126		 * that we ignore the quiesce state, since we are
3127		 * concerned about the actual device state.
3128		 */
3129		if ((isci_device->status == isci_stopping) ||
3130		    (isci_device->status == isci_stopped))
3131			status = SAS_DEVICE_UNKNOWN;
3132		else
3133			status = SAS_ABORTED_TASK;
3134
3135		complete_to_host = isci_perform_aborted_io_completion;
3136
3137		/* This was a terminated request. */
3138
3139		spin_unlock(&request->state_lock);
3140		break;
3141
3142	default:
3143
3144		/* The request is done from an SCU HW perspective. */
3145		request->status = completed;
3146
3147		spin_unlock(&request->state_lock);
3148
3149		/* This is an active request being completed from the core. */
3150		switch (completion_status) {
3151
3152		case SCI_IO_FAILURE_RESPONSE_VALID:
3153			dev_dbg(&isci_host->pdev->dev,
3154				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
3155				__func__,
3156				request,
3157				task);
3158
3159			if (sas_protocol_ata(task->task_proto)) {
3160				resp_buf = &request->sci.stp.rsp;
3161				isci_request_process_stp_response(task,
3162								  resp_buf);
			} else if (task->task_proto == SAS_PROTOCOL_SSP) {
3164
3165				/* crack the iu response buffer. */
3166				resp_iu = &request->sci.ssp.rsp;
3167				isci_request_process_response_iu(task, resp_iu,
3168								 &isci_host->pdev->dev);
3169
3170			} else if (SAS_PROTOCOL_SMP == task->task_proto) {
3171
3172				dev_err(&isci_host->pdev->dev,
3173					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
3174					"SAS_PROTOCOL_SMP protocol\n",
3175					__func__);
3176
3177			} else
3178				dev_err(&isci_host->pdev->dev,
3179					"%s: unknown protocol\n", __func__);
3180
			/* use the task status set in the task struct by the
			 * response-processing call above.
			 */
3184			request->complete_in_target = true;
3185			response = task->task_status.resp;
3186			status = task->task_status.stat;
3187			break;
3188
3189		case SCI_IO_SUCCESS:
3190		case SCI_IO_SUCCESS_IO_DONE_EARLY:
3191
3192			response = SAS_TASK_COMPLETE;
3193			status   = SAM_STAT_GOOD;
3194			request->complete_in_target = true;
3195
3196			if (task->task_proto == SAS_PROTOCOL_SMP) {
3197				void *rsp = &request->sci.smp.rsp;
3198
3199				dev_dbg(&isci_host->pdev->dev,
3200					"%s: SMP protocol completion\n",
3201					__func__);
3202
3203				sg_copy_from_buffer(
3204					&task->smp_task.smp_resp, 1,
3205					rsp, sizeof(struct smp_resp));
3206			} else if (completion_status
3207				   == SCI_IO_SUCCESS_IO_DONE_EARLY) {
3208
3209				/* This was an SSP / STP / SATA transfer.
3210				 * There is a possibility that less data than
3211				 * the maximum was transferred.
3212				 */
3213				u32 transferred_length = sci_req_tx_bytes(&request->sci);
3214
				task->task_status.residual =
					task->total_xfer_len - transferred_length;
3217
3218				/* If there were residual bytes, call this an
3219				 * underrun.
3220				 */
3221				if (task->task_status.residual != 0)
3222					status = SAS_DATA_UNDERRUN;
3223
3224				dev_dbg(&isci_host->pdev->dev,
3225					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
3226					__func__,
3227					status);
3228
3229			} else
3230				dev_dbg(&isci_host->pdev->dev,
3231					"%s: SCI_IO_SUCCESS\n",
3232					__func__);
3233
3234			break;
3235
3236		case SCI_IO_FAILURE_TERMINATED:
3237			dev_dbg(&isci_host->pdev->dev,
3238				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
3239				__func__,
3240				request,
3241				task);
3242
3243			/* The request was terminated explicitly.  No handling
3244			 * is needed in the SCSI error handler path.
3245			 */
3246			request->complete_in_target = true;
3247			response = SAS_TASK_UNDELIVERED;
3248
3249			/* See if the device has been/is being stopped. Note
3250			 * that we ignore the quiesce state, since we are
3251			 * concerned about the actual device state.
3252			 */
3253			if ((isci_device->status == isci_stopping) ||
3254			    (isci_device->status == isci_stopped))
3255				status = SAS_DEVICE_UNKNOWN;
3256			else
3257				status = SAS_ABORTED_TASK;
3258
3259			complete_to_host = isci_perform_normal_io_completion;
3260			break;
3261
3262		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
3263
3264			isci_request_handle_controller_specific_errors(
3265				isci_device, request, task, &response, &status,
3266				&complete_to_host);
3267
3268			break;
3269
3270		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
3271			/* This is a special case, in that the I/O completion
3272			 * is telling us that the device needs a reset.
3273			 * In order for the device reset condition to be
3274			 * noticed, the I/O has to be handled in the error
3275			 * handler.  Set the reset flag and cause the
3276			 * SCSI error thread to be scheduled.
3277			 */
3278			spin_lock_irqsave(&task->task_state_lock, task_flags);
3279			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3280			spin_unlock_irqrestore(&task->task_state_lock, task_flags);
3281
3282			/* Fail the I/O. */
3283			response = SAS_TASK_UNDELIVERED;
3284			status = SAM_STAT_TASK_ABORTED;
3285
3286			complete_to_host = isci_perform_error_io_completion;
3287			request->complete_in_target = false;
3288			break;
3289
3290		default:
3291			/* Catch any otherwise unhandled error codes here. */
3292			dev_warn(&isci_host->pdev->dev,
3293				 "%s: invalid completion code: 0x%x - "
3294				 "isci_request = %p\n",
3295				 __func__, completion_status, request);
3296
3297			response = SAS_TASK_UNDELIVERED;
3298
3299			/* See if the device has been/is being stopped. Note
3300			 * that we ignore the quiesce state, since we are
3301			 * concerned about the actual device state.
3302			 */
3303			if ((isci_device->status == isci_stopping) ||
3304			    (isci_device->status == isci_stopped))
3305				status = SAS_DEVICE_UNKNOWN;
3306			else
3307				status = SAS_ABORTED_TASK;
3308
3309			complete_to_host = isci_perform_error_io_completion;
3310			request->complete_in_target = false;
3311			break;
3312		}
3313		break;
3314	}
3315
3316	isci_request_unmap_sgl(request, isci_host->pdev);
3317
3318	/* Put the completed request on the correct list */
	isci_task_save_for_upper_layer_completion(isci_host, request, response,
						  status, complete_to_host);
3322
3323	/* complete the io request to the core. */
3324	scic_controller_complete_io(&isci_host->sci,
3325				    &isci_device->sci,
3326				    &request->sci);
	/* Set the terminated flag so the request cannot be completed or
	 * terminated again, and so calls into abort task recognize the
	 * already-completed case.
	 */
3331	request->terminated = true;
3332
3333	isci_host_can_dequeue(isci_host, 1);
3334}
3335
3336/**
3337 * scic_sds_request_initial_state_enter() -
3338 * @object: This parameter specifies the base object for which the state
3339 *    transition is occurring.
3340 *
3341 * This method implements the actions taken when entering the
3342 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
3343 * base request is constructed. Entry into the initial state sets all handlers
 * for the io request object to their default handlers.
3345 */
3346static void scic_sds_request_initial_state_enter(void *object)
3347{
3348	struct scic_sds_request *sci_req = object;
3349
3350	SET_STATE_HANDLER(
3351		sci_req,
3352		scic_sds_request_state_handler_table,
3353		SCI_BASE_REQUEST_STATE_INITIAL
3354		);
3355}
3356
3357/**
3358 * scic_sds_request_constructed_state_enter() -
3359 * @object: The io request object that is to enter the constructed state.
3360 *
3361 * This method implements the actions taken when entering the
3362 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
 * for the constructed state.
3364 */
3365static void scic_sds_request_constructed_state_enter(void *object)
3366{
3367	struct scic_sds_request *sci_req = object;
3368
3369	SET_STATE_HANDLER(
3370		sci_req,
3371		scic_sds_request_state_handler_table,
3372		SCI_BASE_REQUEST_STATE_CONSTRUCTED
3373		);
3374}
3375
3376static void scic_sds_request_started_state_enter(void *object)
3377{
3378	struct scic_sds_request *sci_req = object;
3379	struct sci_base_state_machine *sm = &sci_req->state_machine;
3380	struct isci_request *ireq = sci_req_to_ireq(sci_req);
3381	struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
3382	struct sas_task *task;
3383
3384	/* XXX as hch said always creating an internal sas_task for tmf
3385	 * requests would simplify the driver
3386	 */
3387	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
3388
3389	SET_STATE_HANDLER(
3390		sci_req,
3391		scic_sds_request_state_handler_table,
3392		SCI_BASE_REQUEST_STATE_STARTED
3393		);
3394
	/* all unaccelerated request types (non-SSP, non-NCQ) are handled
	 * with substates
	 */
3398	if (!task && dev->dev_type == SAS_END_DEV) {
3399		sci_base_state_machine_change_state(sm,
3400			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
3401	} else if (!task &&
3402		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3403		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3404		sci_base_state_machine_change_state(sm,
3405			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
3406	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3407		sci_base_state_machine_change_state(sm,
3408			SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
3409	} else if (task && sas_protocol_ata(task->task_proto) &&
3410		   !task->ata_task.use_ncq) {
3411		u32 state;
3412
3413		if (task->data_dir == DMA_NONE)
			state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
3415		else if (task->ata_task.dma_xfer)
3416			state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
3417		else /* PIO */
3418			state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
3419
3420		sci_base_state_machine_change_state(sm, state);
3421	}
3422}
3423
3424/**
3425 * scic_sds_request_completed_state_enter() -
3426 * @object: This parameter specifies the base object for which the state
3427 *    transition is occurring.  This object is cast into a SCIC_SDS_IO_REQUEST
3428 *    object.
3429 *
3430 * This method implements the actions taken when entering the
3431 * SCI_BASE_REQUEST_STATE_COMPLETED state.  This state is entered when the
3432 * SCIC_SDS_IO_REQUEST has completed.  The method will decode the request
3433 * completion status and convert it to an enum sci_status to return in the
 * completion callback function.
3435 */
3436static void scic_sds_request_completed_state_enter(void *object)
3437{
3438	struct scic_sds_request *sci_req = object;
3439	struct scic_sds_controller *scic =
3440		scic_sds_request_get_controller(sci_req);
3441	struct isci_host *ihost = scic_to_ihost(scic);
3442	struct isci_request *ireq = sci_req_to_ireq(sci_req);
3443
3444	SET_STATE_HANDLER(sci_req,
3445			  scic_sds_request_state_handler_table,
3446			  SCI_BASE_REQUEST_STATE_COMPLETED);
3447
3448	/* Tell the SCI_USER that the IO request is complete */
	if (!sci_req->is_task_management_request)
3450		isci_request_io_request_complete(ihost, ireq,
3451						 sci_req->sci_status);
3452	else
3453		isci_task_request_complete(ihost, ireq, sci_req->sci_status);
3454}
3455
3456/**
3457 * scic_sds_request_aborting_state_enter() -
3458 * @object: This parameter specifies the base object for which the state
3459 *    transition is occurring.  This object is cast into a SCIC_SDS_IO_REQUEST
3460 *    object.
3461 *
3462 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_ABORTING state.
3464 */
3465static void scic_sds_request_aborting_state_enter(void *object)
3466{
3467	struct scic_sds_request *sci_req = object;
3468
3469	/* Setting the abort bit in the Task Context is required by the silicon. */
3470	sci_req->task_context_buffer->abort = 1;
3471
3472	SET_STATE_HANDLER(
3473		sci_req,
3474		scic_sds_request_state_handler_table,
3475		SCI_BASE_REQUEST_STATE_ABORTING
3476		);
3477}
3478
3479/**
3480 * scic_sds_request_final_state_enter() -
3481 * @object: This parameter specifies the base object for which the state
3482 *    transition is occurring.  This is cast into a SCIC_SDS_IO_REQUEST object.
3483 *
3484 * This method implements the actions taken when entering the
3485 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
 * state handlers in place.
3487 */
3488static void scic_sds_request_final_state_enter(void *object)
3489{
3490	struct scic_sds_request *sci_req = object;
3491
3492	SET_STATE_HANDLER(
3493		sci_req,
3494		scic_sds_request_state_handler_table,
3495		SCI_BASE_REQUEST_STATE_FINAL
3496		);
3497}
3498
3499static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
3500	void *object)
3501{
3502	struct scic_sds_request *sci_req = object;
3503
3504	SET_STATE_HANDLER(
3505		sci_req,
3506		scic_sds_request_state_handler_table,
3507		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
3508		);
3509}
3510
3511static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
3512	void *object)
3513{
3514	struct scic_sds_request *sci_req = object;
3515
3516	SET_STATE_HANDLER(
3517		sci_req,
3518		scic_sds_request_state_handler_table,
3519		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
3520		);
3521}
3522
3523static void scic_sds_smp_request_started_await_response_substate_enter(void *object)
3524{
3525	struct scic_sds_request *sci_req = object;
3526
3527	SET_STATE_HANDLER(
3528		sci_req,
3529		scic_sds_request_state_handler_table,
3530		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE
3531		);
3532}
3533
3534static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object)
3535{
3536	struct scic_sds_request *sci_req = object;
3537
3538	SET_STATE_HANDLER(
3539		sci_req,
3540		scic_sds_request_state_handler_table,
3541		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION
3542		);
3543}
3544
3545static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
3546	void *object)
3547{
3548	struct scic_sds_request *sci_req = object;
3549
3550	SET_STATE_HANDLER(
3551		sci_req,
3552		scic_sds_request_state_handler_table,
3553		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
3554		);
3555
3556	scic_sds_remote_device_set_working_request(
3557		sci_req->target_device, sci_req
3558		);
3559}
3560
3561static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
3562{
3563	struct scic_sds_request *sci_req = object;
3564
3565	SET_STATE_HANDLER(
3566		sci_req,
3567		scic_sds_request_state_handler_table,
3568		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
3569		);
3570}
3571
3572
3573
3574static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
3575	void *object)
3576{
3577	struct scic_sds_request *sci_req = object;
3578
3579	SET_STATE_HANDLER(
3580		sci_req,
3581		scic_sds_request_state_handler_table,
3582		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
3583		);
3584
3585	scic_sds_remote_device_set_working_request(
3586		sci_req->target_device, sci_req);
3587}
3588
3589static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
3590{
3591	struct scic_sds_request *sci_req = object;
3592
3593	SET_STATE_HANDLER(
3594		sci_req,
3595		scic_sds_request_state_handler_table,
3596		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
3597		);
3598}
3599
3600static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
3601	void *object)
3602{
3603	struct scic_sds_request *sci_req = object;
3604
3605	SET_STATE_HANDLER(
3606		sci_req,
3607		scic_sds_request_state_handler_table,
3608		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
3609		);
3610}
3611
3612static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
3613	void *object)
3614{
3615	struct scic_sds_request *sci_req = object;
3616
3617	SET_STATE_HANDLER(
3618		sci_req,
3619		scic_sds_request_state_handler_table,
3620		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
3621		);
3622}
3623
3624
3625
3626static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
3627	void *object)
3628{
3629	struct scic_sds_request *sci_req = object;
3630
3631	SET_STATE_HANDLER(
3632		sci_req,
3633		scic_sds_request_state_handler_table,
3634		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
3635		);
3636}
3637
3638/**
3639 *
3640 *
3641 * This state is entered when there is an TC completion failure.  The hardware
3642 * received an unexpected condition while processing the IO request and now
3643 * will UF the D2H register FIS to complete the IO.
3644 */
3645static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
3646	void *object)
3647{
3648	struct scic_sds_request *sci_req = object;
3649
3650	SET_STATE_HANDLER(
3651		sci_req,
3652		scic_sds_request_state_handler_table,
3653		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
3654		);
3655}
3656
3657
3658
3659static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
3660	void *object)
3661{
3662	struct scic_sds_request *sci_req = object;
3663
3664	SET_STATE_HANDLER(
3665		sci_req,
3666		scic_sds_request_state_handler_table,
3667		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
3668		);
3669
3670	scic_sds_remote_device_set_working_request(
3671		sci_req->target_device, sci_req
3672		);
3673}
3674
3675static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
3676	void *object)
3677{
3678	struct scic_sds_request *sci_req = object;
3679	struct scu_task_context *task_context;
3680	struct host_to_dev_fis *h2d_fis;
3681	enum sci_status status;
3682
3683	/* Clear the SRST bit */
3684	h2d_fis = &sci_req->stp.cmd;
3685	h2d_fis->control = 0;
3686
3687	/* Clear the TC control bit */
3688	task_context = scic_sds_controller_get_task_context_buffer(
3689		sci_req->owning_controller, sci_req->io_tag);
3690	task_context->control_frame = 0;
3691
3692	status = scic_controller_continue_io(sci_req);
3693	if (status == SCI_SUCCESS) {
3694		SET_STATE_HANDLER(
3695			sci_req,
3696			scic_sds_request_state_handler_table,
3697			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
3698			);
3699	}
3700}
3701
3702static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
3703	void *object)
3704{
3705	struct scic_sds_request *sci_req = object;
3706
3707	SET_STATE_HANDLER(
3708		sci_req,
3709		scic_sds_request_state_handler_table,
3710		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
3711		);
3712}
3713
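/*
 * State table for the request state machine.  The common lifecycle is
 * INITIAL -> CONSTRUCTED -> STARTED -> COMPLETED -> FINAL, with STARTED
 * optionally detouring through ABORTING before COMPLETED.  STP, SMP, and
 * task management requests pass through the protocol specific substates
 * listed below while in the STARTED state.
 */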
3714static const struct sci_base_state scic_sds_request_state_table[] = {
3715	[SCI_BASE_REQUEST_STATE_INITIAL] = {
3716		.enter_state = scic_sds_request_initial_state_enter,
3717	},
3718	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
3719		.enter_state = scic_sds_request_constructed_state_enter,
3720	},
3721	[SCI_BASE_REQUEST_STATE_STARTED] = {
3722		.enter_state = scic_sds_request_started_state_enter,
3723	},
3724	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3725		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3726	},
3727	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
3728		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
3729	},
3730	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
3731		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3732	},
3733	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
3734		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
3735	},
3736	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
3737		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
3738	},
3739	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
3740		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
3741	},
3742	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
3743		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
3744	},
3745	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
3746		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
3747	},
3748	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
3749		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3750	},
3751	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
3752		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3753	},
3754	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
3755		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
3756	},
3757	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
3758		.enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
3759	},
3760	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
3761		.enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
3762	},
3763	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
3764		.enter_state = scic_sds_smp_request_started_await_response_substate_enter,
3765	},
3766	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
3767		.enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter,
3768	},
3769	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
3770		.enter_state = scic_sds_request_completed_state_enter,
3771	},
3772	[SCI_BASE_REQUEST_STATE_ABORTING] = {
3773		.enter_state = scic_sds_request_aborting_state_enter,
3774	},
3775	[SCI_BASE_REQUEST_STATE_FINAL] = {
3776		.enter_state = scic_sds_request_final_state_enter,
3777	},
3778};
3779
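/*
 * scic_sds_general_request_construct() - construct the portion of the
 * request common to all protocols: start the state machine and initialize
 * the fields shared by I/O and task management requests.  A user-assigned
 * tag selects the hardware task context buffer directly; otherwise the
 * request's local buffer is used until a tag is allocated.
 */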
3780static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
3781					       struct scic_sds_remote_device *sci_dev,
3782					       u16 io_tag, struct scic_sds_request *sci_req)
3783{
3784	sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
3785			scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
3786	sci_base_state_machine_start(&sci_req->state_machine);
3787
3788	sci_req->io_tag = io_tag;
3789	sci_req->owning_controller = scic;
3790	sci_req->target_device = sci_dev;
3791	sci_req->protocol = SCIC_NO_PROTOCOL;
3792	sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3793	sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
3794
3795	sci_req->sci_status   = SCI_SUCCESS;
3796	sci_req->scu_status   = 0;
3797	sci_req->post_context = 0xFFFFFFFF;
3798
3799	sci_req->is_task_management_request = false;
3800
3801	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
3802		sci_req->was_tag_assigned_by_user = false;
3803		sci_req->task_context_buffer = &sci_req->tc;
3804	} else {
3805		sci_req->was_tag_assigned_by_user = true;
3806
3807		sci_req->task_context_buffer =
3808			scic_sds_controller_get_task_context_buffer(scic, io_tag);
3809	}
3810}
3811
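/*
 * scic_io_request_construct() - perform the protocol specific portion of
 * I/O request construction on top of the common construction: validate
 * the remote node, clear the STP or SMP command area as appropriate, and
 * zero the task context up to the SGL pair region.
 */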
3812static enum sci_status
3813scic_io_request_construct(struct scic_sds_controller *scic,
3814			  struct scic_sds_remote_device *sci_dev,
3815			  u16 io_tag, struct scic_sds_request *sci_req)
3816{
3817	struct domain_device *dev = sci_dev_to_domain(sci_dev);
3818	enum sci_status status = SCI_SUCCESS;
3819
3820	/* Build the common part of the request */
3821	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3822
3823	if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3824		return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3825
3826	if (dev->dev_type == SAS_END_DEV)
3827		/* pass */;
3828	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3829		memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3830	else if (dev_is_expander(dev))
3831		memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
3832	else
3833		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3834
3835	memset(sci_req->task_context_buffer, 0,
3836	       offsetof(struct scu_task_context, sgl_pair_ab));
3837
3838	return status;
3839}
3840
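/*
 * scic_task_request_construct() - construct a task management request on
 * top of the common construction.  Only SSP and STP/SATA devices support
 * task management; other device types are rejected.
 */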
3841enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3842					    struct scic_sds_remote_device *sci_dev,
3843					    u16 io_tag, struct scic_sds_request *sci_req)
3844{
3845	struct domain_device *dev = sci_dev_to_domain(sci_dev);
3846	enum sci_status status = SCI_SUCCESS;
3847
3848	/* Build the common part of the request */
3849	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3850
3851	if (dev->dev_type == SAS_END_DEV ||
3852	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3853		sci_req->is_task_management_request = true;
3854		memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
3855	} else
3856		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3857
3858	return status;
3859}
3860
3861static enum sci_status isci_request_ssp_request_construct(
3862	struct isci_request *request)
3863{
3864	enum sci_status status;
3865
3866	dev_dbg(&request->isci_host->pdev->dev,
3867		"%s: request = %p\n",
3868		__func__,
3869		request);
3870	status = scic_io_request_construct_basic_ssp(&request->sci);
3871	return status;
3872}
3873
3874static enum sci_status isci_request_stp_request_construct(
3875	struct isci_request *request)
3876{
3877	struct sas_task *task = isci_request_access_task(request);
3878	enum sci_status status;
3879	struct host_to_dev_fis *register_fis;
3880
3881	dev_dbg(&request->isci_host->pdev->dev,
3882		"%s: request = %p\n",
3883		__func__,
3884		request);
3885
3886	/* Get the host_to_dev_fis from the core and copy
3887	 * the fis from the task into it.
3888	 */
3889	register_fis = isci_sata_task_to_fis_copy(task);
3890
3891	status = scic_io_request_construct_basic_sata(&request->sci);
3892
3893	/* Set the ncq tag in the fis, from the queue
3894	 * command in the task.
3895	 */
	if (isci_sata_is_task_ncq(task))
		isci_sata_set_ncq_tag(register_fis, task);
3903
3904	return status;
3905}
3906
3907/*
3908 * This function will fill in the SCU Task Context for a SMP request. The
3909 *    following important settings are utilized: -# task_type ==
3910 *    SCU_TASK_TYPE_SMP.  This simply indicates that a normal request type
3911 *    (i.e. non-raw frame) is being utilized to perform task management. -#
3912 *    control_frame == 1.  This ensures that the proper endianess is set so
3913 *    that the bytes are transmitted in the right order for a smp request frame.
3914 * @sci_req: This parameter specifies the smp request object being
3915 *    constructed.
3916 *
3917 */
3918static void
3919scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
3920				       struct smp_req *smp_req)
3921{
3922	dma_addr_t dma_addr;
3923	struct scic_sds_controller *scic;
3924	struct scic_sds_remote_device *sci_dev;
3925	struct scic_sds_port *sci_port;
3926	struct scu_task_context *task_context;
3927	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
3928
3929	/* byte swap the smp request. */
3930	sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
3931		       word_cnt);
3932
3933	task_context = scic_sds_request_get_task_context(sci_req);
3934
3935	scic = scic_sds_request_get_controller(sci_req);
3936	sci_dev = scic_sds_request_get_device(sci_req);
3937	sci_port = scic_sds_request_get_port(sci_req);
3938
3939	/*
	 * Fill in the TC with its required data
3941	 * 00h
3942	 */
3943	task_context->priority = 0;
3944	task_context->initiator_request = 1;
3945	task_context->connection_rate = sci_dev->connection_rate;
3946	task_context->protocol_engine_index =
3947		scic_sds_controller_get_protocol_engine_group(scic);
3948	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3949	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3950	task_context->abort = 0;
3951	task_context->valid = SCU_TASK_CONTEXT_VALID;
3952	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3953
3954	/* 04h */
3955	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3956	task_context->command_code = 0;
3957	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3958
3959	/* 08h */
3960	task_context->link_layer_control = 0;
3961	task_context->do_not_dma_ssp_good_response = 1;
3962	task_context->strict_ordering = 0;
3963	task_context->control_frame = 1;
3964	task_context->timeout_enable = 0;
3965	task_context->block_guard_enable = 0;
3966
3967	/* 0ch */
3968	task_context->address_modifier = 0;
3969
3970	/* 10h */
3971	task_context->ssp_command_iu_length = smp_req->req_len;
3972
3973	/* 14h */
3974	task_context->transfer_length_bytes = 0;
3975
3976	/*
3977	 * 18h ~ 30h, protocol specific
	 * since the command IU has been built by the framework at this point,
	 * we just copy the first DWord from the command IU to this location. */
3980	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
3981
3982	/*
3983	 * 40h
3984	 * "For SMP you could program it to zero. We would prefer that way
3985	 * so that done code will be consistent." - Venki
3986	 */
3987	task_context->task_phase = 0;
3988
3989	if (sci_req->was_tag_assigned_by_user) {
3990		/*
3991		 * Build the task context now since we have already read
3992		 * the data
3993		 */
3994		sci_req->post_context =
3995			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3996			 (scic_sds_controller_get_protocol_engine_group(scic) <<
3997			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3998			 (scic_sds_port_get_index(sci_port) <<
3999			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
4000			 scic_sds_io_tag_get_index(sci_req->io_tag));
4001	} else {
4002		/*
4003		 * Build the task context now since we have already read
4004		 * the data.
4005		 * I/O tag index is not assigned because we have to wait
4006		 * until we get a TCi.
4007		 */
4008		sci_req->post_context =
4009			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
4010			 (scic_sds_controller_get_protocol_engine_group(scic) <<
4011			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
4012			 (scic_sds_port_get_index(sci_port) <<
4013			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
4014	}
4015
4016	/*
4017	 * Copy the physical address for the command buffer to the SCU Task
4018	 * Context command buffer should not contain command header.
4019	 */
4020	dma_addr = scic_io_request_get_dma_addr(sci_req,
4021						((char *) &sci_req->smp.cmd) +
4022						sizeof(u32));
4023
4024	task_context->command_iu_upper = upper_32_bits(dma_addr);
4025	task_context->command_iu_lower = lower_32_bits(dma_addr);
4026
4027	/* SMP response comes as UF, so no need to set response IU address. */
4028	task_context->response_iu_upper = 0;
4029	task_context->response_iu_lower = 0;
4030}
4031
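/*
 * scic_io_request_construct_smp() - construct an SMP request from the
 * command already staged in the request's SMP command area, applying the
 * SAS 1.x/2.0 default request lengths before building the task context.
 */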
4032static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
4033{
4034	struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);
4035
4036	if (!smp_req)
4037		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
4038
4039	sci_req->protocol = SCIC_SMP_PROTOCOL;
4040
	/* Copy out the staged SMP command so that default request lengths
	 * can be fixed up before the task context is constructed.
	 */
4042	memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));
4043
4044	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
4046	 * functions under SAS 2.0, a zero request length really indicates
4047	 * a non-zero default length. */
4048	if (smp_req->req_len == 0) {
4049		switch (smp_req->func) {
4050		case SMP_DISCOVER:
4051		case SMP_REPORT_PHY_ERR_LOG:
4052		case SMP_REPORT_PHY_SATA:
4053		case SMP_REPORT_ROUTE_INFO:
4054			smp_req->req_len = 2;
4055			break;
4056		case SMP_CONF_ROUTE_INFO:
4057		case SMP_PHY_CONTROL:
4058		case SMP_PHY_TEST_FUNCTION:
4059			smp_req->req_len = 9;
4060			break;
4061			/* Default - zero is a valid default for 2.0. */
4062		}
4063	}
4064
4065	scu_smp_request_construct_task_context(sci_req, smp_req);
4066
4067	sci_base_state_machine_change_state(&sci_req->state_machine,
4068		SCI_BASE_REQUEST_STATE_CONSTRUCTED);
4069
4070	kfree(smp_req);
4071
4072	return SCI_SUCCESS;
4073}
4074
4075/*
4076 * isci_smp_request_build() - This function builds the smp request.
4077 * @ireq: This parameter points to the isci_request allocated in the
4078 *    request construct function.
4079 *
 * SCI_SUCCESS on successful completion, or specific failure code.
4081 */
4082static enum sci_status isci_smp_request_build(struct isci_request *ireq)
4083{
4084	enum sci_status status = SCI_FAILURE;
4085	struct sas_task *task = isci_request_access_task(ireq);
4086	struct scic_sds_request *sci_req = &ireq->sci;
4087
4088	dev_dbg(&ireq->isci_host->pdev->dev,
4089		"%s: request = %p\n", __func__, ireq);
4090
4091	dev_dbg(&ireq->isci_host->pdev->dev,
4092		"%s: smp_req len = %d\n",
4093		__func__,
4094		task->smp_task.smp_req.length);
4095
	/* copy the smp command into the request's command area. */
4097	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
4098			  &sci_req->smp.cmd,
4099			  sizeof(struct smp_req));
4100
4101	status = scic_io_request_construct_smp(sci_req);
4102	if (status != SCI_SUCCESS)
4103		dev_warn(&ireq->isci_host->pdev->dev,
4104			 "%s: failed with status = %d\n",
4105			 __func__,
4106			 status);
4107
4108	return status;
4109}
4110
4111/**
4112 * isci_io_request_build() - This function builds the io request object.
4113 * @isci_host: This parameter specifies the ISCI host object
4114 * @request: This parameter points to the isci_request object allocated in the
4115 *    request construct function.
 * @isci_device: This parameter is the handle for the isci remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
4120 */
4121static enum sci_status isci_io_request_build(
4122	struct isci_host *isci_host,
4123	struct isci_request *request,
4124	struct isci_remote_device *isci_device)
4125{
4126	enum sci_status status = SCI_SUCCESS;
4127	struct sas_task *task = isci_request_access_task(request);
4128	struct scic_sds_remote_device *sci_device = &isci_device->sci;
4129
4130	dev_dbg(&isci_host->pdev->dev,
4131		"%s: isci_device = 0x%p; request = %p, "
4132		"num_scatter = %d\n",
4133		__func__,
4134		isci_device,
4135		request,
4136		task->num_scatter);
4137
4138	/* map the sgl addresses, if present.
4139	 * libata does the mapping for sata devices
4140	 * before we get the request.
4141	 */
4142	if (task->num_scatter &&
4143	    !sas_protocol_ata(task->task_proto) &&
4144	    !(SAS_PROTOCOL_SMP & task->task_proto)) {
4145
4146		request->num_sg_entries = dma_map_sg(
4147			&isci_host->pdev->dev,
4148			task->scatter,
4149			task->num_scatter,
4150			task->data_dir
4151			);
4152
4153		if (request->num_sg_entries == 0)
4154			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
4155	}
4156
4157	/* build the common request object. For now,
4158	 * we will let the core allocate the IO tag.
4159	 */
4160	status = scic_io_request_construct(&isci_host->sci, sci_device,
4161					   SCI_CONTROLLER_INVALID_IO_TAG,
4162					   &request->sci);
4163
4164	if (status != SCI_SUCCESS) {
4165		dev_warn(&isci_host->pdev->dev,
4166			 "%s: failed request construct\n",
4167			 __func__);
4168		return SCI_FAILURE;
4169	}
4170
4171	switch (task->task_proto) {
4172	case SAS_PROTOCOL_SMP:
4173		status = isci_smp_request_build(request);
4174		break;
4175	case SAS_PROTOCOL_SSP:
4176		status = isci_request_ssp_request_construct(request);
4177		break;
4178	case SAS_PROTOCOL_SATA:
4179	case SAS_PROTOCOL_STP:
4180	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
4181		status = isci_request_stp_request_construct(request);
4182		break;
4183	default:
4184		dev_warn(&isci_host->pdev->dev,
4185			 "%s: unknown protocol\n", __func__);
4186		return SCI_FAILURE;
4187	}
4188
	return status;
4190}
4191
4192/**
4193 * isci_request_alloc_core() - This function gets the request object from the
4194 *    isci_host dma cache.
4195 * @isci_host: This parameter specifies the ISCI host object
4196 * @isci_request: This parameter will contain the pointer to the new
4197 *    isci_request object.
4198 * @isci_device: This parameter is the pointer to the isci remote device object
4199 *    that is the destination for this request.
4200 * @gfp_flags: This parameter specifies the os allocation flags.
4201 *
 * SCI_SUCCESS on successful completion, or specific failure code.
4203 */
4204static int isci_request_alloc_core(
4205	struct isci_host *isci_host,
4206	struct isci_request **isci_request,
4207	struct isci_remote_device *isci_device,
4208	gfp_t gfp_flags)
4209{
4210	int ret = 0;
4211	dma_addr_t handle;
	struct isci_request *request;

4215	/* get pointer to dma memory. This actually points
	 * to both the isci_request object and the
4217	 * sci object. The isci object is at the beginning
4218	 * of the memory allocated here.
4219	 */
4220	request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
4221	if (!request) {
4222		dev_warn(&isci_host->pdev->dev,
4223			 "%s: dma_pool_alloc returned NULL\n", __func__);
4224		return -ENOMEM;
4225	}
4226
4227	/* initialize the request object.	*/
4228	spin_lock_init(&request->state_lock);
4229	request->request_daddr = handle;
4230	request->isci_host = isci_host;
4231	request->isci_device = isci_device;
4232	request->io_request_completion = NULL;
4233	request->terminated = false;
4234
4235	request->num_sg_entries = 0;
4236
4237	request->complete_in_target = false;
4238
4239	INIT_LIST_HEAD(&request->completed_node);
4240	INIT_LIST_HEAD(&request->dev_node);
4241
4242	*isci_request = request;
4243	isci_request_change_state(request, allocated);
4244
4245	return ret;
4246}
4247
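/*
 * isci_request_alloc_io() - allocate a request object from the dma cache
 * and associate it with the given sas_task as an I/O (as opposed to task
 * management) request.
 */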
4248static int isci_request_alloc_io(
4249	struct isci_host *isci_host,
4250	struct sas_task *task,
4251	struct isci_request **isci_request,
4252	struct isci_remote_device *isci_device,
4253	gfp_t gfp_flags)
4254{
4255	int retval = isci_request_alloc_core(isci_host, isci_request,
4256					     isci_device, gfp_flags);
4257
4258	if (!retval) {
4259		(*isci_request)->ttype_ptr.io_task_ptr = task;
4260		(*isci_request)->ttype                 = io_task;
4261
4262		task->lldd_task = *isci_request;
4263	}
4264	return retval;
4265}
4266
4267/**
4268 * isci_request_alloc_tmf() - This function gets the request object from the
4269 *    isci_host dma cache and initializes the relevant fields as a sas_task.
4270 * @isci_host: This parameter specifies the ISCI host object
 * @isci_tmf: This parameter is the task management struct from the upper
 *    layer driver.
4272 * @isci_request: This parameter will contain the pointer to the new
4273 *    isci_request object.
4274 * @isci_device: This parameter is the pointer to the isci remote device object
4275 *    that is the destination for this request.
4276 * @gfp_flags: This parameter specifies the os allocation flags.
4277 *
 * SCI_SUCCESS on successful completion, or specific failure code.
4279 */
4280int isci_request_alloc_tmf(
4281	struct isci_host *isci_host,
4282	struct isci_tmf *isci_tmf,
4283	struct isci_request **isci_request,
4284	struct isci_remote_device *isci_device,
4285	gfp_t gfp_flags)
4286{
4287	int retval = isci_request_alloc_core(isci_host, isci_request,
4288					     isci_device, gfp_flags);
4289
4290	if (!retval) {
4291
4292		(*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
4293		(*isci_request)->ttype = tmf_task;
4294	}
4295	return retval;
4296}
4297
4298/**
4299 * isci_request_execute() - This function allocates the isci_request object,
 *    and fills in some common fields.
4301 * @isci_host: This parameter specifies the ISCI host object
 * @task: This parameter is the task struct from the upper layer driver.
4303 * @isci_request: This parameter will contain the pointer to the new
4304 *    isci_request object.
4305 * @gfp_flags: This parameter specifies the os allocation flags.
4306 *
 * SCI_SUCCESS on successful completion, or specific failure code.
4308 */
4309int isci_request_execute(
4310	struct isci_host *isci_host,
4311	struct sas_task *task,
4312	struct isci_request **isci_request,
4313	gfp_t gfp_flags)
4314{
4315	int ret = 0;
4316	struct scic_sds_remote_device *sci_device;
4317	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
4318	struct isci_remote_device *isci_device;
	struct isci_request *request = NULL;
4320	unsigned long flags;
4321
4322	isci_device = task->dev->lldd_dev;
4323	sci_device = &isci_device->sci;
4324
4325	/* do common allocation and init of request object. */
	ret = isci_request_alloc_io(isci_host, task, &request, isci_device,
				    gfp_flags);
4333
4334	if (ret)
4335		goto out;
4336
4337	status = isci_io_request_build(isci_host, request, isci_device);
4338	if (status != SCI_SUCCESS) {
4339		dev_warn(&isci_host->pdev->dev,
4340			 "%s: request_construct failed - status = 0x%x\n",
4341			 __func__,
4342			 status);
4343		goto out;
4344	}
4345
4346	spin_lock_irqsave(&isci_host->scic_lock, flags);
4347
	/* send the request, let the core assign the IO TAG. */
4349	status = scic_controller_start_io(&isci_host->sci, sci_device,
4350					  &request->sci,
4351					  SCI_CONTROLLER_INVALID_IO_TAG);
4352	if (status != SCI_SUCCESS &&
4353	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
4354		dev_warn(&isci_host->pdev->dev,
4355			 "%s: failed request start (0x%x)\n",
4356			 __func__, status);
4357		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
4358		goto out;
4359	}
4360
4361	/* Either I/O started OK, or the core has signaled that
4362	 * the device needs a target reset.
4363	 *
4364	 * In either case, hold onto the I/O for later.
4365	 *
	 * Update its status and add it to the list in the
4367	 * remote device object.
4368	 */
4369	isci_request_change_state(request, started);
4370	list_add(&request->dev_node, &isci_device->reqs_in_process);
4371
4372	if (status == SCI_SUCCESS) {
4373		/* Save the tag for possible task mgmt later. */
4374		request->io_tag = request->sci.io_tag;
4375	} else {
4376		/* The request did not really start in the
4377		 * hardware, so clear the request handle
4378		 * here so no terminations will be done.
4379		 */
4380		request->terminated = true;
4381	}
4382	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
4383
	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(isci_host, task, sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}
4406
4407 out:
4408	if (status != SCI_SUCCESS) {
4409		/* release dma memory on failure. */
4410		isci_request_free(isci_host, request);
4411		request = NULL;
4412		ret = SCI_FAILURE;
4413	}
4414
4415	*isci_request = request;
4416	return ret;
4417}
4418