/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}
static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}
static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}
static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
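
/*
 * Note on the byte-swapping above (our reading of why sci_swab32_cpy()
 * is used rather than memcpy()): the hardware appears to consume the CDB
 * as big-endian dwords, so on a little-endian host each u32 of the CDB
 * must be byte-swapped before being placed in cmd_iu->cdb.  For example,
 * CDB bytes 0x12 0x00 0x00 0x00 0x24 0x00 occupy two u32s and each one
 * is swabbed in place.
 */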
static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}
/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @ireq: the request whose task context is being constructed
 * @task_context: the SCU task context buffer to fill in
 *
 */
static void scu_ssp_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
	switch (sdp->sector_size) {
	case 512:
		return 0;
	case 1024:
		return 1;
	case 4096:
		return 3;
	default:
		return 0xff;
	}
}
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
	return (len >> ilog2(sector_size)) * 8;
}
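
/*
 * Worked example for scu_dif_bytes(): with 512-byte sectors and a 64 KiB
 * transfer, (65536 >> ilog2(512)) * 8 = 128 * 8 = 1024, i.e. 8 bytes of
 * DIF protection information are accounted for per sector.
 */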
static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF write insert */
	tc->blk_prot_func = 0x2;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;
	tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF write insert */
	tc->bgctl_f.op = 0x2;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_gen = 0;
}
static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF read strip */
	tc->blk_prot_func = 0x1;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/** setup block guard control **/
	tc->bgctl = 0;

	/* DIF read strip */
	tc->bgctl_f.crc_verify = 1;
	tc->bgctl_f.op = 0x1;
	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
		tc->bgctl_f.ref_tag_chk = 1;
		tc->bgctl_f.app_f_detect = 1;
	} else if (type & SCSI_PROT_DIF_TYPE3)
		tc->bgctl_f.app_ref_f_detect = 1;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;
	tc->ref_tag_seed_gen = 0;
}
/**
 * This method will fill in the SCU Task Context for a SSP IO request.
 * @ireq: the SSP IO request whose task context is being constructed
 *
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}
/**
 * This method will fill in the SCU Task Context for a SSP Task request.
 *    The following important settings are utilized:
 *    -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *       request is issued ahead of other tasks destined for the same
 *       Remote Node.
 *    -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This simply indicates that a
 *       normal request type (i.e. non-raw frame) is being utilized to
 *       perform task management.
 *    -# control_frame == 1.  This ensures that the proper endianness is set
 *       so that the bytes are transmitted in the right order for a task
 *       frame.
 * @ireq: This parameter specifies the task request object being
 *    constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}
/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment for
 * the command buffer is complete. none Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
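
/*
 * Sizing note (assuming the usual 20-byte struct host_to_dev_fis): the
 * ssp_command_iu_length above works out to (20 - 4) / 4 = 4 dwords.  The
 * first dword of the H2D FIS travels in type.words[0] inside the TC, which
 * is also why the command buffer DMA address is offset by sizeof(u32).
 */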
static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}
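
/*
 * Usage note: a caller passes copy_rx_frame == false (sgl.index == -1)
 * when the received data should stay in the unsolicited frame buffer
 * rather than be copied through the SGL; the SCI_REQ_STP_PIO_DATA_IN
 * frame handler below checks sgl.index < 0 for exactly this case and
 * saves the frame index instead of copying the data.
 */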
/**
 * sci_stp_optimized_request_construct()
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
 * returns an indication as to whether the construction was successful.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_reqeust_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values are consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}
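
/*
 * Example of the task-type arithmetic above (assuming the enum spaces the
 * READ/WRITE task types identically, as the comment states): an NCQ write
 * constructed with optimized_task_type == SCU_TASK_TYPE_FPDMAQ_READ yields
 * SCU_TASK_TYPE_FPDMAQ_READ + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN),
 * i.e. the FPDMA WRITE task type, while a plain UDMA write turns
 * SCU_TASK_TYPE_DMA_IN into SCU_TASK_TYPE_DMA_OUT.
 */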
static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited h2d fis
	 */
	ireq->stp.rsp.fis_type = 0;
}
static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			      u32 len,
			      enum dma_data_direction dir,
			      bool copy)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
			return SCI_SUCCESS;
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_FPDMAQ_READ,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_DMA_IN,
						    len, dir);
		return SCI_SUCCESS;
	} else /* PIO */
		return sci_stp_pio_request_construct(ireq, copy);

	return status;
}
static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = sci_io_request_construct_sata(ireq,
					       task->total_xfer_len,
					       task->data_dir,
					       copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}
enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status != SCI_SUCCESS)
		return status;
	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
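
/*
 * Worked example of the SRAM address computation above (256-byte task
 * contexts assumed, per the 256*TCi comment): for an io_tag whose TCi is 2
 * the read targets BAR1 + 0x200000 + 0x2c + 2 * 0x100 = BAR1 + 0x20022C.
 */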
enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* / @todo When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}
enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		/* If a request has a termination requested twice, return
		 * a failure indication, since HW confirmation of the first
		 * abort is still outstanding.
		 */
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}
enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);

	return SCI_SUCCESS;
}
enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}
/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SCIC_STP_PROTOCOL) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected.  but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For these types of error,
		 * we ask the ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}
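
/*
 * Walk order illustrated: A of pair N -> B of pair N (if B is non-zero) ->
 * A of pair N+1 (if the next-pair address is non-zero) -> ... -> NULL.
 * A zeroed B element or a zero next_pair_{upper,lower} terminates the walk,
 * matching how sci_request_build_sgl() zeroes the tail of the chain.
 */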
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length. current sgl and offset is already stored in the IO request
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}
static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}
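
/*
 * Example (hypothetical sizes): with pio_len == 3072 and a current SGL
 * element of 2048 bytes, the first call sends a 2048-byte DATA FIS and
 * advances to the next element via pio_sgl_next(); the next call takes the
 * stp_req->pio_len < len branch, sends the remaining 1024 bytes and bumps
 * the element's address and saved offset by 1024.
 */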
/**
 * sci_stp_request_pio_data_in_copy_data_buffer()
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region. enum sci_status
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
/**
 * sci_stp_request_pio_data_in_copy_data()
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region. enum sci_status
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}
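
/*
 * Example: a PIO read with stp_req->pio_len == 1536 consumes two DATA
 * frames: the first copies a full SCU_MAX_FRAME_BUFFER_SIZE (1024) bytes
 * and leaves pio_len == 512; the second takes the short-copy path above
 * and copies the final 512 bytes.
 */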
static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}
static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for the PIO_SETUP fis / or D2H Reg fis. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}
static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}
static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}
static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_IO_FAILURE_RESPONSE_VALID;
	} else {
		status = SCI_IO_FAILURE_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}
static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing a CDB in Raw
	 * Frame type.  The TC for the previous Packet fis was already there;
	 * we only need to change the H2D fis content.
	 */
	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
	task_context->type.stp.fis_type = FIS_DATA;
	task_context->transfer_length_bytes = dev->cdb_len;
}
static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	struct sas_task *task = isci_request_access_task(ireq);
	struct scu_task_context *task_context = ireq->tc;
	int cdb_len = dev->cdb_len;

	/* reference: SSTL 1.13.4.2
	 * task_type, sata_direction
	 */
	if (task->data_dir == DMA_TO_DEVICE) {
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
		task_context->sata_direction = 0;
	} else {
		/* todo: for NO_DATA command, we need to send out raw frame. */
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
		task_context->sata_direction = 1;
	}

	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
	task_context->type.stp.fis_type = FIS_DATA;

	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

	/* task phase is set to TX_CMD */
	task_context->task_phase = 0x1;

	/* retry counter */
	task_context->stp_retry_count = 0;

	/* data transfer size. */
	task_context->transfer_length_bytes = task->total_xfer_len;

	/* setup sgl */
	sci_request_build_sgl(ireq);
}
enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
			     u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct isci_stp_request *stp_req = &ireq->stp.req;
	enum sci_base_request_states state;
	enum sci_status status;
	ssize_t word_cnt;

	state = ireq->sm.current_state_id;
	switch (state) {
	case SCI_REQ_STARTED: {
		struct ssp_frame_hdr ssp_hdr;
		void *frame_header;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);

		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

		if (ssp_hdr.frame_type == SSP_RESPONSE) {
			struct ssp_response_iu *resp_iu;
			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&resp_iu);

			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);

			resp_iu = &ireq->ssp.rsp;

			if (resp_iu->datapres == 0x01 ||
			    resp_iu->datapres == 0x02) {
				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			} else {
				ireq->scu_status = SCU_TASK_DONE_GOOD;
				ireq->sci_status = SCI_SUCCESS;
			}
		} else {
			/* not a response frame, why did it get forwarded? */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p received unexpected "
				"frame %d type 0x%02x\n", __func__, ireq,
				frame_index, ssp_hdr.frame_type);
		}

		/*
		 * In any case we are done with this frame buffer return it to
		 * the controller
		 */
		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}
	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_io_request_copy_response(ireq);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		sci_controller_release_frame(ihost, frame_index);
		return SCI_SUCCESS;
	case SCI_REQ_SMP_WAIT_RESP: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct scatterlist *sg = &task->smp_task.smp_resp;
		void *frame_header, *kaddr;
		u8 *rsp;

		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);
		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
		rsp = kaddr + sg->offset;
		sci_swab32_cpy(rsp, frame_header, 1);

		if (rsp[0] == SMP_RESPONSE) {
			void *smp_resp;

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 &smp_resp);

			word_cnt = (sg->length/4)-1;
			if (word_cnt > 0)
				word_cnt = min_t(unsigned int, word_cnt,
						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);

			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
		} else {
			/*
			 * This was not a response frame why did it get
			 * forwarded?
			 */
			dev_err(&ihost->pdev->dev,
				"%s: SCIC SMP Request 0x%p received unexpected "
				"frame %d type 0x%02x\n",
				__func__,
				ireq,
				frame_index,
				rsp[0]);

			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		}
		kunmap_atomic(kaddr, KM_IRQ0);

		sci_controller_release_frame(ihost, frame_index);

		return SCI_SUCCESS;
	}
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return sci_stp_request_udma_general_frame_handler(ireq,
								  frame_index);
	case SCI_REQ_STP_UDMA_WAIT_D2H:
		/* Use the general frame handler to copy the response data */
		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);

		if (status != SCI_SUCCESS)
			return status;

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__,
				stp_req,
				frame_index,
				status);

			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_REGD2H:
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			/* The command has completed with error */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			break;

		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: IO Request:0x%p Frame Id:%d protocol "
				 "violation occurred\n", __func__, stp_req,
				 frame_index);

			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
			break;
		}

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

		/* Frame has been decoded return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}
	case SCI_REQ_STP_PIO_WAIT_FRAME: {
		struct sas_task *task = isci_request_access_task(ireq);
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(&ihost->pdev->dev,
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_PIO_SETUP:
			/* Get from the frame buffer the PIO Setup Data */
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			/* Get the data from the PIO Setup.  The SCU Hardware
			 * returns the first word in the frame_header and the rest
			 * of the data is in the frame buffer so we need to
			 * back up one dword
			 */

			/* transfer_count: first 16bits in the 4th dword */
			stp_req->pio_len = frame_buffer[3] & 0xffff;

			/* status: 4th byte in the 3rd dword */
			stp_req->status = (frame_buffer[2] >> 24) & 0xff;

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->stp.rsp.status = stp_req->status;

			/* The next state is dependent on whether the
			 * request was PIO Data-in or Data out
			 */
			if (task->data_dir == DMA_FROM_DEVICE) {
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
			} else if (task->data_dir == DMA_TO_DEVICE) {
				/* Transmit data */
				status = sci_stp_request_pio_data_out_transmit_data(ireq);
				if (status != SCI_SUCCESS)
					break;
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
			}
			break;

		case FIS_SETDEVBITS:
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
			break;

		case FIS_REGD2H:
			if (frame_header->status & ATA_BUSY) {
				/*
				 * Now why is the drive sending a D2H Register
				 * FIS when it is still busy?  Do nothing since
				 * we are still in the right state.
				 */
				dev_dbg(&ihost->pdev->dev,
					"%s: SCIC PIO Request 0x%p received "
					"D2H Register FIS with BSY status "
					"0x%x\n",
					__func__,
					stp_req,
					frame_header->status);
				break;
			}

			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);

			sci_controller_copy_sata_response(&ireq->stp.rsp,
							  frame_header,
							  frame_buffer);

			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
			break;

		default:
			/* FIXME: what do we do here? */
			break;
		}

		/* Frame is decoded return it to the controller */
		sci_controller_release_frame(ihost, frame_index);

		return status;
	}
2019 case SCI_REQ_STP_PIO_DATA_IN: {
2020 struct dev_to_host_fis *frame_header;
2021 struct sata_fis_data *frame_buffer;
2023 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
2025 (void **)&frame_header);
2027 if (status != SCI_SUCCESS) {
2028 dev_err(&ihost->pdev->dev,
2029 "%s: SCIC IO Request 0x%p could not get frame "
2030 "header for frame index %d, status %x\n",
2038 if (frame_header->fis_type != FIS_DATA) {
2039 dev_err(&ihost->pdev->dev,
2040 "%s: SCIC PIO Request 0x%p received frame %d "
2041 "with fis type 0x%02x when expecting a data "
2046 frame_header->fis_type);
2048 ireq->scu_status = SCU_TASK_DONE_GOOD;
2049 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
2050 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2052 /* Frame is decoded; return it to the controller */
2053 sci_controller_release_frame(ihost, frame_index);
2057 if (stp_req->sgl.index < 0) {
2058 ireq->saved_rx_frame_index = frame_index;
2059 stp_req->pio_len = 0;
2061 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
2063 (void **)&frame_buffer);
2065 status = sci_stp_request_pio_data_in_copy_data(stp_req,
2066 (u8 *)frame_buffer);
2068 /* Frame is decoded; return it to the controller */
2069 sci_controller_release_frame(ihost, frame_index);
2072 /* Check for the end of the transfer: are there more
2073 * bytes remaining for this data transfer?
2075 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
2078 if ((stp_req->status & ATA_BUSY) == 0) {
2079 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2080 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2081 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2083 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
2088 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
2089 struct dev_to_host_fis *frame_header;
2092 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
2094 (void **)&frame_header);
2095 if (status != SCI_SUCCESS) {
2096 dev_err(&ihost->pdev->dev,
2097 "%s: SCIC IO Request 0x%p could not get frame "
2098 "header for frame index %d, status %x\n",
2106 switch (frame_header->fis_type) {
2108 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
2110 (void **)&frame_buffer);
2112 sci_controller_copy_sata_response(&ireq->stp.rsp,
2116 /* The command has completed with error */
2117 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2118 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2122 dev_warn(&ihost->pdev->dev,
2123 "%s: IO Request:0x%p Frame Id:%d protocol "
2124 "violation occurred\n",
2129 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
2130 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
2134 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2136 /* Frame has been decoded; return it to the controller */
2137 sci_controller_release_frame(ihost, frame_index);
2141 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
2142 struct sas_task *task = isci_request_access_task(ireq);
2144 sci_controller_release_frame(ihost, frame_index);
2145 ireq->target_device->working_request = ireq;
2146 if (task->data_dir == DMA_NONE) {
2147 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2148 scu_atapi_reconstruct_raw_frame_task_context(ireq);
2150 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2151 scu_atapi_construct_task_context(ireq);
2154 sci_controller_continue_io(ireq);
2157 case SCI_REQ_ATAPI_WAIT_D2H:
2158 return atapi_d2h_reg_frame_handler(ireq, frame_index);
2159 case SCI_REQ_ABORTING:
2161 * TODO: Is it even possible to get an unsolicited frame in the
2164 sci_controller_release_frame(ihost, frame_index);
2168 dev_warn(&ihost->pdev->dev,
2169 "%s: SCIC IO Request given unexpected frame %x while "
2175 sci_controller_release_frame(ihost, frame_index);
2176 return SCI_FAILURE_INVALID_STATE;
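/*
 * Note: the unsolicited-frame states above all follow one pattern: peek
 * at the frame header, optionally copy the payload out of the frame
 * buffer, and always hand the frame back to the controller.  A minimal
 * sketch of that pattern (illustrative only, error handling elided, so
 * it is kept out of the build):
 */
#if 0
static void uf_handling_sketch(struct isci_host *ihost,
			       struct isci_request *ireq, u32 frame_index)
{
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	sci_unsolicited_frame_control_get_header(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_header);
	if (frame_header->fis_type == FIS_REGD2H) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);
		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header, frame_buffer);
	}
	/* every decoded frame must be returned to the controller */
	sci_controller_release_frame(ihost, frame_index);
}
#endif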
2180 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2181 u32 completion_code)
2183 enum sci_status status = SCI_SUCCESS;
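	/* SCU_GET_COMPLETION_TL_STATUS() masks the transport-layer status
	 * field out of the raw completion code, and
	 * SCU_MAKE_COMPLETION_STATUS() shifts a SCU_TASK_DONE_* value into
	 * that same field (see scu_completion_codes.h), so the case labels
	 * below compare like with like.
	 */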
2185 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2186 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2187 ireq->scu_status = SCU_TASK_DONE_GOOD;
2188 ireq->sci_status = SCI_SUCCESS;
2189 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2191 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2192 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2193 /* We must check the response buffer to see if the D2H
2194 * Register FIS was received before we got the TC
2197 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2198 sci_remote_device_suspend(ireq->target_device,
2199 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2201 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2202 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2203 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2205 /* If we have an error completion status for the
2206 * TC then we can expect a D2H register FIS from
2207 * the device so we must change state to wait
2210 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2214 /* TODO Check to see if any of these completion statuses need to
2215 * wait for the device-to-host register FIS.
2217 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2218 * - this comes only for B0
2220 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2221 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2222 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2223 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2224 sci_remote_device_suspend(ireq->target_device,
2225 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2226 /* Fall through to the default case */
2228 /* All other completion statuses cause the IO to be complete. */
2229 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2230 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2231 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2238 static enum sci_status
2239 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2240 u32 completion_code)
2242 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2243 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2244 ireq->scu_status = SCU_TASK_DONE_GOOD;
2245 ireq->sci_status = SCI_SUCCESS;
2246 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2251 * All other completion statuses cause the IO to be complete.
2252 * If a NAK was received, then it is up to the user to retry
2255 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2256 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2257 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2264 static enum sci_status
2265 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2266 u32 completion_code)
2268 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2269 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2270 ireq->scu_status = SCU_TASK_DONE_GOOD;
2271 ireq->sci_status = SCI_SUCCESS;
2272 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2276 /* All other completion statuses cause the IO to be complete. If
2277 * a NAK was received, then it is up to the user to retry the
2280 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2281 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2282 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2289 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2290 enum sci_base_request_states next)
2292 enum sci_status status = SCI_SUCCESS;
2294 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2295 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2296 ireq->scu_status = SCU_TASK_DONE_GOOD;
2297 ireq->sci_status = SCI_SUCCESS;
2298 sci_change_state(&ireq->sm, next);
2301 /* All other completion statuses cause the IO to be complete.
2302 * If a NAK was received, then it is up to the user to retry
2305 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2306 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2308 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2315 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2316 u32 completion_code)
2318 struct isci_remote_device *idev = ireq->target_device;
2319 struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2320 enum sci_status status = SCI_SUCCESS;
2322 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2323 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2324 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2327 case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2328 u16 len = sci_req_tx_bytes(ireq);
2330 /* likely a non-error data underrun; work around the missing
2331 * d2h frame from the controller
2333 if (d2h->fis_type != FIS_REGD2H) {
2334 d2h->fis_type = FIS_REGD2H;
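			/* flags bit 6 is the interrupt ("I") bit of a
			 * D2H register FIS per the SATA spec
			 */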
2335 d2h->flags = (1 << 6);
2339 d2h->byte_count_low = len & 0xff;
2340 d2h->byte_count_high = len >> 8;
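			/* for ATAPI the sector count register carries the
			 * interrupt reason; 0x3 means C/D and I/O set,
			 * i.e. the status (command-complete) phase
			 */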
2346 d2h->sector_count = 0x3;
2347 d2h->sector_count_exp = 0;
2353 ireq->scu_status = SCU_TASK_DONE_GOOD;
2354 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2355 status = ireq->sci_status;
2357 /* the hw will have suspended the rnc, so complete the
2358 * request upon pending resume
2360 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2363 case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2364 /* In this case, there is no UF coming after;
2365 * complete the IO now.
2367 ireq->scu_status = SCU_TASK_DONE_GOOD;
2368 ireq->sci_status = SCI_SUCCESS;
2369 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2373 if (d2h->fis_type == FIS_REGD2H) {
2374 /* UF received change the device state to ATAPI_ERROR */
2375 status = ireq->sci_status;
2376 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2378 /* If we receive any non-success TC status and no UF
2379 * has been received yet, then a UF for the status FIS
2380 * is coming after (XXX: suspect this is
2381 * actually a protocol error or a bug like the
2382 * DONE_UNEXP_FIS case)
2384 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2385 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2387 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
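/*
 * sci_io_request_tc_completion() - dispatch a task context completion
 *    notification to the handler for the request's current state.
 */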
2396 sci_io_request_tc_completion(struct isci_request *ireq,
2397 u32 completion_code)
2399 enum sci_base_request_states state;
2400 struct isci_host *ihost = ireq->owning_controller;
2402 state = ireq->sm.current_state_id;
2405 case SCI_REQ_STARTED:
2406 return request_started_state_tc_event(ireq, completion_code);
2408 case SCI_REQ_TASK_WAIT_TC_COMP:
2409 return ssp_task_request_await_tc_event(ireq,
2412 case SCI_REQ_SMP_WAIT_RESP:
2413 return smp_request_await_response_tc_event(ireq,
2416 case SCI_REQ_SMP_WAIT_TC_COMP:
2417 return smp_request_await_tc_event(ireq, completion_code);
2419 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2420 return stp_request_udma_await_tc_event(ireq,
2423 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2424 return stp_request_non_data_await_h2d_tc_event(ireq,
2427 case SCI_REQ_STP_PIO_WAIT_H2D:
2428 return stp_request_pio_await_h2d_completion_tc_event(ireq,
2431 case SCI_REQ_STP_PIO_DATA_OUT:
2432 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2434 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2435 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2438 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2439 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2442 case SCI_REQ_ABORTING:
2443 return request_aborting_state_tc_event(ireq,
2446 case SCI_REQ_ATAPI_WAIT_H2D:
2447 return atapi_raw_completion(ireq, completion_code,
2448 SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2450 case SCI_REQ_ATAPI_WAIT_TC_COMP:
2451 return atapi_raw_completion(ireq, completion_code,
2452 SCI_REQ_ATAPI_WAIT_D2H);
2454 case SCI_REQ_ATAPI_WAIT_D2H:
2455 return atapi_data_tc_completion_handler(ireq, completion_code);
2458 dev_warn(&ihost->pdev->dev,
2459 "%s: SCIC IO Request given task completion "
2460 "notification %x while in wrong state %d\n",
2464 return SCI_FAILURE_INVALID_STATE;
2469 * isci_request_process_response_iu() - This function sets the status and
2470 * response iu, in the task struct, from the request object for the upper
2472 * @task: This parameter is the task struct from the upper layer driver.
2473 * @resp_iu: This parameter points to the response iu of the completed request.
2474 * @dev: This parameter specifies the linux device struct.
2478 static void isci_request_process_response_iu(
2479 struct sas_task *task,
2480 struct ssp_response_iu *resp_iu,
2485 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2486 "resp_iu->response_data_len = %x, "
2487 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2492 resp_iu->response_data_len,
2493 resp_iu->sense_data_len);
2495 task->task_status.stat = resp_iu->status;
2497 /* libsas updates the task status fields based on the response iu. */
2498 sas_ssp_task_response(dev, task, resp_iu);
2502 * isci_request_set_open_reject_status() - This function prepares the I/O
2503 * completion for OPEN_REJECT conditions.
2504 * @request: This parameter is the completed isci_request object.
2505 * @response_ptr: This parameter specifies the service response for the I/O.
2506 * @status_ptr: This parameter specifies the exec status for the I/O.
2507 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2508 * the LLDD with respect to completing this request or forcing an abort
2509 * condition on the I/O.
2510 * @open_rej_reason: This parameter specifies the encoded reason for the
2511 * abandon-class reject.
2515 static void isci_request_set_open_reject_status(
2516 struct isci_request *request,
2517 struct sas_task *task,
2518 enum service_response *response_ptr,
2519 enum exec_status *status_ptr,
2520 enum isci_completion_selection *complete_to_host_ptr,
2521 enum sas_open_rej_reason open_rej_reason)
2523 /* Task in the target is done. */
2524 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2525 *response_ptr = SAS_TASK_UNDELIVERED;
2526 *status_ptr = SAS_OPEN_REJECT;
2527 *complete_to_host_ptr = isci_perform_normal_io_completion;
2528 task->task_status.open_rej_reason = open_rej_reason;
2532 * isci_request_handle_controller_specific_errors() - This function decodes
2533 * controller-specific I/O completion error conditions.
2534 * @request: This parameter is the completed isci_request object.
2535 * @response_ptr: This parameter specifies the service response for the I/O.
2536 * @status_ptr: This parameter specifies the exec status for the I/O.
2537 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2538 * the LLDD with respect to completing this request or forcing an abort
2539 * condition on the I/O.
2543 static void isci_request_handle_controller_specific_errors(
2544 struct isci_remote_device *idev,
2545 struct isci_request *request,
2546 struct sas_task *task,
2547 enum service_response *response_ptr,
2548 enum exec_status *status_ptr,
2549 enum isci_completion_selection *complete_to_host_ptr)
2551 unsigned int cstatus;
2553 cstatus = request->scu_status;
2555 dev_dbg(&request->isci_host->pdev->dev,
2556 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2557 "- controller status = 0x%x\n",
2558 __func__, request, cstatus);
2560 /* Decode the controller-specific errors; most
2561 * important is to recognize those conditions in which
2562 * the target may still have a task outstanding that
2565 * Note that there are SCU completion codes being
2566 * named in the decode below for which SCIC has already
2567 * done work to handle them in a way other than as
2568 * a controller-specific completion code; these are left
2569 * in the decode below for completeness' sake.
2572 case SCU_TASK_DONE_DMASETUP_DIRERR:
2573 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2574 case SCU_TASK_DONE_XFERCNT_ERR:
2575 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2576 if (task->task_proto == SAS_PROTOCOL_SMP) {
2577 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2578 *response_ptr = SAS_TASK_COMPLETE;
2580 /* See if the device has been/is being stopped. Note
2581 * that we ignore the quiesce state, since we are
2582 * concerned about the actual device state.
2585 *status_ptr = SAS_DEVICE_UNKNOWN;
2587 *status_ptr = SAS_ABORTED_TASK;
2589 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2591 *complete_to_host_ptr =
2592 isci_perform_normal_io_completion;
2594 /* Task in the target is not done. */
2595 *response_ptr = SAS_TASK_UNDELIVERED;
2598 *status_ptr = SAS_DEVICE_UNKNOWN;
2600 *status_ptr = SAM_STAT_TASK_ABORTED;
2602 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2604 *complete_to_host_ptr =
2605 isci_perform_error_io_completion;
2610 case SCU_TASK_DONE_CRC_ERR:
2611 case SCU_TASK_DONE_NAK_CMD_ERR:
2612 case SCU_TASK_DONE_EXCESS_DATA:
2613 case SCU_TASK_DONE_UNEXP_FIS:
2614 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2615 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2616 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2617 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2618 /* These are conditions in which the target
2619 * has completed the task, so that no cleanup
2622 *response_ptr = SAS_TASK_COMPLETE;
2624 /* See if the device has been/is being stopped. Note
2625 * that we ignore the quiesce state, since we are
2626 * concerned about the actual device state.
2629 *status_ptr = SAS_DEVICE_UNKNOWN;
2631 *status_ptr = SAS_ABORTED_TASK;
2633 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2635 *complete_to_host_ptr = isci_perform_normal_io_completion;
2639 /* Note that the only open reject completion codes seen here will be
2640 * abandon-class codes; all others are automatically retried in the SCU.
2642 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2644 isci_request_set_open_reject_status(
2645 request, task, response_ptr, status_ptr,
2646 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2649 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2651 /* Note - the return of AB0 will change when
2652 * libsas implements detection of zone violations.
2654 isci_request_set_open_reject_status(
2655 request, task, response_ptr, status_ptr,
2656 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2659 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2661 isci_request_set_open_reject_status(
2662 request, task, response_ptr, status_ptr,
2663 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2666 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2668 isci_request_set_open_reject_status(
2669 request, task, response_ptr, status_ptr,
2670 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2673 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2675 isci_request_set_open_reject_status(
2676 request, task, response_ptr, status_ptr,
2677 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2680 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2682 isci_request_set_open_reject_status(
2683 request, task, response_ptr, status_ptr,
2684 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2687 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2689 isci_request_set_open_reject_status(
2690 request, task, response_ptr, status_ptr,
2691 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2694 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2696 isci_request_set_open_reject_status(
2697 request, task, response_ptr, status_ptr,
2698 complete_to_host_ptr, SAS_OREJ_EPROTO);
2701 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2703 isci_request_set_open_reject_status(
2704 request, task, response_ptr, status_ptr,
2705 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2708 case SCU_TASK_DONE_LL_R_ERR:
2709 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2710 case SCU_TASK_DONE_LL_PERR:
2711 case SCU_TASK_DONE_LL_SY_TERM:
2712 /* Also SCU_TASK_DONE_NAK_ERR:*/
2713 case SCU_TASK_DONE_LL_LF_TERM:
2714 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2715 case SCU_TASK_DONE_LL_ABORT_ERR:
2716 case SCU_TASK_DONE_SEQ_INV_TYPE:
2717 /* Also SCU_TASK_DONE_UNEXP_XR: */
2718 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2719 case SCU_TASK_DONE_INV_FIS_LEN:
2720 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2721 case SCU_TASK_DONE_SDMA_ERR:
2722 case SCU_TASK_DONE_OFFSET_ERR:
2723 case SCU_TASK_DONE_MAX_PLD_ERR:
2724 case SCU_TASK_DONE_LF_ERR:
2725 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2726 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2727 case SCU_TASK_DONE_UNEXP_DATA:
2728 case SCU_TASK_DONE_UNEXP_SDBFIS:
2729 case SCU_TASK_DONE_REG_ERR:
2730 case SCU_TASK_DONE_SDB_ERR:
2731 case SCU_TASK_DONE_TASK_ABORT:
2733 /* Task in the target is not done. */
2734 *response_ptr = SAS_TASK_UNDELIVERED;
2735 *status_ptr = SAM_STAT_TASK_ABORTED;
2737 if (task->task_proto == SAS_PROTOCOL_SMP) {
2738 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2740 *complete_to_host_ptr = isci_perform_normal_io_completion;
2742 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2744 *complete_to_host_ptr = isci_perform_error_io_completion;
2751 * isci_task_save_for_upper_layer_completion() - This function saves the
2752 * request for later completion to the upper layer driver.
2753 * @host: This parameter is a pointer to the host on which the request
2754 * should be queued (either as an error or success).
2755 * @request: This parameter is the completed request.
2756 * @response: This parameter is the response code for the completed task.
2757 * @status: This parameter is the status code for the completed task.
2761 static void isci_task_save_for_upper_layer_completion(
2762 struct isci_host *host,
2763 struct isci_request *request,
2764 enum service_response response,
2765 enum exec_status status,
2766 enum isci_completion_selection task_notification_selection)
2768 struct sas_task *task = isci_request_access_task(request);
2770 task_notification_selection
2771 = isci_task_set_completion_status(task, response, status,
2772 task_notification_selection);
2774 /* Tasks aborted specifically by a call to the lldd_abort_task
2775 * function should not be completed to the host in the regular path.
2777 switch (task_notification_selection) {
2779 case isci_perform_normal_io_completion:
2780 /* Normal notification (task_done) */
2782 /* Add to the completed list. */
2783 list_add(&request->completed_node,
2784 &host->requests_to_complete);
2786 /* Take the request off the device's pending request list. */
2787 list_del_init(&request->dev_node);
2790 case isci_perform_aborted_io_completion:
2791 /* No notification to libsas because this request is
2792 * already in the abort path.
2794 /* Wake up whatever process was waiting for this
2795 * request to complete.
2797 WARN_ON(request->io_request_completion == NULL);
2799 if (request->io_request_completion != NULL) {
2801 /* Signal whoever is waiting that this
2802 * request is complete.
2804 complete(request->io_request_completion);
2808 case isci_perform_error_io_completion:
2809 /* Use sas_task_abort */
2810 /* Add to the aborted list. */
2811 list_add(&request->completed_node,
2812 &host->requests_to_errorback);
2816 /* Add to the error-to-libsas list. */
2817 list_add(&request->completed_node,
2818 &host->requests_to_errorback);
2821 dev_dbg(&host->pdev->dev,
2822 "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
2823 __func__, task_notification_selection, task,
2824 (task) ? task->task_status.resp : 0, response,
2825 (task) ? task->task_status.stat : 0, status);
2828 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2830 struct task_status_struct *ts = &task->task_status;
2831 struct ata_task_resp *resp = (void *)&ts->buf[0];
2833 resp->frame_len = sizeof(*fis);
2834 memcpy(resp->ending_fis, fis, sizeof(*fis));
2835 ts->buf_valid_size = sizeof(*resp);
2837 /* If the device fault bit (ATA_DF) is set in the status register,
2838 * report a protocol response so the ending FIS is decoded upstream.
2840 if (fis->status & ATA_DF)
2841 ts->stat = SAS_PROTO_RESPONSE;
2842 else if (fis->status & ATA_ERR)
2843 ts->stat = SAM_STAT_CHECK_CONDITION;
2845 ts->stat = SAM_STAT_GOOD;
2847 ts->resp = SAS_TASK_COMPLETE;
2850 static void isci_request_io_request_complete(struct isci_host *ihost,
2851 struct isci_request *request,
2852 enum sci_io_status completion_status)
2854 struct sas_task *task = isci_request_access_task(request);
2855 struct ssp_response_iu *resp_iu;
2856 unsigned long task_flags;
2857 struct isci_remote_device *idev = request->target_device;
2858 enum service_response response = SAS_TASK_UNDELIVERED;
2859 enum exec_status status = SAS_ABORTED_TASK;
2860 enum isci_request_status request_status;
2861 enum isci_completion_selection complete_to_host
2862 = isci_perform_normal_io_completion;
2864 dev_dbg(&ihost->pdev->dev,
2865 "%s: request = %p, task = %p,\n"
2866 "task->data_dir = %d completion_status = 0x%x\n",
2873 spin_lock(&request->state_lock);
2874 request_status = request->status;
2876 /* Decode the request status. Note that if the request has been
2877 * aborted by a task management function, we don't care
2878 * what the status is.
2880 switch (request_status) {
2883 /* "aborted" indicates that the request was aborted by a task
2884 * management function, since once a task management request is
2885 * performed by the device, the request only completes because
2886 * of the subsequent driver terminate.
2888 * Aborted also means an external thread is explicitly managing
2889 * this request, so that we do not complete it up the stack.
2891 * The target is still there (since the TMF was successful).
2893 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2894 response = SAS_TASK_COMPLETE;
2896 /* See if the device has been/is being stopped. Note
2897 * that we ignore the quiesce state, since we are
2898 * concerned about the actual device state.
2901 status = SAS_DEVICE_UNKNOWN;
2903 status = SAS_ABORTED_TASK;
2905 complete_to_host = isci_perform_aborted_io_completion;
2906 /* This was an aborted request. */
2908 spin_unlock(&request->state_lock);
2912 /* aborting means that the task management function tried and
2913 * failed to abort the request. We need to note the request
2914 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2917 * Aborting also means an external thread is explicitly managing
2918 * this request, so that we do not complete it up the stack.
2920 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2921 response = SAS_TASK_UNDELIVERED;
2924 /* The device has been /is being stopped. Note that
2925 * we ignore the quiesce state, since we are
2926 * concerned about the actual device state.
2928 status = SAS_DEVICE_UNKNOWN;
2930 status = SAS_PHY_DOWN;
2932 complete_to_host = isci_perform_aborted_io_completion;
2934 /* This was an aborted request. */
2936 spin_unlock(&request->state_lock);
2941 /* This was a terminated request. This happens when
2942 * the I/O is being terminated because of an action on
2943 * the device (reset, tear down, etc.), and the I/O needs
2944 * to be completed up the stack.
2946 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2947 response = SAS_TASK_UNDELIVERED;
2949 /* See if the device has been/is being stopped. Note
2950 * that we ignore the quiesce state, since we are
2951 * concerned about the actual device state.
2954 status = SAS_DEVICE_UNKNOWN;
2956 status = SAS_ABORTED_TASK;
2958 complete_to_host = isci_perform_aborted_io_completion;
2960 /* This was a terminated request. */
2962 spin_unlock(&request->state_lock);
2966 /* This was a terminated request that timed out during the
2967 * termination process. There is no task to complete to
2970 complete_to_host = isci_perform_normal_io_completion;
2971 spin_unlock(&request->state_lock);
2976 /* The request is done from an SCU HW perspective. */
2977 request->status = completed;
2979 spin_unlock(&request->state_lock);
2981 /* This is an active request being completed from the core. */
2982 switch (completion_status) {
2984 case SCI_IO_FAILURE_RESPONSE_VALID:
2985 dev_dbg(&ihost->pdev->dev,
2986 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2991 if (sas_protocol_ata(task->task_proto)) {
2992 isci_process_stp_response(task, &request->stp.rsp);
2993 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2995 /* crack the iu response buffer. */
2996 resp_iu = &request->ssp.rsp;
2997 isci_request_process_response_iu(task, resp_iu,
3000 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
3002 dev_err(&ihost->pdev->dev,
3003 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
3004 "SAS_PROTOCOL_SMP protocol\n",
3008 dev_err(&ihost->pdev->dev,
3009 "%s: unknown protocol\n", __func__);
3011 /* use the task status set in the task struct by the
3012 * isci_request_process_response_iu call.
3014 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3015 response = task->task_status.resp;
3016 status = task->task_status.stat;
3019 case SCI_IO_SUCCESS:
3020 case SCI_IO_SUCCESS_IO_DONE_EARLY:
3022 response = SAS_TASK_COMPLETE;
3023 status = SAM_STAT_GOOD;
3024 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3026 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
3028 /* This was an SSP / STP / SATA transfer.
3029 * There is a possibility that less data than
3030 * the maximum was transferred.
3032 u32 transferred_length = sci_req_tx_bytes(request);
3034 task->task_status.residual
3035 = task->total_xfer_len - transferred_length;
3037 /* If there were residual bytes, call this an
3040 if (task->task_status.residual != 0)
3041 status = SAS_DATA_UNDERRUN;
3043 dev_dbg(&ihost->pdev->dev,
3044 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
3049 dev_dbg(&ihost->pdev->dev,
3050 "%s: SCI_IO_SUCCESS\n",
3055 case SCI_IO_FAILURE_TERMINATED:
3056 dev_dbg(&ihost->pdev->dev,
3057 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
3062 /* The request was terminated explicitly. No handling
3063 * is needed in the SCSI error handler path.
3065 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3066 response = SAS_TASK_UNDELIVERED;
3068 /* See if the device has been/is being stopped. Note
3069 * that we ignore the quiesce state, since we are
3070 * concerned about the actual device state.
3073 status = SAS_DEVICE_UNKNOWN;
3075 status = SAS_ABORTED_TASK;
3077 complete_to_host = isci_perform_normal_io_completion;
3080 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
3082 isci_request_handle_controller_specific_errors(
3083 idev, request, task, &response, &status,
3088 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
3089 /* This is a special case, in that the I/O completion
3090 * is telling us that the device needs a reset.
3091 * In order for the device reset condition to be
3092 * noticed, the I/O has to be handled in the error
3093 * handler. Set the reset flag and cause the
3094 * SCSI error thread to be scheduled.
3096 spin_lock_irqsave(&task->task_state_lock, task_flags);
3097 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3098 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
3101 response = SAS_TASK_UNDELIVERED;
3102 status = SAM_STAT_TASK_ABORTED;
3104 complete_to_host = isci_perform_error_io_completion;
3105 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3108 case SCI_FAILURE_RETRY_REQUIRED:
3110 /* Fail the I/O so it can be retried. */
3111 response = SAS_TASK_UNDELIVERED;
3113 status = SAS_DEVICE_UNKNOWN;
3115 status = SAS_ABORTED_TASK;
3117 complete_to_host = isci_perform_normal_io_completion;
3118 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3123 /* Catch any otherwise unhandled error codes here. */
3124 dev_dbg(&ihost->pdev->dev,
3125 "%s: invalid completion code: 0x%x - "
3126 "isci_request = %p\n",
3127 __func__, completion_status, request);
3129 response = SAS_TASK_UNDELIVERED;
3131 /* See if the device has been/is being stopped. Note
3132 * that we ignore the quiesce state, since we are
3133 * concerned about the actual device state.
3136 status = SAS_DEVICE_UNKNOWN;
3138 status = SAS_ABORTED_TASK;
3140 if (SAS_PROTOCOL_SMP == task->task_proto) {
3141 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3142 complete_to_host = isci_perform_normal_io_completion;
3144 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3145 complete_to_host = isci_perform_error_io_completion;
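	/* undo the DMA mapping done in isci_io_request_build(); libata
	 * owns the mapping for SATA/STP, so only SSP and SMP payloads
	 * are unmapped here
	 */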
3152 switch (task->task_proto) {
3153 case SAS_PROTOCOL_SSP:
3154 if (task->data_dir == DMA_NONE)
3156 if (task->num_scatter == 0)
3157 /* 0 indicates a single dma address */
3158 dma_unmap_single(&ihost->pdev->dev,
3159 request->zero_scatter_daddr,
3160 task->total_xfer_len, task->data_dir);
3161 else /* unmap the sgl dma addresses */
3162 dma_unmap_sg(&ihost->pdev->dev, task->scatter,
3163 request->num_sg_entries, task->data_dir);
3165 case SAS_PROTOCOL_SMP: {
3166 struct scatterlist *sg = &task->smp_task.smp_req;
3167 struct smp_req *smp_req;
3170 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3172 /* need to swab it back in case the command buffer is re-used */
3173 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3174 smp_req = kaddr + sg->offset;
3175 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3176 kunmap_atomic(kaddr, KM_IRQ0);
3183 /* Put the completed request on the correct list */
3184 isci_task_save_for_upper_layer_completion(ihost, request, response,
3185 status, complete_to_host
3188 /* complete the io request to the core. */
3189 sci_controller_complete_io(ihost, request->target_device, request);
3191 /* set terminated handle so it cannot be completed or
3192 * terminated again, and to cause any calls into abort
3193 * task to recognize the already completed case.
3195 set_bit(IREQ_TERMINATED, &request->flags);
3198 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3200 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3201 struct domain_device *dev = ireq->target_device->domain_dev;
3202 enum sci_base_request_states state;
3203 struct sas_task *task;
3205 /* XXX as hch said always creating an internal sas_task for tmf
3206 * requests would simplify the driver
3208 task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
3210 /* all unaccelerated request types (non ssp or ncq) handled with
3213 if (!task && dev->dev_type == SAS_END_DEV) {
3214 state = SCI_REQ_TASK_WAIT_TC_COMP;
3216 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3217 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3218 state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
3219 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3220 state = SCI_REQ_SMP_WAIT_RESP;
3221 } else if (task && sas_protocol_ata(task->task_proto) &&
3222 !task->ata_task.use_ncq) {
3223 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
3224 task->ata_task.fis.command == ATA_CMD_PACKET) {
3225 state = SCI_REQ_ATAPI_WAIT_H2D;
3226 } else if (task->data_dir == DMA_NONE) {
3227 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
3228 } else if (task->ata_task.dma_xfer) {
3229 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
3231 state = SCI_REQ_STP_PIO_WAIT_H2D;
3234 /* SSP or NCQ are fully accelerated, no substates */
3237 sci_change_state(sm, state);
3240 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3242 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3243 struct isci_host *ihost = ireq->owning_controller;
3245 /* Tell the SCI_USER that the IO request is complete */
3246 if (!test_bit(IREQ_TMF, &ireq->flags))
3247 isci_request_io_request_complete(ihost, ireq,
3250 isci_task_request_complete(ihost, ireq, ireq->sci_status);
3253 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3255 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3257 /* Setting the abort bit in the Task Context is required by the silicon. */
3258 ireq->tc->abort = 1;
3261 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3263 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3265 ireq->target_device->working_request = ireq;
3268 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3270 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3272 ireq->target_device->working_request = ireq;
3275 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3277 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3279 ireq->target_device->working_request = ireq;
3282 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3284 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3285 struct scu_task_context *tc = ireq->tc;
3286 struct host_to_dev_fis *h2d_fis;
3287 enum sci_status status;
3289 /* Clear the SRST bit */
3290 h2d_fis = &ireq->stp.cmd;
3291 h2d_fis->control = 0;
3293 /* Clear the TC control bit */
3294 tc->control_frame = 0;
3296 status = sci_controller_continue_io(ireq);
3297 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
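/*
 * Note: a SATA soft reset is two register H2D FISes -- one with SRST set
 * (the ..._WAIT_H2D_ASSERTED state) and one with SRST cleared (built just
 * above) -- followed by the device's signature D2H FIS (the
 * ..._WAIT_D2H state).  The state table below chains those states.
 */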
3300 static const struct sci_base_state sci_request_state_table[] = {
3301 [SCI_REQ_INIT] = { },
3302 [SCI_REQ_CONSTRUCTED] = { },
3303 [SCI_REQ_STARTED] = {
3304 .enter_state = sci_request_started_state_enter,
3306 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3307 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3309 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3310 [SCI_REQ_STP_PIO_WAIT_H2D] = {
3311 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3313 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3314 [SCI_REQ_STP_PIO_DATA_IN] = { },
3315 [SCI_REQ_STP_PIO_DATA_OUT] = { },
3316 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3317 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3318 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3319 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3321 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3322 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3324 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3325 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3326 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3327 [SCI_REQ_SMP_WAIT_RESP] = { },
3328 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3329 [SCI_REQ_ATAPI_WAIT_H2D] = { },
3330 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3331 [SCI_REQ_ATAPI_WAIT_D2H] = { },
3332 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3333 [SCI_REQ_COMPLETED] = {
3334 .enter_state = sci_request_completed_state_enter,
3336 [SCI_REQ_ABORTING] = {
3337 .enter_state = sci_request_aborting_state_enter,
3339 [SCI_REQ_FINAL] = { },
3343 sci_general_request_construct(struct isci_host *ihost,
3344 struct isci_remote_device *idev,
3345 struct isci_request *ireq)
3347 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3349 ireq->target_device = idev;
3350 ireq->protocol = SCIC_NO_PROTOCOL;
3351 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3353 ireq->sci_status = SCI_SUCCESS;
3354 ireq->scu_status = 0;
3355 ireq->post_context = 0xFFFFFFFF;
3358 static enum sci_status
3359 sci_io_request_construct(struct isci_host *ihost,
3360 struct isci_remote_device *idev,
3361 struct isci_request *ireq)
3363 struct domain_device *dev = idev->domain_dev;
3364 enum sci_status status = SCI_SUCCESS;
3366 /* Build the common part of the request */
3367 sci_general_request_construct(ihost, idev, ireq);
3369 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3370 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3372 if (dev->dev_type == SAS_END_DEV)
3374 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3375 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3376 else if (dev_is_expander(dev))
3379 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
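	/* zero the task context up to, but not including, the SGL pairs;
	 * the SGL area is filled in separately as the request is built
	 */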
3381 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3386 enum sci_status sci_task_request_construct(struct isci_host *ihost,
3387 struct isci_remote_device *idev,
3388 u16 io_tag, struct isci_request *ireq)
3390 struct domain_device *dev = idev->domain_dev;
3391 enum sci_status status = SCI_SUCCESS;
3393 /* Build the common part of the request */
3394 sci_general_request_construct(ihost, idev, ireq);
3396 if (dev->dev_type == SAS_END_DEV ||
3397 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3398 set_bit(IREQ_TMF, &ireq->flags);
3399 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3401 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3406 static enum sci_status isci_request_ssp_request_construct(
3407 struct isci_request *request)
3409 enum sci_status status;
3411 dev_dbg(&request->isci_host->pdev->dev,
3412 "%s: request = %p\n",
3415 status = sci_io_request_construct_basic_ssp(request);
3419 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3421 struct sas_task *task = isci_request_access_task(ireq);
3422 struct host_to_dev_fis *fis = &ireq->stp.cmd;
3423 struct ata_queued_cmd *qc = task->uldd_task;
3424 enum sci_status status;
3426 dev_dbg(&ireq->isci_host->pdev->dev,
3431 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
3432 if (!task->ata_task.device_control_reg_update)
3436 status = sci_io_request_construct_basic_sata(ireq);
3438 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3439 qc->tf.command == ATA_CMD_FPDMA_READ)) {
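		/* for NCQ (FPDMA) commands the tag occupies bits 7:3 of
		 * the sector count field of the H2D FIS, and the SCU task
		 * context wants the raw tag as well
		 */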
3440 fis->sector_count = qc->tag << 3;
3441 ireq->tc->type.stp.ncq_tag = qc->tag;
3447 static enum sci_status
3448 sci_io_request_construct_smp(struct device *dev,
3449 struct isci_request *ireq,
3450 struct sas_task *task)
3452 struct scatterlist *sg = &task->smp_task.smp_req;
3453 struct isci_remote_device *idev;
3454 struct scu_task_context *task_context;
3455 struct isci_port *iport;
3456 struct smp_req *smp_req;
3461 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3462 smp_req = kaddr + sg->offset;
3464 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3465 * functions under SAS 2.0, a zero request length really indicates
3466 * a non-zero default length.
3468 if (smp_req->req_len == 0) {
3469 switch (smp_req->func) {
3471 case SMP_REPORT_PHY_ERR_LOG:
3472 case SMP_REPORT_PHY_SATA:
3473 case SMP_REPORT_ROUTE_INFO:
3474 smp_req->req_len = 2;
3476 case SMP_CONF_ROUTE_INFO:
3477 case SMP_PHY_CONTROL:
3478 case SMP_PHY_TEST_FUNCTION:
3479 smp_req->req_len = 9;
3481 /* Default - zero is a valid default for 2.0. */
3484 req_len = smp_req->req_len;
3485 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3486 cmd = *(u32 *) smp_req;
3487 kunmap_atomic(kaddr, KM_IRQ0);
3489 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3492 ireq->protocol = SCIC_SMP_PROTOCOL;
3494 /* byte swap the smp request. */
3496 task_context = ireq->tc;
3498 idev = ireq->target_device;
3499 iport = idev->owning_port;
3502 * Fill in the TC with its required data
3505 task_context->priority = 0;
3506 task_context->initiator_request = 1;
3507 task_context->connection_rate = idev->connection_rate;
3508 task_context->protocol_engine_index = ISCI_PEG;
3509 task_context->logical_port_index = iport->physical_port_index;
3510 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3511 task_context->abort = 0;
3512 task_context->valid = SCU_TASK_CONTEXT_VALID;
3513 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3516 task_context->remote_node_index = idev->rnc.remote_node_index;
3517 task_context->command_code = 0;
3518 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3521 task_context->link_layer_control = 0;
3522 task_context->do_not_dma_ssp_good_response = 1;
3523 task_context->strict_ordering = 0;
3524 task_context->control_frame = 1;
3525 task_context->timeout_enable = 0;
3526 task_context->block_guard_enable = 0;
3529 task_context->address_modifier = 0;
3532 task_context->ssp_command_iu_length = req_len;
3535 task_context->transfer_length_bytes = 0;
3538 * 18h ~ 30h, protocol specific
3539 * since the command IU has been built by the framework at this point,
3540 * we just copy the first DWord from the command IU to this location. */
3541 memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3545 * "For SMP you could program it to zero. We would prefer that way
3546 * so that done code will be consistent." - Venki
3548 task_context->task_phase = 0;
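	/* the post context packs the protocol engine group, the logical
	 * port, and the task context index into the single word that is
	 * written to post the TC to the hardware
	 */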
3550 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3551 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3552 (iport->physical_port_index <<
3553 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3554 ISCI_TAG_TCI(ireq->io_tag));
3556 * Copy the physical address of the command buffer to the SCU Task
3557 * Context; the command buffer should not contain the command header.
3559 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3560 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3562 /* SMP response comes as UF, so no need to set response IU address. */
3563 task_context->response_iu_upper = 0;
3564 task_context->response_iu_lower = 0;
3566 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3572 * isci_smp_request_build() - This function builds the smp request.
3573 * @ireq: This parameter points to the isci_request allocated in the
3574 * request construct function.
3576 * SCI_SUCCESS on successful completion, or specific failure code.
3578 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3580 struct sas_task *task = isci_request_access_task(ireq);
3581 struct device *dev = &ireq->isci_host->pdev->dev;
3582 enum sci_status status = SCI_FAILURE;
3584 status = sci_io_request_construct_smp(dev, ireq, task);
3585 if (status != SCI_SUCCESS)
3586 dev_dbg(&ireq->isci_host->pdev->dev,
3587 "%s: failed with status = %d\n",
3595 * isci_io_request_build() - This function builds the io request object.
3596 * @ihost: This parameter specifies the ISCI host object
3597 * @request: This parameter points to the isci_request object allocated in the
3598 * request construct function.
3599 * @sci_device: This parameter is the handle for the sci core's remote device
3600 * object that is the destination for this request.
3602 * SCI_SUCCESS on successful completion, or specific failure code.
3604 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3605 struct isci_request *request,
3606 struct isci_remote_device *idev)
3608 enum sci_status status = SCI_SUCCESS;
3609 struct sas_task *task = isci_request_access_task(request);
3611 dev_dbg(&ihost->pdev->dev,
3612 "%s: idev = 0x%p; request = %p, "
3613 "num_scatter = %d\n",
3619 /* map the sgl addresses, if present.
3620 * libata does the mapping for sata devices
3621 * before we get the request.
3623 if (task->num_scatter &&
3624 !sas_protocol_ata(task->task_proto) &&
3625 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3627 request->num_sg_entries = dma_map_sg(
3634 if (request->num_sg_entries == 0)
3635 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3638 status = sci_io_request_construct(ihost, idev, request);
3640 if (status != SCI_SUCCESS) {
3641 dev_dbg(&ihost->pdev->dev,
3642 "%s: failed request construct\n",
3647 switch (task->task_proto) {
3648 case SAS_PROTOCOL_SMP:
3649 status = isci_smp_request_build(request);
3651 case SAS_PROTOCOL_SSP:
3652 status = isci_request_ssp_request_construct(request);
3654 case SAS_PROTOCOL_SATA:
3655 case SAS_PROTOCOL_STP:
3656 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3657 status = isci_request_stp_request_construct(request);
3660 dev_dbg(&ihost->pdev->dev,
3661 "%s: unknown protocol\n", __func__);
3668 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3670 struct isci_request *ireq;
3672 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3674 ireq->io_request_completion = NULL;
3676 ireq->num_sg_entries = 0;
3677 INIT_LIST_HEAD(&ireq->completed_node);
3678 INIT_LIST_HEAD(&ireq->dev_node);
3679 isci_request_change_state(ireq, allocated);
3684 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3685 struct sas_task *task,
3688 struct isci_request *ireq;
3690 ireq = isci_request_from_tag(ihost, tag);
3691 ireq->ttype_ptr.io_task_ptr = task;
3692 clear_bit(IREQ_TMF, &ireq->flags);
3693 task->lldd_task = ireq;
3698 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3699 struct isci_tmf *isci_tmf,
3702 struct isci_request *ireq;
3704 ireq = isci_request_from_tag(ihost, tag);
3705 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3706 set_bit(IREQ_TMF, &ireq->flags);
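/*
 * isci_request_execute() - build an I/O request for @task and hand it to
 *    the core, taking the NCQ-recovery task path when the device is
 *    flagged with IDEV_IO_NCQERROR.
 */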
3711 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3712 struct sas_task *task, u16 tag)
3714 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3715 struct isci_request *ireq;
3716 unsigned long flags;
3719 /* do common allocation and init of request object. */
3720 ireq = isci_io_request_from_tag(ihost, task, tag);
3722 status = isci_io_request_build(ihost, ireq, idev);
3723 if (status != SCI_SUCCESS) {
3724 dev_dbg(&ihost->pdev->dev,
3725 "%s: request_construct failed - status = 0x%x\n",
3731 spin_lock_irqsave(&ihost->scic_lock, flags);
3733 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3735 if (isci_task_is_ncq_recovery(task)) {
3737 /* The device is in an NCQ recovery state. Issue the
3738 * request on the task side. Note that it will
3739 * complete on the I/O request side because the
3740 * request was built that way (i.e.,
3741 * ireq->is_task_management_request is false).
3743 status = sci_controller_start_task(ihost,
3747 status = SCI_FAILURE;
3750 /* send the request, let the core assign the IO TAG. */
3751 status = sci_controller_start_io(ihost, idev,
3755 if (status != SCI_SUCCESS &&
3756 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3757 dev_dbg(&ihost->pdev->dev,
3758 "%s: failed request start (0x%x)\n",
3760 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3764 /* Either I/O started OK, or the core has signaled that
3765 * the device needs a target reset.
3767 * In either case, hold onto the I/O for later.
3769 * Update its status and add it to the list in the
3770 * remote device object.
3772 list_add(&ireq->dev_node, &idev->reqs_in_process);
3774 if (status == SCI_SUCCESS) {
3775 isci_request_change_state(ireq, started);
3777 /* The request did not really start in the
3778 * hardware, so clear the request handle
3779 * here so no terminations will be done.
3781 set_bit(IREQ_TERMINATED, &ireq->flags);
3782 isci_request_change_state(ireq, completed);
3784 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3787 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3788 /* Signal libsas that we need the SCSI error
3789 * handler thread to work on this I/O and that
3790 * we want a device reset.
3792 spin_lock_irqsave(&task->task_state_lock, flags);
3793 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3794 spin_unlock_irqrestore(&task->task_state_lock, flags);
3796 /* Cause this task to be scheduled in the SCSI error
3799 isci_execpath_callback(ihost, task,
3802 /* Change the status, since we are holding
3803 * the I/O until it is managed by the SCSI
3806 status = SCI_SUCCESS;