2 * Copyright (c) 2016 Linaro Ltd.
3 * Copyright (c) 2016 Hisilicon Limited.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
13 #define DRV_NAME "hisi_sas_v2_hw"
/* Global (controller-wide) register byte offsets; initialised in init_reg_v2_hw(). */
15 /* global registers need init */
16 #define DLVRY_QUEUE_ENABLE 0x0
17 #define IOST_BASE_ADDR_LO 0x8
18 #define IOST_BASE_ADDR_HI 0xc
19 #define ITCT_BASE_ADDR_LO 0x10
20 #define ITCT_BASE_ADDR_HI 0x14
21 #define IO_BROKEN_MSG_ADDR_LO 0x18
22 #define IO_BROKEN_MSG_ADDR_HI 0x1c
23 #define PHY_CONTEXT 0x20
24 #define PHY_STATE 0x24
25 #define PHY_PORT_NUM_MA 0x28
26 #define PORT_STATE 0x2c
/* PHY 8 has its port number and connection rate in PORT_STATE rather than
 * PHY_PORT_NUM_MA (see get_wideport_bitmap_v2_hw()). */
27 #define PORT_STATE_PHY8_PORT_NUM_OFF 16
28 #define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
29 #define PORT_STATE_PHY8_CONN_RATE_OFF 20
30 #define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
31 #define PHY_CONN_RATE 0x30
32 #define HGC_TRANS_TASK_CNT_LIMIT 0x38
33 #define AXI_AHB_CLK_CFG 0x3c
/* ITCT_CLR fields: write device id plus the enable bit to clear an ITCT entry. */
35 #define ITCT_CLR_EN_OFF 16
36 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
37 #define ITCT_DEV_OFF 0
38 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
39 #define AXI_USER1 0x48
40 #define AXI_USER2 0x4c
41 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
42 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
43 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60
44 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64
45 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
46 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
47 #define HGC_GET_ITV_TIME 0x90
48 #define DEVICE_MSG_WORK_MODE 0x94
49 #define OPENA_WT_CONTI_TIME 0x9c
50 #define I_T_NEXUS_LOSS_TIME 0xa0
51 #define MAX_CON_TIME_LIMIT_TIME 0xa4
52 #define BUS_INACTIVE_LIMIT_TIME 0xa8
53 #define REJECT_TO_OPEN_LIMIT_TIME 0xac
54 #define CFG_AGING_TIME 0xbc
55 #define HGC_DFX_CFG2 0xc0
56 #define HGC_IOMB_PROC1_STATUS 0x104
57 #define CFG_1US_TIMER_TRSH 0xcc
58 #define HGC_INVLD_DQE_INFO 0x148
59 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
60 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
61 #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
/* Interrupt coalescing and entry-interrupt source/mask registers. */
62 #define INT_COAL_EN 0x19c
63 #define OQ_INT_COAL_TIME 0x1a0
64 #define OQ_INT_COAL_CNT 0x1a4
65 #define ENT_INT_COAL_TIME 0x1a8
66 #define ENT_INT_COAL_CNT 0x1ac
67 #define OQ_INT_SRC 0x1b0
68 #define OQ_INT_SRC_MSK 0x1b4
69 #define ENT_INT_SRC1 0x1b8
70 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
71 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
72 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
73 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
74 #define ENT_INT_SRC2 0x1bc
75 #define ENT_INT_SRC3 0x1c0
/* ENT_INT_SRC3 bit 15 signals "ITCT clear done" (polled in free_device_v2_hw()). */
76 #define ENT_INT_SRC3_ITC_INT_OFF 15
77 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
78 #define ENT_INT_SRC_MSK1 0x1c4
79 #define ENT_INT_SRC_MSK2 0x1c8
80 #define ENT_INT_SRC_MSK3 0x1cc
81 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
82 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
83 #define SAS_ECC_INTR_MSK 0x1ec
84 #define HGC_ERR_STAT_EN 0x238
/* Delivery/completion queue 0 registers; queue i lives at offset + i*0x14. */
85 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
86 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
87 #define DLVRY_Q_0_DEPTH 0x268
88 #define DLVRY_Q_0_WR_PTR 0x26c
89 #define DLVRY_Q_0_RD_PTR 0x270
90 #define HYPER_STREAM_ID_EN_CFG 0xc80
91 #define OQ0_INT_SRC_MSK 0xc90
92 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0
93 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4
94 #define COMPL_Q_0_DEPTH 0x4e8
95 #define COMPL_Q_0_WR_PTR 0x4ec
96 #define COMPL_Q_0_RD_PTR 0x4f0
/* Per-PHY register block: PHY n's registers are at PORT_BASE + n*0x400
 * (see hisi_sas_phy_read32()/hisi_sas_phy_write32()). */
98 /* phy registers need init */
99 #define PORT_BASE (0x2000)
101 #define PHY_CFG (PORT_BASE + 0x0)
102 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
103 #define PHY_CFG_ENA_OFF 0
104 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
105 #define PHY_CFG_DC_OPT_OFF 2
106 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
107 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
108 #define PROG_PHY_LINK_RATE_MAX_OFF 0
109 #define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
110 #define PHY_CTRL (PORT_BASE + 0x14)
111 #define PHY_CTRL_RESET_OFF 0
112 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
113 #define SAS_PHY_CTRL (PORT_BASE + 0x20)
114 #define SL_CFG (PORT_BASE + 0x84)
115 #define PHY_PCN (PORT_BASE + 0x44)
116 #define SL_TOUT_CFG (PORT_BASE + 0x8c)
117 #define SL_CONTROL (PORT_BASE + 0x94)
118 #define SL_CONTROL_NOTIFY_EN_OFF 0
119 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
/* Outbound IDENTIFY address frame dwords (written in config_id_frame_v2_hw()). */
120 #define TX_ID_DWORD0 (PORT_BASE + 0x9c)
121 #define TX_ID_DWORD1 (PORT_BASE + 0xa0)
122 #define TX_ID_DWORD2 (PORT_BASE + 0xa4)
123 #define TX_ID_DWORD3 (PORT_BASE + 0xa8)
124 #define TX_ID_DWORD4 (PORT_BASE + 0xaC)
125 #define TX_ID_DWORD5 (PORT_BASE + 0xb0)
126 #define TX_ID_DWORD6 (PORT_BASE + 0xb4)
/* Inbound IDENTIFY address frame dwords received from the attached device. */
127 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
128 #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
129 #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
130 #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0)
131 #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4)
132 #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
133 #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
134 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
135 #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
/* Per-channel interrupt status (CHL_INT0..2) and their mask registers. */
136 #define CHL_INT0 (PORT_BASE + 0x1b4)
137 #define CHL_INT0_HOTPLUG_TOUT_OFF 0
138 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
139 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1
140 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
141 #define CHL_INT0_SL_PHY_ENABLE_OFF 2
142 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
143 #define CHL_INT0_NOT_RDY_OFF 4
144 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
145 #define CHL_INT0_PHY_RDY_OFF 5
146 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
147 #define CHL_INT1 (PORT_BASE + 0x1b8)
148 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
149 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
150 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
151 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
152 #define CHL_INT2 (PORT_BASE + 0x1bc)
153 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
154 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
155 #define CHL_INT2_MSK (PORT_BASE + 0x1c8)
156 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
157 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
158 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
159 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
160 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
161 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
162 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
/* DMA engine busy flags, polled during reset to wait for tx/rx idle. */
163 #define DMA_TX_STATUS (PORT_BASE + 0x2d0)
164 #define DMA_TX_STATUS_BUSY_OFF 0
165 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
166 #define DMA_RX_STATUS (PORT_BASE + 0x2e8)
167 #define DMA_RX_STATUS_BUSY_OFF 0
168 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
/* AXI fabric configuration registers. */
170 #define AXI_CFG (0x5100)
171 #define AM_CFG_MAX_TRANS (0x5010)
172 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
174 /* HW dma structures */
/* Delivery queue (command header) dword field offsets/masks. */
175 /* Delivery queue header */
177 #define CMD_HDR_RESP_REPORT_OFF 5
178 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
179 #define CMD_HDR_TLR_CTRL_OFF 6
180 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
181 #define CMD_HDR_PORT_OFF 18
182 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
183 #define CMD_HDR_PRIORITY_OFF 27
184 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
185 #define CMD_HDR_CMD_OFF 29
186 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
188 #define CMD_HDR_DIR_OFF 5
189 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
190 #define CMD_HDR_RESET_OFF 7
191 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
192 #define CMD_HDR_VDTL_OFF 10
193 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
194 #define CMD_HDR_FRAME_TYPE_OFF 11
195 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
196 #define CMD_HDR_DEV_ID_OFF 16
197 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
199 #define CMD_HDR_CFL_OFF 0
200 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
201 #define CMD_HDR_NCQ_TAG_OFF 10
202 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
203 #define CMD_HDR_MRFL_OFF 15
204 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
205 #define CMD_HDR_SG_MOD_OFF 24
206 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
/*
 * Command header first-burst enable field (dw2, bit 26).
 *
 * Fix: the mask was built from CMD_HDR_SG_MOD_OFF (bit 24 — a copy-paste
 * slip from the SG_MOD pair directly above), so it selected the wrong bit.
 * It must shift by CMD_HDR_FIRST_BURST_OFF so the mask is bit 26, matching
 * the OFF/MSK pairing convention used by every other field in this table.
 */
#define CMD_HDR_FIRST_BURST_OFF 26
#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_FIRST_BURST_OFF)
/* Command header IPTT and SGL length fields. */
210 #define CMD_HDR_IPTT_OFF 0
211 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
213 #define CMD_HDR_DIF_SGL_LEN_OFF 0
214 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
215 #define CMD_HDR_DATA_SGL_LEN_OFF 16
216 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
/* Completion queue entry field offsets/masks. */
218 /* Completion header */
220 #define CMPLT_HDR_RSPNS_XFRD_OFF 10
221 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
222 #define CMPLT_HDR_ERX_OFF 12
223 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
225 #define CMPLT_HDR_IPTT_OFF 0
226 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
227 #define CMPLT_HDR_DEV_ID_OFF 16
228 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
/* ITCT (I_T context table) qw0 fields (32-bit shifts)... */
232 #define ITCT_HDR_DEV_TYPE_OFF 0
233 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
234 #define ITCT_HDR_VALID_OFF 2
235 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
236 #define ITCT_HDR_MCR_OFF 5
237 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
238 #define ITCT_HDR_VLN_OFF 9
239 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
240 #define ITCT_HDR_PORT_ID_OFF 28
241 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
/* ...and qw2 timer fields, which are 64-bit wide (hence the ULL literals). */
243 #define ITCT_HDR_INLT_OFF 0
244 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
245 #define ITCT_HDR_BITLT_OFF 16
246 #define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF)
247 #define ITCT_HDR_MCTLT_OFF 32
248 #define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF)
249 #define ITCT_HDR_RTOLT_OFF 48
250 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
/* Completion queue entry layout (DMA'd by hardware).
 * NOTE(review): this listing is truncated — member lines and the closing
 * brace of hisi_sas_complete_v2_hdr are missing here. */
252 struct hisi_sas_complete_v2_hdr {
/* Per-slot error record written by hardware; one dword/word per error
 * class, decoded against the TRANS_TX/TRANS_RX/DMA/SIPC enums below. */
259 struct hisi_sas_err_record_v2 {
261 __le32 trans_tx_fail_type;
264 __le32 trans_rx_fail_type;
267 __le16 dma_tx_err_type;
268 __le16 sipc_rx_err_type;
271 __le32 dma_rx_err_type;
/* Interrupt handler categories (enum opener not visible in this listing). */
275 HISI_SAS_PHY_PHY_UPDOWN,
276 HISI_SAS_PHY_CHNL_INT,
/* Base values partitioning the error-code space: each hardware error
 * source gets its own 0x100-wide range. */
281 TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
282 TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
283 DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
284 SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16 */
285 DMA_RX_ERR_BASE = 0x400, /* dw3 */
/* Transport-layer TX error codes (err_record dw0), starting at 0x0.
 * Commented-out entries mark values that have a different meaning for
 * SATA/STP than for SSP at the same code point. */
288 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
289 TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
290 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
291 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
292 TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
294 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
295 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
296 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
297 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
298 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
299 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
300 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
301 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
302 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
303 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
304 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
305 TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
306 TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
307 TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
308 TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
309 TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
310 TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp */
311 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
312 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
313 TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
314 TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp */
315 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp */
316 /* IO_TX_ERR_WITH_R_ERR_RECEVIED, 0x1b for sata/stp */
317 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
318 /* IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
319 TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
320 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
321 /* IO_TX_ERR_WITH_SYNC_RXD, 0x1e for sata/stp */
322 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */
/* Transport-layer RX error codes (err_record dw1), starting at 0x100. */
325 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
326 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
327 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
328 /* IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, 0x102 for sata/stp */
329 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
330 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
331 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
332 /* IO_ERR_WITH_RXFIS_TX SYNCP, 0x105 for sata/stp */
333 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp */
334 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
335 TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
336 TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
337 TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
338 RESERVED1, /* 0x10b */
339 TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
340 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
341 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
342 TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
343 TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
344 TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
345 /* IO_RX_ERR_WITH_FIS_TOO_SHORT, 0x111 for sata/stp */
346 TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp */
347 /* IO_RX_ERR_WITH_FIS_TOO_LONG, 0x112 for sata/stp */
348 TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
349 /* IO_RX_ERR_WITH_SATA_DEVICE_LOST, 0x113 for sata */
350 RESERVED2, /* 0x114 */
351 RESERVED3, /* 0x115 */
352 RESERVED4, /* 0x116 */
353 RESERVED5, /* 0x117 */
354 TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
355 TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
356 TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
357 RESERVED6, /* 0x11b */
358 RESERVED7, /* 0x11c */
359 RESERVED8, /* 0x11d */
360 RESERVED9, /* 0x11e */
361 TRANS_RX_R_ERR, /* 0x11f */
/* DMA TX error codes (err_record dw2 bits 15:0), starting at 0x200. */
364 DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
365 DMA_TX_DIF_APP_ERR, /* 0x201 */
366 DMA_TX_DIF_RPP_ERR, /* 0x202 */
367 DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
368 DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
369 DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
370 DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
371 DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
372 DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
373 DMA_TX_RAM_ECC_ERR, /* 0x209 */
374 DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */
/* SIPC RX (SATA) error codes (err_record dw2 bits 31:16), starting at 0x300. */
377 SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
378 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
379 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
380 SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
381 SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
382 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
383 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
384 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
385 SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
386 SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
387 SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */
/* DMA RX error codes (err_record dw3), starting at 0x400. */
390 DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
391 DMA_RX_DIF_APP_ERR, /* 0x401 */
392 DMA_RX_DIF_RPP_ERR, /* 0x402 */
393 DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
394 DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
395 DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
396 DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
397 DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
398 RESERVED10, /* 0x408 */
399 DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
400 DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
401 DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
402 DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
403 DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
404 DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
405 DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
406 DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
407 DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
408 DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
409 DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
410 DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
411 DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
412 DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
413 DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
414 DMA_RX_RAM_ECC_ERR, /* 0x418 */
415 DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
418 #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
/* Command header data-direction encoding (DIR field).
 * NOTE(review): the value 1 (presumably DIR_TO_INI) is missing from this
 * listing — confirm against the full file. */
420 #define DIR_NO_DATA 0
422 #define DIR_TO_DEVICE 2
423 #define DIR_RESERVED 3
/* SATA protocol bits for the FIS/command header. */
425 #define SATA_PROTOCOL_NONDATA 0x1
426 #define SATA_PROTOCOL_PIO 0x2
427 #define SATA_PROTOCOL_DMA 0x4
428 #define SATA_PROTOCOL_FPDMA 0x8
429 #define SATA_PROTOCOL_ATAPI 0x10
/* Read a 32-bit global controller register at byte offset @off. */
431 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
433 void __iomem *regs = hisi_hba->regs + off;
/* Read a 32-bit global register without a memory barrier (relaxed ordering);
 * used on the hot delivery-queue path in get_free_slot_v2_hw(). */
438 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
440 void __iomem *regs = hisi_hba->regs + off;
442 return readl_relaxed(regs);
/* Write @val to the 32-bit global controller register at byte offset @off. */
445 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
447 void __iomem *regs = hisi_hba->regs + off;
/* Write a per-PHY register: PHY @phy_no's register bank is 0x400 bytes wide. */
452 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
455 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
/* Read a per-PHY register (0x400-byte stride per PHY). */
460 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
463 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
/*
 * Allocate a free slot index (IPTT) from the bitmap, honouring the v2 HW
 * quirk that SAS (non-SATA) commands must use an odd IPTT (bit0 == 1).
 * Returns -SAS_QUEUE_FULL when no index is available.
 * NOTE(review): this listing is truncated — the search start value, the
 * retry loop around find_next_zero_bit() and the final *slot_idx store are
 * not visible here.
 */
468 /* This function needs to be protected from pre-emption. */
470 slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
471 struct domain_device *device)
473 unsigned int index = 0;
474 void *bitmap = hisi_hba->slot_index_tags;
475 int sata_dev = dev_is_sata(device);
478 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
480 if (index >= hisi_hba->slot_index_count)
481 return -SAS_QUEUE_FULL;
483 * SAS IPTT bit0 should be 1
/* SATA may take any index; SAS requires an odd one. */
485 if (sata_dev || (index & 1))
/* Claim the chosen index in the shared bitmap. */
490 set_bit(index, bitmap);
/*
 * Allocate a free hisi_sas_device slot for @device, honouring the v2 HW
 * quirk that SATA devices must have an even device id (bit0 == 0).
 * Returns the initialised slot, or NULL if the table is full.
 */
496 hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
498 struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
499 struct hisi_sas_device *sas_dev = NULL;
500 int i, sata_dev = dev_is_sata(device);
/* The lock serialises device-table allocation across callers. */
502 spin_lock(&hisi_hba->lock);
503 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
505 * SATA device id bit0 should be 0
/* Skip odd ids for SATA devices. */
507 if (sata_dev && (i & 1))
509 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
510 hisi_hba->devices[i].device_id = i;
511 sas_dev = &hisi_hba->devices[i];
512 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
513 sas_dev->dev_type = device->dev_type;
514 sas_dev->hisi_hba = hisi_hba;
515 sas_dev->sas_device = device;
519 spin_unlock(&hisi_hba->lock);
/* Set the DC optimisation bit in PHY_CFG for @phy_no (read-modify-write). */
524 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
526 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
528 cfg &= ~PHY_CFG_DC_OPT_MSK;
529 cfg |= 1 << PHY_CFG_DC_OPT_OFF;
530 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
/*
 * Build the SAS IDENTIFY address frame for @phy_no and program it into the
 * TX_ID_DWORD0..5 registers. Each dword is byte-swapped with __swab32()
 * before writing, matching the byte order the hardware transmits.
 * NOTE(review): the __swab32() wrappers for dwords 1-4 are truncated from
 * this listing; only the register write lines remain visible.
 */
533 static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
535 struct sas_identify_frame identify_frame;
536 u32 *identify_buffer;
538 memset(&identify_frame, 0, sizeof(identify_frame));
539 identify_frame.dev_type = SAS_END_DEVICE;
540 identify_frame.frame_type = 0;
541 identify_frame._un1 = 1;
542 identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
543 identify_frame.target_bits = SAS_PROTOCOL_NONE;
/* The HBA's SAS address doubles as both the attached and own address fields. */
544 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
545 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
546 identify_frame.phy_id = phy_no;
547 identify_buffer = (u32 *)(&identify_frame);
549 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
550 __swab32(identify_buffer[0]));
551 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
553 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
555 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
557 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
559 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
560 __swab32(identify_buffer[5]));
/* Program the IDENTIFY address frame on every PHY of the HBA. */
563 static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
567 for (i = 0; i < hisi_hba->n_phy; i++)
568 config_id_frame_v2_hw(hisi_hba, i)
/*
 * Populate the ITCT (I_T context table) entry for @sas_dev so the hardware
 * can address the device: qw0 carries type/valid/rate/port, qw1 the
 * byte-swapped SAS address, and qw2 the nexus/connection timers (SSP only).
 */
571 static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
572 struct hisi_sas_device *sas_dev)
574 struct domain_device *device = sas_dev->sas_device;
575 struct device *dev = &hisi_hba->pdev->dev;
576 u64 qw0, device_id = sas_dev->device_id;
577 struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
578 struct domain_device *parent_dev = device->parent;
579 struct hisi_sas_port *port = device->port->lldd_port;
581 memset(itct, 0, sizeof(*itct));
/* qw0: encode device type — SSP for end devices/expanders, STP when the
 * device hangs off an expander, plain SATA otherwise. */
585 switch (sas_dev->dev_type) {
587 case SAS_EDGE_EXPANDER_DEVICE:
588 case SAS_FANOUT_EXPANDER_DEVICE:
589 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
592 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
593 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
595 qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
598 dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
602 qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
603 (device->linkrate << ITCT_HDR_MCR_OFF) |
604 (1 << ITCT_HDR_VLN_OFF) |
605 (port->id << ITCT_HDR_PORT_ID_OFF));
606 itct->qw0 = cpu_to_le64(qw0);
/* qw1: SAS address, byte-swapped to the on-wire order the HW expects. */
609 memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
610 itct->sas_addr = __swab64(itct->sas_addr);
/* qw2: I_T nexus loss / bus inactive / max connect / reject-to-open timers;
 * not programmed for SATA devices. */
613 if (!dev_is_sata(device))
614 itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
615 (0x1ULL << ITCT_HDR_BITLT_OFF) |
616 (0x32ULL << ITCT_HDR_MCTLT_OFF) |
617 (0x1ULL << ITCT_HDR_RTOLT_OFF));
/*
 * Tear down the ITCT entry for @sas_dev: trigger the hardware ITCT-clear
 * twice, poll ENT_INT_SRC3 for the "clear done" bit, then mark the qw0
 * entry invalid and release the device slot.
 * NOTE(review): the polling loop bounds/udelay lines are truncated from
 * this listing.
 */
620 static void free_device_v2_hw(struct hisi_hba *hisi_hba,
621 struct hisi_sas_device *sas_dev)
623 u64 qw0, dev_id = sas_dev->device_id;
624 struct device *dev = &hisi_hba->pdev->dev;
625 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
626 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
/* Acknowledge any stale ITCT interrupt before starting (write-1-to-clear). */
629 /* clear the itct interrupt state */
630 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
631 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
632 ENT_INT_SRC3_ITC_INT_MSK);
634 /* clear the itct int*/
635 for (i = 0; i < 2; i++) {
/* Kick the hardware clear for this device id. */
636 /* clear the itct table*/
637 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
638 reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
639 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
642 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
643 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
644 dev_dbg(dev, "got clear ITCT done interrupt\n");
/* NOTE(review): cpu_to_le64() on a value read FROM the itct looks
 * inverted — le64_to_cpu() appears intended here (qw0 is used as a CPU
 * value on the next line). Confirm against the endianness handling in
 * setup_itct_v2_hw(). */
646 /* invalid the itct state*/
647 qw0 = cpu_to_le64(itct->qw0);
648 qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
/* Ack the "clear done" interrupt and recycle the device slot. */
649 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
650 ENT_INT_SRC3_ITC_INT_MSK);
651 hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
652 hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
655 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
656 dev_dbg(dev, "clear ITCT ok\n");
/*
 * Full controller reset: quiesce queues and PHYs, wait for DMA and AXI
 * idle, then pulse reset/clock through the system controller (regmap) and
 * verify via the reset status register. Returns 0 on success, negative on
 * reset/de-reset failure.
 * NOTE(review): several lines (the 8-phy reset_val branch, udelay/poll
 * bodies, error return values) are truncated from this listing.
 */
662 static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
666 unsigned long end_time;
667 struct device *dev = &hisi_hba->pdev->dev;
/* 9-phy parts expose more reset bits than 8-phy parts. */
669 /* The mask needs to be set depending on the number of phys */
670 if (hisi_hba->n_phy == 9)
671 reset_val = 0x1fffff;
675 /* Disable all of the DQ */
676 for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
677 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
679 /* Disable all of the PHYs */
680 for (i = 0; i < hisi_hba->n_phy; i++) {
681 u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);
/* NOTE(review): PHY_CTRL_RESET_MSK is applied to a PHY_CFG value; both
 * happen to be bit 0 (PHY_CFG_ENA), so this clears the enable bit, but
 * the mismatched names deserve confirmation. */
683 phy_cfg &= ~PHY_CTRL_RESET_MSK;
684 hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
/* Poll per-phy DMA engines until both tx and rx report not-busy (1s cap). */
688 /* Ensure DMA tx & rx idle */
689 for (i = 0; i < hisi_hba->n_phy; i++) {
690 u32 dma_tx_status, dma_rx_status;
692 end_time = jiffies + msecs_to_jiffies(1000);
695 dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
697 dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
700 if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
701 !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
705 if (time_after(jiffies, end_time))
/* Read back AXI_CFG until the fabric drains (1s cap). */
710 /* Ensure axi bus idle */
711 end_time = jiffies + msecs_to_jiffies(1000);
714 hisi_sas_read32(hisi_hba, AXI_CFG);
720 if (time_after(jiffies, end_time))
/* Assert reset and gate clocks via the system controller, then confirm
 * every reset bit reads back set. */
724 /* reset and disable clock*/
725 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
727 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
730 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
731 if (reset_val != (val & reset_val)) {
732 dev_err(dev, "SAS reset fail.\n");
/* De-assert reset (+4 = clear register) and re-enable clocks; all reset
 * status bits must now read back clear. */
736 /* De-reset and enable clock*/
737 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
739 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
742 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
744 if (val & reset_val) {
745 dev_err(dev, "SAS de-reset fail.\n");
/*
 * Program every global, per-PHY and per-queue register to its working
 * default after reset: interrupt sources/masks, coalescing, queue base
 * addresses/depths, and the DMA addresses of the ITCT/IOST/breakpoint/
 * initial-FIS tables.
 */
752 static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
754 struct device *dev = &hisi_hba->pdev->dev;
755 struct device_node *np = dev->of_node;
758 /* Global registers init */
/* DT quirk: some boards cap AXI master max outstanding transactions. */
760 /* Deal with am-max-transmissions quirk */
761 if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
762 hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
763 hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
765 } /* Else, use defaults -> do nothing */
/* Enable one delivery queue per configured queue (bitmask of queue_count). */
767 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
768 (u32)((1ULL << hisi_hba->queue_count) - 1));
769 hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
770 hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
771 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
772 hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
773 hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
774 hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
775 hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32);
776 hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
777 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
778 hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
779 hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
780 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
781 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
782 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
783 hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
784 hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
/* Ack all pending interrupt sources (write-1-to-clear), then unmask. */
785 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
786 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
787 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
788 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
789 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
790 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
791 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
792 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
/* Unmask the completion-queue interrupt for each queue. */
793 for (i = 0; i < hisi_hba->queue_count; i++)
794 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
796 hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
797 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
/* Per-PHY defaults: link rate, timeouts, interrupt acks and masks. */
799 for (i = 0; i < hisi_hba->n_phy; i++) {
800 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
801 hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
802 hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
803 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
804 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
805 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
806 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
807 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
808 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
809 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
810 hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
811 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
812 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
813 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
814 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
815 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
816 hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
817 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
818 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
/* Per-queue setup: each queue's register set is 0x14 bytes apart. */
821 for (i = 0; i < hisi_hba->queue_count; i++) {
823 hisi_sas_write32(hisi_hba,
824 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
825 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
827 hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
828 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
830 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
831 HISI_SAS_QUEUE_SLOTS);
833 /* Completion queue */
834 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
835 upper_32_bits(hisi_hba->complete_hdr_dma[i]));
837 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
838 lower_32_bits(hisi_hba->complete_hdr_dma[i]));
840 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
841 HISI_SAS_QUEUE_SLOTS);
/* Tell the HW where the shared DMA tables live. */
845 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
846 lower_32_bits(hisi_hba->itct_dma));
848 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
849 upper_32_bits(hisi_hba->itct_dma));
852 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
853 lower_32_bits(hisi_hba->iost_dma));
855 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
856 upper_32_bits(hisi_hba->iost_dma));
859 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
860 lower_32_bits(hisi_hba->breakpoint_dma));
862 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
863 upper_32_bits(hisi_hba->breakpoint_dma));
865 /* SATA broken msg */
866 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
867 lower_32_bits(hisi_hba->sata_breakpoint_dma));
869 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
870 upper_32_bits(hisi_hba->sata_breakpoint_dma));
872 /* SATA initial fis */
873 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
874 lower_32_bits(hisi_hba->initial_fis_dma));
876 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
877 upper_32_bits(hisi_hba->initial_fis_dma));
/* Bring-up entry point: reset the controller, then program registers and
 * the per-PHY IDENTIFY frames. Propagates reset_hw_v2_hw() failure. */
880 static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
882 struct device *dev = &hisi_hba->pdev->dev;
885 rc = reset_hw_v2_hw(hisi_hba);
887 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
892 init_reg_v2_hw(hisi_hba);
894 init_id_frame_v2_hw(hisi_hba);
/* Set the enable bit in PHY_CFG for @phy_no. */
899 static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
901 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
903 cfg |= PHY_CFG_ENA_MSK;
904 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
/* Clear the enable bit in PHY_CFG for @phy_no. */
907 static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
909 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
911 cfg &= ~PHY_CFG_ENA_MSK;
912 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
/* Configure the IDENTIFY frame and optimisation mode, then enable the PHY. */
915 static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
917 config_id_frame_v2_hw(hisi_hba, phy_no);
918 config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
919 enable_phy_v2_hw(hisi_hba, phy_no);
/* Disable the PHY (thin wrapper kept for symmetry with start_phy_v2_hw()). */
922 static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
924 disable_phy_v2_hw(hisi_hba, phy_no);
/* Hard-reset a PHY by stopping and restarting it.
 * NOTE(review): a delay between stop and start is truncated from this
 * listing (original line 930). */
927 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
929 stop_phy_v2_hw(hisi_hba, phy_no);
931 start_phy_v2_hw(hisi_hba, phy_no);
/* Timer callback (armed in phys_init_v2_hw()): start every PHY.
 * @data carries the hisi_hba pointer, per the old setup_timer() API. */
934 static void start_phys_v2_hw(unsigned long data)
936 struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
939 for (i = 0; i < hisi_hba->n_phy; i++)
940 start_phy_v2_hw(hisi_hba, i);
/* Mask selected per-channel CHL_INT2 sources on each PHY (the read-back
 * flushes the posted write), then defer actual PHY start-up by one second
 * via the HBA timer. */
943 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
946 struct timer_list *timer = &hisi_hba->timer;
948 for (i = 0; i < hisi_hba->n_phy; i++) {
949 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
950 hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
953 setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
954 mod_timer(timer, jiffies + HZ);
/* Pulse the SL_CONTROL NOTIFY-enable bit for @phy_no: set it, then clear
 * it again, triggering a NOTIFY primitive on the link.
 * NOTE(review): a delay between the two writes is truncated from this
 * listing (original line 964). */
957 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
961 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
962 sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
963 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
965 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
966 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
967 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
/*
 * Build a bitmap of the ready PHYs belonging to @port_id. PHYs 0-7 report
 * their port number in PHY_PORT_NUM_MA (4 bits each); PHY 8 reports it in
 * PORT_STATE instead.
 * NOTE(review): the "bitmap |= 1 << i" accumulation and return lines are
 * truncated from this listing.
 */
970 static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
973 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
974 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
/* PHYs 0-7: match the 4-bit port number nibble against @port_id. */
976 for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
977 if (phy_state & 1 << i)
978 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
/* PHY 8 (9-phy parts only): its port number lives in PORT_STATE. */
981 if (hisi_hba->n_phy == 9) {
982 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
984 if (phy_state & 1 << 8)
985 if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
986 PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
994 * This function allocates across all queues to load balance.
995 * Slots are allocated from queues in a round-robin fashion.
997 * The call path to this function, up to writing the write
998 * queue pointer, must be safe from interruption.
1000 static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
1002 struct device *dev = &hisi_hba->pdev->dev;
1004 int queue = hisi_hba->queue;
1007 w = hisi_sas_read32_relaxed(hisi_hba,
1008 DLVRY_Q_0_WR_PTR + (queue * 0x14));
1009 r = hisi_sas_read32_relaxed(hisi_hba,
1010 DLVRY_Q_0_RD_PTR + (queue * 0x14));
1011 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
1012 queue = (queue + 1) % hisi_hba->queue_count;
1013 if (queue == hisi_hba->queue) {
1014 dev_warn(dev, "could not find free slot\n");
1021 hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
1027 static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
1029 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
1030 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
1032 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
1033 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
1036 static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
1037 struct hisi_sas_slot *slot,
1038 struct hisi_sas_cmd_hdr *hdr,
1039 struct scatterlist *scatter,
1042 struct device *dev = &hisi_hba->pdev->dev;
1043 struct scatterlist *sg;
1046 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
1047 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
1052 slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
1053 &slot->sge_page_dma);
1054 if (!slot->sge_page)
1057 for_each_sg(scatter, sg, n_elem, i) {
1058 struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
1060 entry->addr = cpu_to_le64(sg_dma_address(sg));
1061 entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
1062 entry->data_len = cpu_to_le32(sg_dma_len(sg));
1063 entry->data_off = 0;
1066 hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
1068 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
/*
 * Build the command header for an SMP task and DMA-map its request and
 * response buffers.  On failure, previously mapped buffers are unmapped
 * again via the error labels at the end.
 */
1073 static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
1074 struct hisi_sas_slot *slot)
1076 struct sas_task *task = slot->task;
1077 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1078 struct domain_device *device = task->dev;
1079 struct device *dev = &hisi_hba->pdev->dev;
1080 struct hisi_sas_port *port = slot->port;
1081 struct scatterlist *sg_req, *sg_resp;
1082 struct hisi_sas_device *sas_dev = device->lldd_dev;
1083 dma_addr_t req_dma_addr;
1084 unsigned int req_len, resp_len;
1088 * DMA-map SMP request, response buffers
1091 sg_req = &task->smp_task.smp_req;
1092 elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
1095 req_len = sg_dma_len(sg_req);
1096 req_dma_addr = sg_dma_address(sg_req);
/* response buffer */
1099 sg_resp = &task->smp_task.smp_resp;
1100 elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
1105 resp_len = sg_dma_len(sg_resp);
/* Both buffer lengths must be dword-aligned. */
1106 if ((req_len & 0x3) || (resp_len & 0x3)) {
/* dw0: port, high priority, command type = SMP */
1113 hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
1114 (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
1115 (2 << CMD_HDR_CMD_OFF)); /* smp */
1117 /* map itct entry */
1118 hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
1119 (1 << CMD_HDR_FRAME_TYPE_OFF) |
1120 (DIR_NO_DATA << CMD_HDR_DIR_OFF));
/* dw2: frame length in dwords ((req_len - 4)/4 -- presumably excludes
 * the 4-byte CRC) and maximum response size. */
1123 hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
1124 (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
1127 hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
1129 hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
1130 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
/* error unwind: unmap whichever buffers were already mapped */
1135 dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
1138 dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
/*
 * Build the command header, PRD table and command IU for an SSP task
 * (regular command or task management function).
 *
 * @is_tmf/@tmf: when non-zero, the frame is built as a TASK frame with
 * high priority and the TMF fields filled in from @tmf.
 */
1143 static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
1144 struct hisi_sas_slot *slot, int is_tmf,
1145 struct hisi_sas_tmf_task *tmf)
1147 struct sas_task *task = slot->task;
1148 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1149 struct domain_device *device = task->dev;
1150 struct hisi_sas_device *sas_dev = device->lldd_dev;
1151 struct hisi_sas_port *port = slot->port;
1152 struct sas_ssp_task *ssp_task = &task->ssp_task;
1153 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
/* TMFs are sent at high priority (priority = is_tmf). */
1154 int has_data = 0, rc, priority = is_tmf;
1156 u32 dw1 = 0, dw2 = 0;
/* dw0: response report, TLR control, port, priority, cmd type = SSP */
1158 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
1159 (2 << CMD_HDR_TLR_CTRL_OFF) |
1160 (port->id << CMD_HDR_PORT_OFF) |
1161 (priority << CMD_HDR_PRIORITY_OFF) |
1162 (1 << CMD_HDR_CMD_OFF)); /* ssp */
1164 dw1 = 1 << CMD_HDR_VDTL_OFF;
/* TMF branch: TASK frame, no data phase. */
1166 dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
1167 dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
/* Regular command: COMMAND frame, direction from the scsi_cmnd. */
1169 dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
1170 switch (scsi_cmnd->sc_data_direction) {
1173 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1175 case DMA_FROM_DEVICE:
1177 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1180 dw1 &= ~CMD_HDR_DIR_MSK;
1184 /* map itct entry */
1185 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1186 hdr->dw1 = cpu_to_le32(dw1);
/* dw2: command frame length in dwords (IU + frame header, rounded up),
 * max response frame length, SG mode. */
1188 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
1189 + 3) / 4) << CMD_HDR_CFL_OFF) |
1190 ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
1191 (2 << CMD_HDR_SG_MOD_OFF);
1192 hdr->dw2 = cpu_to_le32(dw2);
1194 hdr->transfer_tags = cpu_to_le32(slot->idx);
/* Data-bearing commands get a PRD table built from task->scatter. */
1197 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
1203 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
1204 hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
1205 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
/* Fill the SSP command IU directly after the frame header. */
1207 buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
1209 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1211 buf_cmd[9] = task->ssp_task.task_attr |
1212 (task->ssp_task.task_prio << 3);
1213 memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
1214 task->ssp_task.cmd->cmd_len);
/* TMF IU: function code, plus managed-task tag for abort/query. */
1216 buf_cmd[10] = tmf->tmf;
1218 case TMF_ABORT_TASK:
1219 case TMF_QUERY_TASK:
1221 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
1223 tmf->tag_of_task_to_be_managed & 0xff;
1233 static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
1234 struct hisi_sas_slot *slot)
1236 struct task_status_struct *ts = &task->task_status;
1237 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
1238 struct dev_to_host_fis *d2h = slot->status_buffer +
1239 sizeof(struct hisi_sas_err_record);
1241 resp->frame_len = sizeof(struct dev_to_host_fis);
1242 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
1244 ts->buf_valid_size = sizeof(*resp);
1247 /* By default the task response is complete; the decode below only refines ts->stat from the error record. */
/*
 * Translate the hardware error record of a failed slot into libsas
 * task_status fields (stat / resp / open_rej_reason / residual).
 *
 * NOTE(review): the err_record fields are produced by the hardware, so
 * le32_to_cpu()/le16_to_cpu() look like the intended conversions rather
 * than cpu_to_le*() -- identical on little-endian builds, but confirm
 * for big-endian.
 */
1248 static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
1249 struct sas_task *task,
1250 struct hisi_sas_slot *slot)
1252 struct task_status_struct *ts = &task->task_status;
1253 struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
1254 u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
1255 u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
1256 u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
1257 u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
1258 u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
/*
 * Report a single error, chosen in fixed priority order:
 * DMA RX > SIPC RX > DMA TX > TRANS RX > TRANS TX.  ffs() picks the
 * lowest set bit; each group maps into its own error-code base.
 */
1261 if (dma_rx_err_type) {
1262 error = ffs(dma_rx_err_type)
1263 - 1 + DMA_RX_ERR_BASE;
1264 } else if (sipc_rx_err_type) {
1265 error = ffs(sipc_rx_err_type)
1266 - 1 + SIPC_RX_ERR_BASE;
1267 } else if (dma_tx_err_type) {
1268 error = ffs(dma_tx_err_type)
1269 - 1 + DMA_TX_ERR_BASE;
1270 } else if (trans_rx_fail_type) {
1271 error = ffs(trans_rx_fail_type)
1272 - 1 + TRANS_RX_FAIL_BASE;
1273 } else if (trans_tx_fail_type) {
1274 error = ffs(trans_tx_fail_type)
1275 - 1 + TRANS_TX_FAIL_BASE;
/* Interpret the chosen error per transport protocol. */
1278 switch (task->task_proto) {
1279 case SAS_PROTOCOL_SSP:
/* OPEN rejects map onto specific libsas open_rej_reason codes. */
1282 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
1284 ts->stat = SAS_OPEN_REJECT;
1285 ts->open_rej_reason = SAS_OREJ_NO_DEST;
1288 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
1290 ts->stat = SAS_OPEN_REJECT;
1291 ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
1294 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
1296 ts->stat = SAS_OPEN_REJECT;
1297 ts->open_rej_reason = SAS_OREJ_EPROTO;
1300 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
1302 ts->stat = SAS_OPEN_REJECT;
1303 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1306 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
1308 ts->stat = SAS_OPEN_REJECT;
1309 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1312 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
1314 ts->stat = SAS_OPEN_REJECT;
1315 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1318 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
1320 ts->stat = SAS_OPEN_REJECT;
1321 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1324 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
1326 ts->stat = SAS_OPEN_REJECT;
1327 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1330 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
1333 ts->stat = SAS_DEV_NO_RESPONSE;
1336 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
1338 ts->stat = SAS_PHY_DOWN;
1341 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
1343 ts->stat = SAS_OPEN_TO;
1346 case DMA_RX_DATA_LEN_OVERFLOW:
1348 ts->stat = SAS_DATA_OVERRUN;
/* Underflow: residual comes from the error record.
 * NOTE(review): residual is taken from trans_tx_fail_type -- verify
 * that this field carries the residual byte count here. */
1352 case DMA_RX_DATA_LEN_UNDERFLOW:
1353 case SIPC_RX_DATA_UNDERFLOW_ERR:
1355 ts->residual = trans_tx_fail_type;
1356 ts->stat = SAS_DATA_UNDERRUN;
1359 case TRANS_TX_ERR_FRAME_TXED:
1361 /* This will request a retry */
1362 ts->stat = SAS_QUEUE_FULL;
/* All remaining transport/DMA errors: generic open reject. */
1366 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
1367 case TRANS_TX_ERR_PHY_NOT_ENABLE:
1368 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
1369 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
1370 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
1371 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
1372 case TRANS_TX_ERR_WITH_BREAK_REQUEST:
1373 case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
1374 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
1375 case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
1376 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1377 case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
1378 case TRANS_TX_ERR_WITH_NAK_RECEVIED:
1379 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
1380 case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
1381 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
1382 case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
1383 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
1384 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
1385 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
1386 case TRANS_RX_ERR_WITH_BREAK_REQUEST:
1387 case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
1388 case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
1389 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1390 case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
1391 case TRANS_RX_ERR_WITH_DATA_LEN0:
1392 case TRANS_RX_ERR_WITH_BAD_HASH:
1393 case TRANS_RX_XRDY_WLEN_ZERO_ERR:
1394 case TRANS_RX_SSP_FRM_LEN_ERR:
1395 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
1396 case DMA_TX_UNEXP_XFER_ERR:
1397 case DMA_TX_UNEXP_RETRANS_ERR:
1398 case DMA_TX_XFER_LEN_OVERFLOW:
1399 case DMA_TX_XFER_OFFSET_ERR:
1400 case DMA_RX_DATA_OFFSET_ERR:
1401 case DMA_RX_UNEXP_NORM_RESP_ERR:
1402 case DMA_RX_UNEXP_RDFRAME_ERR:
1403 case DMA_RX_UNKNOWN_FRM_ERR:
1405 ts->stat = SAS_OPEN_REJECT;
1406 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
/* SMP errors surface as a check condition to the SCSI layer. */
1414 case SAS_PROTOCOL_SMP:
1415 ts->stat = SAM_STAT_CHECK_CONDITION;
/* SATA/STP: similar mapping; default pulls the D2H FIS via sata_done. */
1418 case SAS_PROTOCOL_SATA:
1419 case SAS_PROTOCOL_STP:
1420 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1423 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
1424 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
1425 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
1427 ts->resp = SAS_TASK_UNDELIVERED;
1428 ts->stat = SAS_DEV_NO_RESPONSE;
1431 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
1432 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
1433 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
1434 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
1435 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
1436 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
1437 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
1439 ts->stat = SAS_OPEN_REJECT;
1442 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
1444 ts->stat = SAS_OPEN_TO;
1447 case DMA_RX_DATA_LEN_OVERFLOW:
1449 ts->stat = SAS_DATA_OVERRUN;
1452 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
1453 case TRANS_TX_ERR_PHY_NOT_ENABLE:
1454 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
1455 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
1456 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
1457 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
1458 case TRANS_TX_ERR_WITH_BREAK_REQUEST:
1459 case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
1460 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
1461 case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
1462 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1463 case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
1464 case TRANS_TX_ERR_WITH_NAK_RECEVIED:
1465 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
1466 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
1467 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
1468 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
1469 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
1470 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
1471 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
1472 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
1473 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
1474 case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
1475 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
1476 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1477 case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
1478 case TRANS_RX_ERR_WITH_DATA_LEN0:
1479 case TRANS_RX_ERR_WITH_BAD_HASH:
1480 case TRANS_RX_XRDY_WLEN_ZERO_ERR:
1481 case TRANS_RX_SSP_FRM_LEN_ERR:
1482 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
1483 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
1484 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
1485 case SIPC_RX_WRSETUP_LEN_ODD_ERR:
1486 case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
1487 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
1488 case SIPC_RX_SATA_UNEXP_FIS_ERR:
1489 case DMA_RX_SATA_FRAME_TYPE_ERR:
1490 case DMA_RX_UNEXP_RDFRAME_ERR:
1491 case DMA_RX_PIO_DATA_LEN_ERR:
1492 case DMA_RX_RDSETUP_STATUS_ERR:
1493 case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
1494 case DMA_RX_RDSETUP_STATUS_BSY_ERR:
1495 case DMA_RX_RDSETUP_LEN_ODD_ERR:
1496 case DMA_RX_RDSETUP_LEN_ZERO_ERR:
1497 case DMA_RX_RDSETUP_LEN_OVER_ERR:
1498 case DMA_RX_RDSETUP_OFFSET_ERR:
1499 case DMA_RX_RDSETUP_ACTIVE_ERR:
1500 case DMA_RX_RDSETUP_ESTATUS_ERR:
1501 case DMA_RX_UNKNOWN_FRM_ERR:
1503 ts->stat = SAS_OPEN_REJECT;
/* Default: the device responded -- report a protocol response and copy
 * the D2H FIS back for libata. */
1508 ts->stat = SAS_PROTO_RESPONSE;
1512 sata_done_v2_hw(hisi_hba, task, slot);
/*
 * Complete one slot from a completion-queue entry: update task state,
 * decode errors via slot_err_v2_hw() when the completion header flags
 * one, copy back the protocol-specific response, free the slot and call
 * task->task_done().  (The return-type line of the signature precedes
 * this excerpt in the full file.)
 */
1521 slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
1524 struct sas_task *task = slot->task;
1525 struct hisi_sas_device *sas_dev;
1526 struct device *dev = &hisi_hba->pdev->dev;
1527 struct task_status_struct *ts;
1528 struct domain_device *device;
1529 enum exec_status sts;
1530 struct hisi_sas_complete_v2_hdr *complete_queue =
1531 hisi_hba->complete_hdr[slot->cmplt_queue];
1532 struct hisi_sas_complete_v2_hdr *complete_hdr =
1533 &complete_queue[slot->cmplt_queue_slot];
/* Ignore slots whose task has already been torn down. */
1535 if (unlikely(!task || !task->lldd_task || !task->dev))
1538 ts = &task->task_status;
1540 sas_dev = device->lldd_dev;
/* Task is no longer pending / at the initiator; mark it done. */
1542 task->task_state_flags &=
1543 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1544 task->task_state_flags |= SAS_TASK_STATE_DONE;
1546 memset(ts, 0, sizeof(*ts));
1547 ts->resp = SAS_TASK_COMPLETE;
/* Device already gone (or completion is part of an abort). */
1549 if (unlikely(!sas_dev || abort)) {
1551 dev_dbg(dev, "slot complete: port has not device\n");
1552 ts->stat = SAS_PHY_DOWN;
/* Error reported without a response frame: decode the error record.
 * An aborting slot is handed to the abort work item instead of being
 * completed here. */
1556 if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
1557 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
1559 slot_err_v2_hw(hisi_hba, task, slot);
1560 if (unlikely(slot->abort)) {
1561 queue_work(hisi_hba->wq, &slot->abort_slot);
1562 /* immediately return and do not complete */
/* Success path: copy back the protocol-specific response. */
1568 switch (task->task_proto) {
1569 case SAS_PROTOCOL_SSP:
1571 struct ssp_response_iu *iu = slot->status_buffer +
1572 sizeof(struct hisi_sas_err_record);
1574 sas_ssp_task_response(dev, task, iu);
1577 case SAS_PROTOCOL_SMP:
1579 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1582 ts->stat = SAM_STAT_GOOD;
1583 to = kmap_atomic(sg_page(sg_resp));
/* Unmap both SMP buffers before copying the response out. */
1585 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
1587 dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
1589 memcpy(to + sg_resp->offset,
1590 slot->status_buffer +
1591 sizeof(struct hisi_sas_err_record),
1592 sg_dma_len(sg_resp));
1596 case SAS_PROTOCOL_SATA:
1597 case SAS_PROTOCOL_STP:
1598 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1600 ts->stat = SAM_STAT_GOOD;
1601 sata_done_v2_hw(hisi_hba, task, slot);
1605 ts->stat = SAM_STAT_CHECK_CONDITION;
/* Port was removed while the command was in flight. */
1609 if (!slot->port->port_attached) {
1610 dev_err(dev, "slot complete: port %d has removed\n",
1611 slot->port->sas_port.id);
1612 ts->stat = SAS_PHY_DOWN;
1616 if (sas_dev && sas_dev->running_req)
1617 sas_dev->running_req--;
1619 hisi_sas_slot_task_free(hisi_hba, task, slot);
1622 if (task->task_done)
1623 task->task_done(task);
/*
 * Map an ATA opcode (plus DMA direction) to the SATA frame-protocol
 * encoding placed in command header dw1.
 */
1628 static u8 get_ata_protocol(u8 cmd, int direction)
1631 case ATA_CMD_FPDMA_WRITE:
1632 case ATA_CMD_FPDMA_READ:
1633 return SATA_PROTOCOL_FPDMA;
1635 case ATA_CMD_ID_ATA:
1636 case ATA_CMD_PMP_READ:
1637 case ATA_CMD_READ_LOG_EXT:
1638 case ATA_CMD_PIO_READ:
1639 case ATA_CMD_PIO_READ_EXT:
1640 case ATA_CMD_PMP_WRITE:
1641 case ATA_CMD_WRITE_LOG_EXT:
1642 case ATA_CMD_PIO_WRITE:
1643 case ATA_CMD_PIO_WRITE_EXT:
1644 return SATA_PROTOCOL_PIO;
1647 case ATA_CMD_READ_EXT:
1648 case ATA_CMD_READ_LOG_DMA_EXT:
1650 case ATA_CMD_WRITE_EXT:
1651 case ATA_CMD_WRITE_QUEUED:
1652 case ATA_CMD_WRITE_LOG_DMA_EXT:
1653 return SATA_PROTOCOL_DMA;
1655 case ATA_CMD_DOWNLOAD_MICRO:
1656 case ATA_CMD_DEV_RESET:
1657 case ATA_CMD_CHK_POWER:
1659 case ATA_CMD_FLUSH_EXT:
1660 case ATA_CMD_VERIFY:
1661 case ATA_CMD_VERIFY_EXT:
1662 case ATA_CMD_SET_FEATURES:
1663 case ATA_CMD_STANDBY:
1664 case ATA_CMD_STANDBYNOW1:
1665 return SATA_PROTOCOL_NONDATA;
/* Unrecognised opcodes: fall back on the data direction. */
1667 if (direction == DMA_NONE)
1668 return SATA_PROTOCOL_NONDATA;
1669 return SATA_PROTOCOL_PIO;
1673 static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
1675 struct ata_queued_cmd *qc = task->uldd_task;
1678 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1679 qc->tf.command == ATA_CMD_FPDMA_READ) {
/*
 * Build the command header, PRD table and command FIS for a SATA/STP
 * task, including the NCQ tag for queued commands.
 */
1687 static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
1688 struct hisi_sas_slot *slot)
1690 struct sas_task *task = slot->task;
1691 struct domain_device *device = task->dev;
1692 struct domain_device *parent_dev = device->parent;
1693 struct hisi_sas_device *sas_dev = device->lldd_dev;
1694 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1695 struct hisi_sas_port *port = device->port->lldd_port;
1697 int has_data = 0, rc = 0, hdr_tag = 0;
1698 u32 dw1 = 0, dw2 = 0;
/* dw0: cmd type 3 = STP (behind an expander), 4 = direct-attached SATA */
1702 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
1703 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
1704 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
1706 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
1709 switch (task->data_dir) {
1712 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1714 case DMA_FROM_DEVICE:
1716 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1719 dw1 &= ~CMD_HDR_DIR_MSK;
/* A zero FIS command is used to convey a reset. */
1722 if (0 == task->ata_task.fis.command)
1723 dw1 |= 1 << CMD_HDR_RESET_OFF;
1725 dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
1726 << CMD_HDR_FRAME_TYPE_OFF;
1727 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1728 hdr->dw1 = cpu_to_le32(dw1);
/* NCQ: mirror the tag into the FIS sector count and the header. */
1731 if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
1732 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
1733 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
1736 dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
1737 2 << CMD_HDR_SG_MOD_OFF;
1738 hdr->dw2 = cpu_to_le32(dw2);
1741 hdr->transfer_tags = cpu_to_le32(slot->idx);
/* Data-bearing commands get a PRD table from task->scatter. */
1744 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
1751 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
1752 hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
1753 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
1755 buf_cmd = slot->command_table;
1757 if (likely(!task->ata_task.device_control_reg_update))
1758 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1759 /* fill in command FIS */
1760 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
/*
 * Handle a PHY-up (SL_PHY_ENABLE) event: read the negotiated port id
 * and link rate, capture the received IDENTIFY address frame, populate
 * the libsas phy structures, and queue the phy-up work item.  Finally
 * acks the interrupt and unmasks the source.
 */
1765 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1768 u32 context, port_id, link_rate, hard_phy_linkrate;
1769 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1770 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1771 struct device *dev = &hisi_hba->pdev->dev;
1772 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
1773 struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
/* Mask further PHY-enable interrupts while this one is handled. */
1775 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
1777 /* Check for SATA dev */
1778 context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
1779 if (context & (1 << phy_no))
/* PHY 8 decodes its port/rate from PORT_STATE; PHYs 0-7 use the 4-bit
 * per-PHY fields in PHY_PORT_NUM_MA / PHY_CONN_RATE. */
1783 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
1785 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
1786 PORT_STATE_PHY8_PORT_NUM_OFF;
1787 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
1788 PORT_STATE_PHY8_CONN_RATE_OFF;
1790 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
1791 port_id = (port_id >> (4 * phy_no)) & 0xf;
1792 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1793 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
/* 0xf means no port was assigned. */
1796 if (port_id == 0xf) {
1797 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
/* Copy the 6-dword IDENTIFY address frame, byte-swapping each dword. */
1802 for (i = 0; i < 6; i++) {
1803 u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
1804 RX_IDAF_DWORD0 + (i * 4));
1805 frame_rcvd[i] = __swab32(idaf);
1808 /* Get the linkrates */
1809 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1810 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
1811 sas_phy->linkrate = link_rate;
1812 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
/* Low nibble = max rate, next nibble = min rate. */
1814 phy->maximum_linkrate = hard_phy_linkrate & 0xf;
1815 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
1817 sas_phy->oob_mode = SAS_OOB_MODE;
1818 memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
1819 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
1820 phy->port_id = port_id;
1821 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1822 phy->phy_type |= PORT_TYPE_SAS;
1823 phy->phy_attached = 1;
1824 phy->identify.device_type = id->dev_type;
1825 phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
1826 if (phy->identify.device_type == SAS_END_DEVICE)
1827 phy->identify.target_port_protocols =
1829 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1830 phy->identify.target_port_protocols =
1832 queue_work(hisi_hba->wq, &phy->phyup_ws);
/* Ack the PHY-enable interrupt and unmask it again. */
1835 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1836 CHL_INT0_SL_PHY_ENABLE_MSK);
1837 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
/*
 * Handle a PHY not-ready (link down) event: mask the source, report the
 * current link state to libsas, then ack and unmask the interrupt.
 */
1842 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1845 u32 phy_cfg, phy_state;
/* Mask further not-ready interrupts while handling this one. */
1847 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
/* NOTE(review): phy_cfg is read here but not used in the visible code. */
1849 phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
1851 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
/* Tell the core whether this PHY is still up per PHY_STATE. */
1853 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
/* Ack the not-ready source and unmask it again. */
1855 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
1856 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
/*
 * PHY up/down interrupt handler: read the 9-bit per-PHY event bitmap
 * from HGC_INVLD_DQE_INFO and dispatch each flagged PHY to the phy-up
 * or phy-down handler according to its CHL_INT0 status.
 */
1861 static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
1863 struct hisi_hba *hisi_hba = p;
1866 irqreturn_t res = IRQ_HANDLED;
/* One bit per PHY (up to 9 PHYs). */
1868 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
1869 >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
1872 u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
1875 if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
/* PHY came up. */
1877 if (phy_up_v2_hw(phy_no, hisi_hba)) {
1882 if (irq_value & CHL_INT0_NOT_RDY_MSK)
/* PHY went down. */
1884 if (phy_down_v2_hw(phy_no, hisi_hba)) {
1897 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1899 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1900 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1901 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1902 unsigned long flags;
1904 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
1906 spin_lock_irqsave(&hisi_hba->lock, flags);
1907 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
1908 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1910 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1911 CHL_INT0_SL_RX_BCST_ACK_MSK);
1912 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
/*
 * Channel interrupt handler: for every flagged PHY, read CHL_INT0/1/2,
 * panic on fatal DMAC ECC errors, dispatch broadcast events, and ack
 * the remaining sources.  The ENT_INT_SRC_MSK3 mask is raised for the
 * duration and restored at the end.
 */
1915 static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
1917 struct hisi_hba *hisi_hba = p;
1918 struct device *dev = &hisi_hba->pdev->dev;
1919 u32 ent_msk, ent_tmp, irq_msk;
/* Mask channel interrupts while handling; ent_tmp restores them below. */
1922 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
1924 ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
1925 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
/* Per-PHY event bitmap (9 bits). */
1927 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
1928 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
1931 if (irq_msk & (1 << phy_no)) {
1932 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
1934 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
1936 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
/* DMAC ECC errors are treated as unrecoverable. */
1940 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
1941 CHL_INT1_DMAC_TX_ECC_ERR_MSK))
1942 panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
1943 dev_name(dev), irq_value1);
1945 hisi_sas_phy_write32(hisi_hba, phy_no,
1946 CHL_INT1, irq_value1);
1950 hisi_sas_phy_write32(hisi_hba, phy_no,
1951 CHL_INT2, irq_value2);
1955 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
1956 phy_bcast_v2_hw(phy_no, hisi_hba);
/* Ack CHL_INT0, excluding sources owned by other handlers. */
1958 hisi_sas_phy_write32(hisi_hba, phy_no,
1959 CHL_INT0, irq_value0
1960 & (~CHL_INT0_HOTPLUG_TOUT_MSK)
1961 & (~CHL_INT0_SL_PHY_ENABLE_MSK)
1962 & (~CHL_INT0_NOT_RDY_MSK));
1965 irq_msk &= ~(1 << phy_no);
/* Restore the previous interrupt mask. */
1969 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
/*
 * Completion-queue interrupt handler: drain entries between the read
 * and write pointers, completing each slot.  NCQ completions carry an
 * active-tag bitmap; each set bit's IPTT is looked up in the device's
 * ITCT entry (12 bits per tag, five tags per 64-bit word).
 */
1974 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
1976 struct hisi_sas_cq *cq = p;
1977 struct hisi_hba *hisi_hba = cq->hisi_hba;
1978 struct hisi_sas_slot *slot;
1979 struct hisi_sas_itct *itct;
1980 struct hisi_sas_complete_v2_hdr *complete_queue;
1981 u32 irq_value, rd_point, wr_point, dev_id;
1984 complete_queue = hisi_hba->complete_hdr[queue];
1985 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
/* Ack this queue's interrupt source. */
1987 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1989 rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
1991 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
1994 while (rd_point != wr_point) {
1995 struct hisi_sas_complete_v2_hdr *complete_hdr;
1998 complete_hdr = &complete_queue[rd_point];
2000 /* Check for NCQ completion */
2001 if (complete_hdr->act) {
2002 u32 act_tmp = complete_hdr->act;
2003 int ncq_tag_count = ffs(act_tmp);
2005 dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
2006 CMPLT_HDR_DEV_ID_OFF;
2007 itct = &hisi_hba->itct[dev_id];
2009 /* The NCQ tags are held in the itct header */
2010 while (ncq_tag_count) {
2011 __le64 *ncq_tag = &itct->qw4_15[0];
/* 12-bit IPTT per tag, packed five per 64-bit word. */
2014 iptt = (ncq_tag[ncq_tag_count / 5]
2015 >> (ncq_tag_count % 5) * 12) & 0xfff;
2017 slot = &hisi_hba->slot_info[iptt];
2018 slot->cmplt_queue_slot = rd_point;
2019 slot->cmplt_queue = queue;
2020 slot_complete_v2_hw(hisi_hba, slot, 0);
/* Clear the handled tag and move to the next set bit. */
2022 act_tmp &= ~(1 << ncq_tag_count);
2023 ncq_tag_count = ffs(act_tmp);
/* Non-NCQ completion: IPTT comes straight from the header. */
2026 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
2027 slot = &hisi_hba->slot_info[iptt];
2028 slot->cmplt_queue_slot = rd_point;
2029 slot->cmplt_queue = queue;
2030 slot_complete_v2_hw(hisi_hba, slot, 0);
2033 if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
2037 /* update rd_point */
2038 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
/*
 * Per-PHY SATA interrupt handler: fires when the initial D2H FIS is
 * received.  Decodes port id and link rate, fabricates a SAS address
 * for the direct-attached SATA device, copies the FIS into the phy's
 * frame_rcvd buffer and queues the phy-up work item.
 */
2042 static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
2044 struct hisi_sas_phy *phy = p;
2045 struct hisi_hba *hisi_hba = phy->hisi_hba;
2046 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2047 struct device *dev = &hisi_hba->pdev->dev;
2048 struct hisi_sas_initial_fis *initial_fis;
2049 struct dev_to_host_fis *fis;
2050 u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
2051 irqreturn_t res = IRQ_HANDLED;
2052 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
2055 phy_no = sas_phy->id;
2056 initial_fis = &hisi_hba->initial_fis[phy_no];
2057 fis = &initial_fis->fis;
/* ENT_INT_SRC1 packs four PHYs per register, 8 bits per PHY. */
2059 offset = 4 * (phy_no / 4);
2060 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
/* Mask this PHY's D2H-FIS interrupt while it is handled. */
2061 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
2062 ent_msk | 1 << ((phy_no % 4) * 8));
2064 ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
2065 ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
2067 ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
2068 if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
2069 dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
/* PHY 8 decodes port/rate from PORT_STATE, others from per-PHY fields. */
2074 if (unlikely(phy_no == 8)) {
2075 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
2077 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
2078 PORT_STATE_PHY8_PORT_NUM_OFF;
2079 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
2080 PORT_STATE_PHY8_CONN_RATE_OFF;
2082 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
2083 port_id = (port_id >> (4 * phy_no)) & 0xf;
2084 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
2085 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
/* 0xf means no port was assigned. */
2088 if (port_id == 0xf) {
2089 dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
2094 sas_phy->linkrate = link_rate;
2095 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
2097 phy->maximum_linkrate = hard_phy_linkrate & 0xf;
2098 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
2100 sas_phy->oob_mode = SATA_OOB_MODE;
2101 /* Make up some unique SAS address */
2102 attached_sas_addr[0] = 0x50;
2103 attached_sas_addr[7] = phy_no;
2104 memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
2105 memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
2106 dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
2107 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2108 phy->port_id = port_id;
2109 phy->phy_type |= PORT_TYPE_SATA;
2110 phy->phy_attached = 1;
2111 phy->identify.device_type = SAS_SATA_DEV;
2112 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
2113 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
2114 queue_work(hisi_hba->wq, &phy->phyup_ws);
/* Ack the interrupt source and restore the previous mask. */
2117 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
2118 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
/* Top-level PHY interrupt handlers, indexed by PHY interrupt source. */
2123 static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
2124 int_phy_updown_v2_hw,
2129 * There is a limitation in the hip06 chipset that we need
2130 * to map in all mbigen interrupts, even if they are not used.
/*
 * Map and request all interrupts: the PHY interrupt group, one SATA
 * (D2H FIS) interrupt per PHY, and one interrupt per completion queue.
 * The platform exposes a fixed mbigen layout, hence the hard-coded
 * base indices (phy: 1, sata: 72, cq: 96).
 */
2132 static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
2134 struct platform_device *pdev = hisi_hba->pdev;
2135 struct device *dev = &pdev->dev;
2136 int i, irq, rc, irq_map[128];
/* All 128 mbigen lines must be mapped, even unused ones (hip06 quirk). */
2139 for (i = 0; i < 128; i++)
2140 irq_map[i] = platform_get_irq(pdev, i);
2142 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
2145 irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
2147 dev_err(dev, "irq init: fail map phy interrupt %d\n",
2152 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
2153 DRV_NAME " phy", hisi_hba);
2155 dev_err(dev, "irq init: could not request "
2156 "phy interrupt %d, rc=%d\n",
/* One SATA interrupt per PHY; the phy struct is the handler context. */
2162 for (i = 0; i < hisi_hba->n_phy; i++) {
2163 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
2164 int idx = i + 72; /* First SATA interrupt is irq72 */
2168 dev_err(dev, "irq init: fail map phy interrupt %d\n",
2173 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
2174 DRV_NAME " sata", phy);
2176 dev_err(dev, "irq init: could not request "
2177 "sata interrupt %d, rc=%d\n",
/* One interrupt per completion queue; context is the cq struct. */
2183 for (i = 0; i < hisi_hba->queue_count; i++) {
2184 int idx = i + 96; /* First cq interrupt is irq96 */
2189 "irq init: could not map cq interrupt %d\n",
2193 rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
2194 DRV_NAME " cq", &hisi_hba->cq[i]);
2197 "irq init: could not request cq interrupt %d, rc=%d\n",
/*
 * Controller bring-up for v2 hw: initialise the hardware, wire up the
 * interrupts, then start the PHYs.  Returns 0 on success or the first
 * failing step's error code.
 */
static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	phys_init_v2_hw(hisi_hba);

	return 0;
}
/* v2 hardware method table plugged into the hisi_sas core driver. */
2223 static const struct hisi_sas_hw hisi_sas_v2_hw = {
2224 .hw_init = hisi_sas_v2_init,
2225 .setup_itct = setup_itct_v2_hw,
2226 .slot_index_alloc = slot_index_alloc_quirk_v2_hw,
2227 .alloc_dev = alloc_dev_quirk_v2_hw,
2228 .sl_notify = sl_notify_v2_hw,
2229 .get_wideport_bitmap = get_wideport_bitmap_v2_hw,
2230 .free_device = free_device_v2_hw,
2231 .prep_smp = prep_smp_v2_hw,
2232 .prep_ssp = prep_ssp_v2_hw,
2233 .prep_stp = prep_ata_v2_hw,
2234 .get_free_slot = get_free_slot_v2_hw,
2235 .start_delivery = start_delivery_v2_hw,
2236 .slot_complete = slot_complete_v2_hw,
2237 .phy_enable = enable_phy_v2_hw,
2238 .phy_disable = disable_phy_v2_hw,
2239 .phy_hard_reset = phy_hard_reset_v2_hw,
2240 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
2241 .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
/* Platform probe: delegate to the common core with the v2 hw ops. */
2244 static int hisi_sas_v2_probe(struct platform_device *pdev)
2246 return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
/* Platform remove: delegate teardown to the common core. */
2249 static int hisi_sas_v2_remove(struct platform_device *pdev)
2251 return hisi_sas_remove(pdev);
/* Devicetree match table (hip06).  Must end with a NULL sentinel. */
2254 static const struct of_device_id sas_v2_of_match[] = {
2255 { .compatible = "hisilicon,hip06-sas-v2",},
2258 MODULE_DEVICE_TABLE(of, sas_v2_of_match);
/* Platform driver definition and module boilerplate. */
2260 static struct platform_driver hisi_sas_v2_driver = {
2261 .probe = hisi_sas_v2_probe,
2262 .remove = hisi_sas_v2_remove,
2265 .of_match_table = sas_v2_of_match,
2269 module_platform_driver(hisi_sas_v2_driver);
2271 MODULE_LICENSE("GPL");
2272 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2273 MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
2274 MODULE_ALIAS("platform:" DRV_NAME);