/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24
#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0
#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
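
/*
 * Queue pointer encoding used by the Q_* macros below: the low max_n_shift
 * bits of a producer/consumer pointer hold the entry index, the next bit up
 * is a wrap flag that toggles on each pass around the ring, and bit 31 is
 * the overflow flag. Equal index and equal wrap means the queue is empty;
 * equal index with differing wrap means it is full.
 */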
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
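
/*
 * For example, ARM_SMMU_TCR2CD(tcr, T0SZ) extracts TCR.T0SZ from bits [5:0]
 * of the CPU register and places it at the (identical) T0SZ offset in CD
 * word 0, whereas TG0 moves from bit 14 of the TCR down to bit 6 of the CD.
 */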
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};
/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
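
/*
 * CMDQ commands occupy two 64-bit words: the opcode lives in bits [7:0] of
 * dword 0 and the remaining fields are packed according to the CMDQ_*
 * shifts and masks defined above.
 */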
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		cerror_str[idx]);

	switch (idx) {
	case CMDQ_ERR_CERROR_ILL_IDX:
		break;
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, idx), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}
static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass  (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
			 STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * trying harder.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	/* Only the bits that differ from GERRORN indicate active errors */
	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu->dev);
	}

	if (active & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu->dev);
	}

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	/* Acknowledge the errors we've handled */
	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
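
/* TLB invalidation callbacks registered with the io-pgtable library */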
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
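
/*
 * ASIDs and VMIDs are handed out from plain bitmaps: find_first_zero_bit
 * alone is racy, so the chosen bit is claimed with test_and_set_bit and the
 * search simply retried if somebody else got there first.
 */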
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dma_free_coherent(smmu_domain->smmu->dev,
					  CTXDESC_CD_DWORDS << 3,
					  cfg->cdptr,
					  cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma, GFP_KERNEL);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static struct iommu_ops arm_smmu_ops;
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct platform_device *smmu_pdev;
	struct arm_smmu_device *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	smmu_pdev = of_find_device_by_node(of_node);
	if (smmu_pdev)
		smmu = platform_get_drvdata(smmu_pdev);

	of_node_put(of_node);
	return smmu;
}
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_put_group;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			return 0;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_put_group;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_put_group;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_put_group;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;
	return 0;

out_put_group:
	iommu_group_put(group);
	return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= pci_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}

static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
				    struct arm_smmu_queue *q)
{
	size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;

	dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
}

static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
{
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_free_one_queue(smmu, &smmu->priq.q);
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		goto out;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		goto out_free_cmdq;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				      ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
	if (ret)
		goto out_free_evtq;

	return 0;

out_free_evtq:
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
out_free_cmdq:
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
out:
	return ret;
}
static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
{
	int i;
	size_t size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	for (i = 0; i < cfg->num_l1_ents; ++i) {
		struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];

		if (!desc->l2ptr)
			continue;

		dma_free_coherent(smmu->dev, size, desc->l2ptr,
				  desc->l2ptr_dma);
	}
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	int ret;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	ret = arm_smmu_init_l1_strtab(smmu);
	if (ret)
		dma_free_coherent(smmu->dev, l1size, strtab, cfg->strtab_dma);

	return ret;
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}
static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	u32 size = cfg->num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		arm_smmu_free_l2_strtab(smmu);
		size *= STRTAB_L1_DESC_DWORDS << 3;
	} else {
		size *= STRTAB_STE_DWORDS * 8;
	}

	dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
}
static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_init_strtab(smmu);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_queues:
	arm_smmu_free_queues(smmu);
	return ret;
}

static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
{
	arm_smmu_free_strtab(smmu);
	arm_smmu_free_queues(smmu);
}
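
/*
 * Registers paired with an ACK register (e.g. CR0/CR0ACK) must be updated
 * via arm_smmu_write_reg_sync(), which polls until the hardware has
 * acknowledged the new value.
 */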
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
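/*
 * Full hardware bring-up: quiesce the SMMU, program table/queue memory
 * attributes, install the stream table and queues, invalidate stale
 * configuration and TLB state, then enable each queue and finally the
 * SMMU itself, syncing on CR0ACK at every step.
 */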
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (PTM, RECINVSID and E2H) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
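/*
 * Probe the ID registers (IDR0/1/5) to discover what the hardware
 * supports: structure levels, endianness, boolean feature flags, queue
 * and SID/SSID sizes, page granules and address sizes.
 */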
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;
	unsigned long pgsize_bitmap = 0;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	if (reg & IDR0_STALL_MODEL)
		smmu->features |= ARM_SMMU_FEAT_STALLS;

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);

	return 0;
}
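/*
 * Platform bus probe: map the MMIO region, collect the named wired
 * interrupts from the DT, discover hardware features, allocate the
 * in-memory structures and reset the device into a known state.
 */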
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu);
	if (ret)
		goto out_free_structures;

	return 0;

out_free_structures:
	arm_smmu_free_structures(smmu);
	return ret;
}
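/* Driver unbind: quiesce the hardware, then release the in-memory structures. */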
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	arm_smmu_free_structures(smmu);
	return 0;
}
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
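/*
 * For illustration only, a device-tree node matched by this driver might
 * look like the sketch below (addresses and interrupt numbers are made
 * up; the interrupt names are the ones requested in
 * arm_smmu_device_dt_probe(), and the 128K reg size is the minimum the
 * probe accepts):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		dma-coherent;
 *	};
 *
 * See Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt for the
 * authoritative binding.
 */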
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
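/*
 * Only register the driver (and hook up the PCI bus) if a matching DT
 * node actually exists; an early initcall level is used so the SMMU is
 * available before its client devices probe.
 */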
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");