iommu/arm-smmu: Fix stream-match conflict with IOMMU_DOMAIN_DMA
drivers/iommu/arm-smmu.c
1 /*
2  * IOMMU API for ARM architected SMMU implementations.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16  *
17  * Copyright (C) 2013 ARM Limited
18  *
19  * Author: Will Deacon <will.deacon@arm.com>
20  *
21  * This driver currently supports:
22  *      - SMMUv1 and v2 implementations
23  *      - Stream-matching and stream-indexing
24  *      - v7/v8 long-descriptor format
25  *      - Non-secure access to the SMMU
26  *      - Context fault reporting
27  */
28
29 #define pr_fmt(fmt) "arm-smmu: " fmt
30
31 #include <linux/delay.h>
32 #include <linux/dma-iommu.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/err.h>
35 #include <linux/interrupt.h>
36 #include <linux/io.h>
37 #include <linux/iommu.h>
38 #include <linux/iopoll.h>
39 #include <linux/module.h>
40 #include <linux/of.h>
41 #include <linux/of_address.h>
42 #include <linux/pci.h>
43 #include <linux/platform_device.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
46
47 #include <linux/amba/bus.h>
48
49 #include "io-pgtable.h"
50
51 /* Maximum number of stream IDs assigned to a single device */
52 #define MAX_MASTER_STREAMIDS            MAX_PHANDLE_ARGS
53
54 /* Maximum number of context banks per SMMU */
55 #define ARM_SMMU_MAX_CBS                128
56
57 /* Maximum number of mapping groups per SMMU */
58 #define ARM_SMMU_MAX_SMRS               128
59
60 /* SMMU global address space */
61 #define ARM_SMMU_GR0(smmu)              ((smmu)->base)
62 #define ARM_SMMU_GR1(smmu)              ((smmu)->base + (1 << (smmu)->pgshift))
63
64 /*
65  * SMMU global address space with conditional offset to access secure
66  * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
67  * nsGFSYNR0: 0x450)
68  */
69 #define ARM_SMMU_GR0_NS(smmu)                                           \
70         ((smmu)->base +                                                 \
71                 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
72                         ? 0x400 : 0))
73
74 #ifdef CONFIG_64BIT
75 #define smmu_writeq     writeq_relaxed
76 #else
77 #define smmu_writeq(reg64, addr)                                \
78         do {                                                    \
79                 u64 __val = (reg64);                            \
80                 void __iomem *__addr = (addr);                  \
81                 writel_relaxed(__val >> 32, __addr + 4);        \
82                 writel_relaxed(__val, __addr);                  \
83         } while (0)
84 #endif
85
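/*
 * Illustrative expansion of the 32-bit fallback above (a sketch, not
 * driver code): writing the 64-bit value 0x0000000adeadbeefULL to a
 * register at `addr` issues two relaxed 32-bit writes, high word first:
 *
 *	writel_relaxed(0x0000000a, addr + 4);
 *	writel_relaxed(0xdeadbeef, addr);
 */
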
86 /* Configuration registers */
87 #define ARM_SMMU_GR0_sCR0               0x0
88 #define sCR0_CLIENTPD                   (1 << 0)
89 #define sCR0_GFRE                       (1 << 1)
90 #define sCR0_GFIE                       (1 << 2)
91 #define sCR0_GCFGFRE                    (1 << 4)
92 #define sCR0_GCFGFIE                    (1 << 5)
93 #define sCR0_USFCFG                     (1 << 10)
94 #define sCR0_VMIDPNE                    (1 << 11)
95 #define sCR0_PTM                        (1 << 12)
96 #define sCR0_FB                         (1 << 13)
97 #define sCR0_BSU_SHIFT                  14
98 #define sCR0_BSU_MASK                   0x3
99
100 /* Identification registers */
101 #define ARM_SMMU_GR0_ID0                0x20
102 #define ARM_SMMU_GR0_ID1                0x24
103 #define ARM_SMMU_GR0_ID2                0x28
104 #define ARM_SMMU_GR0_ID3                0x2c
105 #define ARM_SMMU_GR0_ID4                0x30
106 #define ARM_SMMU_GR0_ID5                0x34
107 #define ARM_SMMU_GR0_ID6                0x38
108 #define ARM_SMMU_GR0_ID7                0x3c
109 #define ARM_SMMU_GR0_sGFSR              0x48
110 #define ARM_SMMU_GR0_sGFSYNR0           0x50
111 #define ARM_SMMU_GR0_sGFSYNR1           0x54
112 #define ARM_SMMU_GR0_sGFSYNR2           0x58
113
114 #define ID0_S1TS                        (1 << 30)
115 #define ID0_S2TS                        (1 << 29)
116 #define ID0_NTS                         (1 << 28)
117 #define ID0_SMS                         (1 << 27)
118 #define ID0_ATOSNS                      (1 << 26)
119 #define ID0_CTTW                        (1 << 14)
120 #define ID0_NUMIRPT_SHIFT               16
121 #define ID0_NUMIRPT_MASK                0xff
122 #define ID0_NUMSIDB_SHIFT               9
123 #define ID0_NUMSIDB_MASK                0xf
124 #define ID0_NUMSMRG_SHIFT               0
125 #define ID0_NUMSMRG_MASK                0xff
126
127 #define ID1_PAGESIZE                    (1 << 31)
128 #define ID1_NUMPAGENDXB_SHIFT           28
129 #define ID1_NUMPAGENDXB_MASK            7
130 #define ID1_NUMS2CB_SHIFT               16
131 #define ID1_NUMS2CB_MASK                0xff
132 #define ID1_NUMCB_SHIFT                 0
133 #define ID1_NUMCB_MASK                  0xff
134
135 #define ID2_OAS_SHIFT                   4
136 #define ID2_OAS_MASK                    0xf
137 #define ID2_IAS_SHIFT                   0
138 #define ID2_IAS_MASK                    0xf
139 #define ID2_UBS_SHIFT                   8
140 #define ID2_UBS_MASK                    0xf
141 #define ID2_PTFS_4K                     (1 << 12)
142 #define ID2_PTFS_16K                    (1 << 13)
143 #define ID2_PTFS_64K                    (1 << 14)
144
145 /* Global TLB invalidation */
146 #define ARM_SMMU_GR0_TLBIVMID           0x64
147 #define ARM_SMMU_GR0_TLBIALLNSNH        0x68
148 #define ARM_SMMU_GR0_TLBIALLH           0x6c
149 #define ARM_SMMU_GR0_sTLBGSYNC          0x70
150 #define ARM_SMMU_GR0_sTLBGSTATUS        0x74
151 #define sTLBGSTATUS_GSACTIVE            (1 << 0)
152 #define TLB_LOOP_TIMEOUT                1000000 /* 1s! */
153
154 /* Stream mapping registers */
155 #define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))
156 #define SMR_VALID                       (1 << 31)
157 #define SMR_MASK_SHIFT                  16
158 #define SMR_MASK_MASK                   0x7fff
159 #define SMR_ID_SHIFT                    0
160 #define SMR_ID_MASK                     0x7fff
161
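/*
 * Sketch (not driver code): a stream-match register entry is built from
 * the fields above. Bits set in `mask` are ignored during matching, so
 * the entry below matches any StreamID that equals `id` on the unmasked
 * bits:
 *
 *	u32 smr = SMR_VALID | (mask << SMR_MASK_SHIFT) | (id << SMR_ID_SHIFT);
 *	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
 */
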
162 #define ARM_SMMU_GR0_S2CR(n)            (0xc00 + ((n) << 2))
163 #define S2CR_CBNDX_SHIFT                0
164 #define S2CR_CBNDX_MASK                 0xff
165 #define S2CR_TYPE_SHIFT                 16
166 #define S2CR_TYPE_MASK                  0x3
167 #define S2CR_TYPE_TRANS                 (0 << S2CR_TYPE_SHIFT)
168 #define S2CR_TYPE_BYPASS                (1 << S2CR_TYPE_SHIFT)
169 #define S2CR_TYPE_FAULT                 (2 << S2CR_TYPE_SHIFT)
170
171 #define S2CR_PRIVCFG_SHIFT              24
172 #define S2CR_PRIVCFG_UNPRIV             (2 << S2CR_PRIVCFG_SHIFT)
173
174 /* Context bank attribute registers */
175 #define ARM_SMMU_GR1_CBAR(n)            (0x0 + ((n) << 2))
176 #define CBAR_VMID_SHIFT                 0
177 #define CBAR_VMID_MASK                  0xff
178 #define CBAR_S1_BPSHCFG_SHIFT           8
179 #define CBAR_S1_BPSHCFG_MASK            3
180 #define CBAR_S1_BPSHCFG_NSH             3
181 #define CBAR_S1_MEMATTR_SHIFT           12
182 #define CBAR_S1_MEMATTR_MASK            0xf
183 #define CBAR_S1_MEMATTR_WB              0xf
184 #define CBAR_TYPE_SHIFT                 16
185 #define CBAR_TYPE_MASK                  0x3
186 #define CBAR_TYPE_S2_TRANS              (0 << CBAR_TYPE_SHIFT)
187 #define CBAR_TYPE_S1_TRANS_S2_BYPASS    (1 << CBAR_TYPE_SHIFT)
188 #define CBAR_TYPE_S1_TRANS_S2_FAULT     (2 << CBAR_TYPE_SHIFT)
189 #define CBAR_TYPE_S1_TRANS_S2_TRANS     (3 << CBAR_TYPE_SHIFT)
190 #define CBAR_IRPTNDX_SHIFT              24
191 #define CBAR_IRPTNDX_MASK               0xff
192
193 #define ARM_SMMU_GR1_CBA2R(n)           (0x800 + ((n) << 2))
194 #define CBA2R_RW64_32BIT                (0 << 0)
195 #define CBA2R_RW64_64BIT                (1 << 0)
196
197 /* Translation context bank */
198 #define ARM_SMMU_CB_BASE(smmu)          ((smmu)->base + ((smmu)->size >> 1))
199 #define ARM_SMMU_CB(smmu, n)            ((n) * (1 << (smmu)->pgshift))
200
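/*
 * Worked example (illustrative values): with a 512KB register span
 * (size = 0x80000) and a 64KB SMMU page (pgshift = 16), the context
 * banks start at base + 0x40000, so context bank 3 sits at
 * base + 0x40000 + (3 << 16) = base + 0x70000.
 */
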
201 #define ARM_SMMU_CB_SCTLR               0x0
202 #define ARM_SMMU_CB_RESUME              0x8
203 #define ARM_SMMU_CB_TTBCR2              0x10
204 #define ARM_SMMU_CB_TTBR0               0x20
205 #define ARM_SMMU_CB_TTBR1               0x28
206 #define ARM_SMMU_CB_TTBCR               0x30
207 #define ARM_SMMU_CB_S1_MAIR0            0x38
208 #define ARM_SMMU_CB_S1_MAIR1            0x3c
209 #define ARM_SMMU_CB_PAR_LO              0x50
210 #define ARM_SMMU_CB_PAR_HI              0x54
211 #define ARM_SMMU_CB_FSR                 0x58
212 #define ARM_SMMU_CB_FAR_LO              0x60
213 #define ARM_SMMU_CB_FAR_HI              0x64
214 #define ARM_SMMU_CB_FSYNR0              0x68
215 #define ARM_SMMU_CB_S1_TLBIVA           0x600
216 #define ARM_SMMU_CB_S1_TLBIASID         0x610
217 #define ARM_SMMU_CB_S1_TLBIVAL          0x620
218 #define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
219 #define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
220 #define ARM_SMMU_CB_ATS1PR              0x800
221 #define ARM_SMMU_CB_ATSR                0x8f0
222
223 #define SCTLR_S1_ASIDPNE                (1 << 12)
224 #define SCTLR_CFCFG                     (1 << 7)
225 #define SCTLR_CFIE                      (1 << 6)
226 #define SCTLR_CFRE                      (1 << 5)
227 #define SCTLR_E                         (1 << 4)
228 #define SCTLR_AFE                       (1 << 2)
229 #define SCTLR_TRE                       (1 << 1)
230 #define SCTLR_M                         (1 << 0)
231 #define SCTLR_EAE_SBOP                  (SCTLR_AFE | SCTLR_TRE)
232
233 #define CB_PAR_F                        (1 << 0)
234
235 #define ATSR_ACTIVE                     (1 << 0)
236
237 #define RESUME_RETRY                    (0 << 0)
238 #define RESUME_TERMINATE                (1 << 0)
239
240 #define TTBCR2_SEP_SHIFT                15
241 #define TTBCR2_SEP_UPSTREAM             (0x7 << TTBCR2_SEP_SHIFT)
242
243 #define TTBRn_ASID_SHIFT                48
244
245 #define FSR_MULTI                       (1 << 31)
246 #define FSR_SS                          (1 << 30)
247 #define FSR_UUT                         (1 << 8)
248 #define FSR_ASF                         (1 << 7)
249 #define FSR_TLBLKF                      (1 << 6)
250 #define FSR_TLBMCF                      (1 << 5)
251 #define FSR_EF                          (1 << 4)
252 #define FSR_PF                          (1 << 3)
253 #define FSR_AFF                         (1 << 2)
254 #define FSR_TF                          (1 << 1)
255
256 #define FSR_IGN                         (FSR_AFF | FSR_ASF | \
257                                          FSR_TLBMCF | FSR_TLBLKF)
258 #define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
259                                          FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
260
261 #define FSYNR0_WNR                      (1 << 4)
262
263 static int force_stage;
264 module_param(force_stage, int, S_IRUGO);
265 MODULE_PARM_DESC(force_stage,
266         "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
267 static bool disable_bypass;
268 module_param(disable_bypass, bool, S_IRUGO);
269 MODULE_PARM_DESC(disable_bypass,
270         "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
271
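/*
 * Usage sketch (assumed, not from this file): both parameters can be
 * set on the kernel command line when the driver is built in, e.g.
 *
 *	arm-smmu.force_stage=2 arm-smmu.disable_bypass=1
 */
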
272 enum arm_smmu_arch_version {
273         ARM_SMMU_V1 = 1,
274         ARM_SMMU_V2,
275 };
276
277 struct arm_smmu_smr {
278         u8                              idx;
279         u16                             mask;
280         u16                             id;
281 };
282
283 struct arm_smmu_master_cfg {
284         int                             num_streamids;
285         u16                             streamids[MAX_MASTER_STREAMIDS];
286         struct arm_smmu_smr             *smrs;
287 };
288
289 struct arm_smmu_master {
290         struct device_node              *of_node;
291         struct rb_node                  node;
292         struct arm_smmu_master_cfg      cfg;
293 };
294
295 struct arm_smmu_device {
296         struct device                   *dev;
297
298         void __iomem                    *base;
299         unsigned long                   size;
300         unsigned long                   pgshift;
301
302 #define ARM_SMMU_FEAT_COHERENT_WALK     (1 << 0)
303 #define ARM_SMMU_FEAT_STREAM_MATCH      (1 << 1)
304 #define ARM_SMMU_FEAT_TRANS_S1          (1 << 2)
305 #define ARM_SMMU_FEAT_TRANS_S2          (1 << 3)
306 #define ARM_SMMU_FEAT_TRANS_NESTED      (1 << 4)
307 #define ARM_SMMU_FEAT_TRANS_OPS         (1 << 5)
308         u32                             features;
309
310 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
311         u32                             options;
312         enum arm_smmu_arch_version      version;
313
314         u32                             num_context_banks;
315         u32                             num_s2_context_banks;
316         DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
317         atomic_t                        irptndx;
318
319         u32                             num_mapping_groups;
320         DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
321
322         unsigned long                   va_size;
323         unsigned long                   ipa_size;
324         unsigned long                   pa_size;
325
326         u32                             num_global_irqs;
327         u32                             num_context_irqs;
328         unsigned int                    *irqs;
329
330         struct list_head                list;
331         struct rb_root                  masters;
332 };
333
334 struct arm_smmu_cfg {
335         u8                              cbndx;
336         u8                              irptndx;
337         u32                             cbar;
338 };
339 #define INVALID_IRPTNDX                 0xff
340
341 #define ARM_SMMU_CB_ASID(cfg)           ((cfg)->cbndx)
342 #define ARM_SMMU_CB_VMID(cfg)           ((cfg)->cbndx + 1)
343
344 enum arm_smmu_domain_stage {
345         ARM_SMMU_DOMAIN_S1 = 0,
346         ARM_SMMU_DOMAIN_S2,
347         ARM_SMMU_DOMAIN_NESTED,
348 };
349
350 struct arm_smmu_domain {
351         struct arm_smmu_device          *smmu;
352         struct io_pgtable_ops           *pgtbl_ops;
353         spinlock_t                      pgtbl_lock;
354         struct arm_smmu_cfg             cfg;
355         enum arm_smmu_domain_stage      stage;
356         struct mutex                    init_mutex; /* Protects smmu pointer */
357         struct iommu_domain             domain;
358 };
359
360 static struct iommu_ops arm_smmu_ops;
361
362 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
363 static LIST_HEAD(arm_smmu_devices);
364
365 struct arm_smmu_option_prop {
366         u32 opt;
367         const char *prop;
368 };
369
370 static struct arm_smmu_option_prop arm_smmu_options[] = {
371         { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
372         { 0, NULL},
373 };
374
375 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
376 {
377         return container_of(dom, struct arm_smmu_domain, domain);
378 }
379
380 static void parse_driver_options(struct arm_smmu_device *smmu)
381 {
382         int i = 0;
383
384         do {
385                 if (of_property_read_bool(smmu->dev->of_node,
386                                                 arm_smmu_options[i].prop)) {
387                         smmu->options |= arm_smmu_options[i].opt;
388                         dev_notice(smmu->dev, "option %s\n",
389                                 arm_smmu_options[i].prop);
390                 }
391         } while (arm_smmu_options[++i].opt);
392 }
393
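/*
 * Illustrative DT fragment (assumed, not taken from a real dts): an
 * SMMU node opting in to the secure-register workaround parsed above:
 *
 *	smmu@b0000 {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */
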
394 static struct device_node *dev_get_dev_node(struct device *dev)
395 {
396         if (dev_is_pci(dev)) {
397                 struct pci_bus *bus = to_pci_dev(dev)->bus;
398
399                 while (!pci_is_root_bus(bus))
400                         bus = bus->parent;
401                 return bus->bridge->parent->of_node;
402         }
403
404         return dev->of_node;
405 }
406
407 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
408                                                 struct device_node *dev_node)
409 {
410         struct rb_node *node = smmu->masters.rb_node;
411
412         while (node) {
413                 struct arm_smmu_master *master;
414
415                 master = container_of(node, struct arm_smmu_master, node);
416
417                 if (dev_node < master->of_node)
418                         node = node->rb_left;
419                 else if (dev_node > master->of_node)
420                         node = node->rb_right;
421                 else
422                         return master;
423         }
424
425         return NULL;
426 }
427
428 static struct arm_smmu_master_cfg *
429 find_smmu_master_cfg(struct device *dev)
430 {
431         struct arm_smmu_master_cfg *cfg = NULL;
432         struct iommu_group *group = iommu_group_get(dev);
433
434         if (group) {
435                 cfg = iommu_group_get_iommudata(group);
436                 iommu_group_put(group);
437         }
438
439         return cfg;
440 }
441
442 static int insert_smmu_master(struct arm_smmu_device *smmu,
443                               struct arm_smmu_master *master)
444 {
445         struct rb_node **new, *parent;
446
447         new = &smmu->masters.rb_node;
448         parent = NULL;
449         while (*new) {
450                 struct arm_smmu_master *this
451                         = container_of(*new, struct arm_smmu_master, node);
452
453                 parent = *new;
454                 if (master->of_node < this->of_node)
455                         new = &((*new)->rb_left);
456                 else if (master->of_node > this->of_node)
457                         new = &((*new)->rb_right);
458                 else
459                         return -EEXIST;
460         }
461
462         rb_link_node(&master->node, parent, new);
463         rb_insert_color(&master->node, &smmu->masters);
464         return 0;
465 }
466
467 static int register_smmu_master(struct arm_smmu_device *smmu,
468                                 struct device *dev,
469                                 struct of_phandle_args *masterspec)
470 {
471         int i;
472         struct arm_smmu_master *master;
473
474         master = find_smmu_master(smmu, masterspec->np);
475         if (master) {
476                 dev_err(dev,
477                         "rejecting multiple registrations for master device %s\n",
478                         masterspec->np->name);
479                 return -EBUSY;
480         }
481
482         if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
483                 dev_err(dev,
484                         "reached maximum number (%d) of stream IDs for master device %s\n",
485                         MAX_MASTER_STREAMIDS, masterspec->np->name);
486                 return -ENOSPC;
487         }
488
489         master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
490         if (!master)
491                 return -ENOMEM;
492
493         master->of_node                 = masterspec->np;
494         master->cfg.num_streamids       = masterspec->args_count;
495
496         for (i = 0; i < master->cfg.num_streamids; ++i) {
497                 u16 streamid = masterspec->args[i];
498
499                 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
500                      (streamid >= smmu->num_mapping_groups)) {
501                         dev_err(dev,
502                                 "stream ID for master device %s greater than maximum allowed (%d)\n",
503                                 masterspec->np->name, smmu->num_mapping_groups);
504                         return -ERANGE;
505                 }
506                 master->cfg.streamids[i] = streamid;
507         }
508         return insert_smmu_master(smmu, master);
509 }
510
511 static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
512 {
513         struct arm_smmu_device *smmu;
514         struct arm_smmu_master *master = NULL;
515         struct device_node *dev_node = dev_get_dev_node(dev);
516
517         spin_lock(&arm_smmu_devices_lock);
518         list_for_each_entry(smmu, &arm_smmu_devices, list) {
519                 master = find_smmu_master(smmu, dev_node);
520                 if (master)
521                         break;
522         }
523         spin_unlock(&arm_smmu_devices_lock);
524
525         return master ? smmu : NULL;
526 }
527
528 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
529 {
530         int idx;
531
532         do {
533                 idx = find_next_zero_bit(map, end, start);
534                 if (idx == end)
535                         return -ENOSPC;
536         } while (test_and_set_bit(idx, map));
537
538         return idx;
539 }
540
541 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
542 {
543         clear_bit(idx, map);
544 }
545
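/*
 * Example (illustrative): find_next_zero_bit() alone is racy, so the
 * allocator above loops until test_and_set_bit() confirms this caller
 * actually claimed the slot. A typical pairing:
 *
 *	int idx = __arm_smmu_alloc_bitmap(smmu->context_map, 0,
 *					  smmu->num_context_banks);
 *	if (idx >= 0)
 *		__arm_smmu_free_bitmap(smmu->context_map, idx);
 */
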
546 /* Wait for any pending TLB invalidations to complete */
547 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
548 {
549         int count = 0;
550         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
551
552         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
553         while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
554                & sTLBGSTATUS_GSACTIVE) {
555                 cpu_relax();
556                 if (++count == TLB_LOOP_TIMEOUT) {
557                         dev_err_ratelimited(smmu->dev,
558                         "TLB sync timed out -- SMMU may be deadlocked\n");
559                         return;
560                 }
561                 udelay(1);
562         }
563 }
564
565 static void arm_smmu_tlb_sync(void *cookie)
566 {
567         struct arm_smmu_domain *smmu_domain = cookie;
568         __arm_smmu_tlb_sync(smmu_domain->smmu);
569 }
570
571 static void arm_smmu_tlb_inv_context(void *cookie)
572 {
573         struct arm_smmu_domain *smmu_domain = cookie;
574         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
575         struct arm_smmu_device *smmu = smmu_domain->smmu;
576         bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
577         void __iomem *base;
578
579         if (stage1) {
580                 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
581                 writel_relaxed(ARM_SMMU_CB_ASID(cfg),
582                                base + ARM_SMMU_CB_S1_TLBIASID);
583         } else {
584                 base = ARM_SMMU_GR0(smmu);
585                 writel_relaxed(ARM_SMMU_CB_VMID(cfg),
586                                base + ARM_SMMU_GR0_TLBIVMID);
587         }
588
589         __arm_smmu_tlb_sync(smmu);
590 }
591
592 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
593                                           size_t granule, bool leaf, void *cookie)
594 {
595         struct arm_smmu_domain *smmu_domain = cookie;
596         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
597         struct arm_smmu_device *smmu = smmu_domain->smmu;
598         bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
599         void __iomem *reg;
600
601         if (stage1) {
602                 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
603                 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
604
605                 if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
606                         iova &= ~0xfffUL;
607                         iova |= ARM_SMMU_CB_ASID(cfg);
608                         do {
609                                 writel_relaxed(iova, reg);
610                                 iova += granule;
611                         } while (size -= granule);
612 #ifdef CONFIG_64BIT
613                 } else {
614                         iova >>= 12;
615                         iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
616                         do {
617                                 writeq_relaxed(iova, reg);
618                                 iova += granule >> 12;
619                         } while (size -= granule);
620 #endif
621                 }
622 #ifdef CONFIG_64BIT
623         } else if (smmu->version == ARM_SMMU_V2) {
624                 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
625                 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
626                               ARM_SMMU_CB_S2_TLBIIPAS2;
627                 iova >>= 12;
628                 do {
629                         writeq_relaxed(iova, reg);
630                         iova += granule >> 12;
631                 } while (size -= granule);
632 #endif
633         } else {
634                 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
635                 writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
636         }
637 }
638
639 static struct iommu_gather_ops arm_smmu_gather_ops = {
640         .tlb_flush_all  = arm_smmu_tlb_inv_context,
641         .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
642         .tlb_sync       = arm_smmu_tlb_sync,
643 };
644
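/*
 * Sketch of the expected io-pgtable call flow (assumed, simplified):
 * the page-table code invalidates ranges as it unmaps and issues a
 * single sync once the walk completes:
 *
 *	tlb_add_flush(iova, size, granule, leaf, cookie);	(0 or more)
 *	tlb_sync(cookie);
 */
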
645 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
646 {
647         int flags, ret;
648         u32 fsr, far, fsynr, resume;
649         unsigned long iova;
650         struct iommu_domain *domain = dev;
651         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
652         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
653         struct arm_smmu_device *smmu = smmu_domain->smmu;
654         void __iomem *cb_base;
655
656         cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
657         fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
658
659         if (!(fsr & FSR_FAULT))
660                 return IRQ_NONE;
661
662         if (fsr & FSR_IGN)
663                 dev_err_ratelimited(smmu->dev,
664                                     "Unexpected context fault (fsr 0x%x)\n",
665                                     fsr);
666
667         fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
668         flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
669
670         far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
671         iova = far;
672 #ifdef CONFIG_64BIT
673         far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
674         iova |= ((unsigned long)far << 32);
675 #endif
676
677         if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
678                 ret = IRQ_HANDLED;
679                 resume = RESUME_RETRY;
680         } else {
681                 dev_err_ratelimited(smmu->dev,
682                     "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
683                     iova, fsynr, cfg->cbndx);
684                 ret = IRQ_NONE;
685                 resume = RESUME_TERMINATE;
686         }
687
688         /* Clear the faulting FSR */
689         writel(fsr, cb_base + ARM_SMMU_CB_FSR);
690
691         /* Retry or terminate any stalled transactions */
692         if (fsr & FSR_SS)
693                 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
694
695         return ret;
696 }
697
698 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
699 {
700         u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
701         struct arm_smmu_device *smmu = dev;
702         void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
703
704         gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
705         gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
706         gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
707         gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
708
709         if (!gfsr)
710                 return IRQ_NONE;
711
712         dev_err_ratelimited(smmu->dev,
713                 "Unexpected global fault, this could be serious\n");
714         dev_err_ratelimited(smmu->dev,
715                 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
716                 gfsr, gfsynr0, gfsynr1, gfsynr2);
717
718         writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
719         return IRQ_HANDLED;
720 }
721
722 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
723                                        struct io_pgtable_cfg *pgtbl_cfg)
724 {
725         u32 reg;
726         u64 reg64;
727         bool stage1;
728         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
729         struct arm_smmu_device *smmu = smmu_domain->smmu;
730         void __iomem *cb_base, *gr1_base;
731
732         gr1_base = ARM_SMMU_GR1(smmu);
733         stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
734         cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
735
736         if (smmu->version > ARM_SMMU_V1) {
737                 /*
738                  * CBA2R.
739                  * *Must* be initialised before CBAR, thanks to a VMID16
740                  * architectural oversight that affected some implementations.
741                  */
742 #ifdef CONFIG_64BIT
743                 reg = CBA2R_RW64_64BIT;
744 #else
745                 reg = CBA2R_RW64_32BIT;
746 #endif
747                 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
748         }
749
750         /* CBAR */
751         reg = cfg->cbar;
752         if (smmu->version == ARM_SMMU_V1)
753                 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
754
755         /*
756          * Use the weakest shareability/memory types, so they are
757          * overridden by the ttbcr/pte.
758          */
759         if (stage1) {
760                 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
761                         (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
762         } else {
763                 reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
764         }
765         writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
766
767         /* TTBRs */
768         if (stage1) {
769                 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
770
771                 reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
772                 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
773
774                 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
775                 reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
776                 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
777         } else {
778                 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
779                 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
780         }
781
782         /* TTBCR */
783         if (stage1) {
784                 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
785                 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
786                 if (smmu->version > ARM_SMMU_V1) {
787                         reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
788                         reg |= TTBCR2_SEP_UPSTREAM;
789                         writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
790                 }
791         } else {
792                 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
793                 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
794         }
795
796         /* MAIRs (stage-1 only) */
797         if (stage1) {
798                 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
799                 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
800                 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
801                 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
802         }
803
804         /* SCTLR */
805         reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
806         if (stage1)
807                 reg |= SCTLR_S1_ASIDPNE;
808 #ifdef __BIG_ENDIAN
809         reg |= SCTLR_E;
810 #endif
811         writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
812 }
813
814 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
815                                         struct arm_smmu_device *smmu)
816 {
817         int irq, start, ret = 0;
818         unsigned long ias, oas;
819         struct io_pgtable_ops *pgtbl_ops;
820         struct io_pgtable_cfg pgtbl_cfg;
821         enum io_pgtable_fmt fmt;
822         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
823         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
824
825         mutex_lock(&smmu_domain->init_mutex);
826         if (smmu_domain->smmu)
827                 goto out_unlock;
828
829         /*
830          * Mapping the requested stage onto what we support is surprisingly
831          * complicated, mainly because the spec allows S1+S2 SMMUs without
832          * support for nested translation. That means we end up with the
833          * following table:
834          *
835          * Requested        Supported        Actual
836          *     S1               N              S1
837          *     S1             S1+S2            S1
838          *     S1               S2             S2
839          *     S1               S1             S1
840          *     N                N              N
841          *     N              S1+S2            S2
842          *     N                S2             S2
843          *     N                S1             S1
844          *
845          * Note that you can't actually request stage-2 mappings.
846          */
847         if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
848                 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
849         if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
850                 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
851
852         switch (smmu_domain->stage) {
853         case ARM_SMMU_DOMAIN_S1:
854                 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
855                 start = smmu->num_s2_context_banks;
856                 ias = smmu->va_size;
857                 oas = smmu->ipa_size;
858                 if (IS_ENABLED(CONFIG_64BIT))
859                         fmt = ARM_64_LPAE_S1;
860                 else
861                         fmt = ARM_32_LPAE_S1;
862                 break;
863         case ARM_SMMU_DOMAIN_NESTED:
864                 /*
865                  * We will likely want to change this if/when KVM gets
866                  * involved.
867                  */
868         case ARM_SMMU_DOMAIN_S2:
869                 cfg->cbar = CBAR_TYPE_S2_TRANS;
870                 start = 0;
871                 ias = smmu->ipa_size;
872                 oas = smmu->pa_size;
873                 if (IS_ENABLED(CONFIG_64BIT))
874                         fmt = ARM_64_LPAE_S2;
875                 else
876                         fmt = ARM_32_LPAE_S2;
877                 break;
878         default:
879                 ret = -EINVAL;
880                 goto out_unlock;
881         }
882
883         ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
884                                       smmu->num_context_banks);
885         if (IS_ERR_VALUE(ret))
886                 goto out_unlock;
887
888         cfg->cbndx = ret;
889         if (smmu->version == ARM_SMMU_V1) {
890                 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
891                 cfg->irptndx %= smmu->num_context_irqs;
892         } else {
893                 cfg->irptndx = cfg->cbndx;
894         }
895
896         pgtbl_cfg = (struct io_pgtable_cfg) {
897                 .pgsize_bitmap  = arm_smmu_ops.pgsize_bitmap,
898                 .ias            = ias,
899                 .oas            = oas,
900                 .tlb            = &arm_smmu_gather_ops,
901                 .iommu_dev      = smmu->dev,
902         };
903
904         smmu_domain->smmu = smmu;
905         pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
906         if (!pgtbl_ops) {
907                 ret = -ENOMEM;
908                 goto out_clear_smmu;
909         }
910
911         /* Update our support page sizes to reflect the page table format */
912         arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
913
914         /* Initialise the context bank with our page table cfg */
915         arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
916
917         /*
918          * Request context fault interrupt. Do this last to avoid the
919          * handler seeing a half-initialised domain state.
920          */
921         irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
922         ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
923                           "arm-smmu-context-fault", domain);
924         if (IS_ERR_VALUE(ret)) {
925                 dev_err(smmu->dev, "failed to request context IRQ %d (%d)\n",
926                         cfg->irptndx, irq);
927                 cfg->irptndx = INVALID_IRPTNDX;
928         }
929
930         mutex_unlock(&smmu_domain->init_mutex);
931
932         /* Publish page table ops for map/unmap */
933         smmu_domain->pgtbl_ops = pgtbl_ops;
934         return 0;
935
936 out_clear_smmu:
937         smmu_domain->smmu = NULL;
938 out_unlock:
939         mutex_unlock(&smmu_domain->init_mutex);
940         return ret;
941 }
942
943 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
944 {
945         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
946         struct arm_smmu_device *smmu = smmu_domain->smmu;
947         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
948         void __iomem *cb_base;
949         int irq;
950
951         if (!smmu)
952                 return;
953
954         /*
955          * Disable the context bank and free the page tables before
956          * releasing the context bank itself.
957          */
958         cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
959         writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
960
961         if (cfg->irptndx != INVALID_IRPTNDX) {
962                 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
963                 free_irq(irq, domain);
964         }
965
966         free_io_pgtable_ops(smmu_domain->pgtbl_ops);
967         __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
968 }
969
970 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
971 {
972         struct arm_smmu_domain *smmu_domain;
973
974         if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
975                 return NULL;
976         /*
977          * Allocate the domain and initialise some of its data structures.
978          * We can't really do anything meaningful until we've added a
979          * master.
980          */
981         smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
982         if (!smmu_domain)
983                 return NULL;
984
985         if (type == IOMMU_DOMAIN_DMA &&
986             iommu_get_dma_cookie(&smmu_domain->domain)) {
987                 kfree(smmu_domain);
988                 return NULL;
989         }
990
991         mutex_init(&smmu_domain->init_mutex);
992         spin_lock_init(&smmu_domain->pgtbl_lock);
993
994         return &smmu_domain->domain;
995 }
996
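/*
 * Usage sketch via the core IOMMU API (assumed caller, not driver
 * code): an unmanaged domain is allocated and bound to a device with:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && iommu_attach_device(dom, dev))
 *		iommu_domain_free(dom);
 */
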
997 static void arm_smmu_domain_free(struct iommu_domain *domain)
998 {
999         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1000
1001         /*
1002          * Free the domain resources. We assume that all devices have
1003          * already been detached.
1004          */
1005         iommu_put_dma_cookie(domain);
1006         arm_smmu_destroy_domain_context(domain);
1007         kfree(smmu_domain);
1008 }
1009
1010 static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
1011                                           struct arm_smmu_master_cfg *cfg)
1012 {
1013         int i;
1014         struct arm_smmu_smr *smrs;
1015         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1016
1017         if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1018                 return 0;
1019
1020         if (cfg->smrs)
1021                 return -EEXIST;
1022
1023         smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
1024         if (!smrs) {
1025                 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1026                         cfg->num_streamids);
1027                 return -ENOMEM;
1028         }
1029
1030         /* Allocate the SMRs on the SMMU */
1031         for (i = 0; i < cfg->num_streamids; ++i) {
1032                 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1033                                                   smmu->num_mapping_groups);
1034                 if (IS_ERR_VALUE(idx)) {
1035                         dev_err(smmu->dev, "failed to allocate free SMR\n");
1036                         goto err_free_smrs;
1037                 }
1038
1039                 smrs[i] = (struct arm_smmu_smr) {
1040                         .idx    = idx,
1041                         .mask   = 0, /* We don't currently share SMRs */
1042                         .id     = cfg->streamids[i],
1043                 };
1044         }
1045
1046         /* It worked! Now, poke the actual hardware */
1047         for (i = 0; i < cfg->num_streamids; ++i) {
1048                 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1049                           smrs[i].mask << SMR_MASK_SHIFT;
1050                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1051         }
1052
1053         cfg->smrs = smrs;
1054         return 0;
1055
1056 err_free_smrs:
1057         while (--i >= 0)
1058                 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1059         kfree(smrs);
1060         return -ENOSPC;
1061 }
1062
1063 static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
1064                                       struct arm_smmu_master_cfg *cfg)
1065 {
1066         int i;
1067         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1068         struct arm_smmu_smr *smrs = cfg->smrs;
1069
1070         if (!smrs)
1071                 return;
1072
1073         /* Invalidate the SMRs before freeing back to the allocator */
1074         for (i = 0; i < cfg->num_streamids; ++i) {
1075                 u8 idx = smrs[i].idx;
1076
1077                 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1078                 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1079         }
1080
1081         cfg->smrs = NULL;
1082         kfree(smrs);
1083 }
1084
1085 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1086                                       struct arm_smmu_master_cfg *cfg)
1087 {
1088         int i, ret;
1089         struct arm_smmu_device *smmu = smmu_domain->smmu;
1090         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1091
1092         /*
1093          * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1094          * for all devices behind the SMMU. Note that we need to take
1095          * care when configuring SMRs for devices that are both a
1096          * platform_device and a PCI device (i.e. a PCI host controller).
1097          */
1098         if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1099                 return 0;
1100
1101         /* Devices in an IOMMU group may already be configured */
1102         ret = arm_smmu_master_configure_smrs(smmu, cfg);
1103         if (ret)
1104                 return ret == -EEXIST ? 0 : ret;
1105
1106         for (i = 0; i < cfg->num_streamids; ++i) {
1107                 u32 idx, s2cr;
1108
1109                 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1110                 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
1111                        (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
1112                 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1113         }
1114
1115         return 0;
1116 }
1117
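/*
 * Worked example (illustrative values): pointing stream-match entry 3
 * at context bank 5 programs S2CR3 (offset 0xc00 + 3 * 4) with
 * S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV | (5 << S2CR_CBNDX_SHIFT).
 */
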
1118 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
1119                                           struct arm_smmu_master_cfg *cfg)
1120 {
1121         int i;
1122         struct arm_smmu_device *smmu = smmu_domain->smmu;
1123         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1124
1125         /* An IOMMU group is torn down by the first device to be removed */
1126         if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1127                 return;
1128
1129         /*
1130          * We *must* clear the S2CR first, because freeing the SMR means
1131          * that it can be re-allocated immediately.
1132          */
1133         for (i = 0; i < cfg->num_streamids; ++i) {
1134                 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1135                 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1136
1137                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1138         }
1139
1140         arm_smmu_master_free_smrs(smmu, cfg);
1141 }
1142
1143 static void arm_smmu_detach_dev(struct device *dev,
1144                                 struct arm_smmu_master_cfg *cfg)
1145 {
1146         struct iommu_domain *domain = dev->archdata.iommu;
1147         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1148
1149         dev->archdata.iommu = NULL;
1150         arm_smmu_domain_remove_master(smmu_domain, cfg);
1151 }
1152
1153 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1154 {
1155         int ret;
1156         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1157         struct arm_smmu_device *smmu;
1158         struct arm_smmu_master_cfg *cfg;
1159
1160         smmu = find_smmu_for_device(dev);
1161         if (!smmu) {
1162                 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1163                 return -ENXIO;
1164         }
1165
1166         /* Ensure that the domain is finalised */
1167         ret = arm_smmu_init_domain_context(domain, smmu);
1168         if (IS_ERR_VALUE(ret))
1169                 return ret;
1170
1171         /*
1172          * Sanity check the domain. We don't support domains across
1173          * different SMMUs.
1174          */
1175         if (smmu_domain->smmu != smmu) {
1176                 dev_err(dev,
1177                         "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1178                         dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1179                 return -EINVAL;
1180         }
1181
1182         /* Looks ok, so add the device to the domain */
1183         cfg = find_smmu_master_cfg(dev);
1184         if (!cfg)
1185                 return -ENODEV;
1186
1187         /* Detach the dev from its current domain */
1188         if (dev->archdata.iommu)
1189                 arm_smmu_detach_dev(dev, cfg);
1190
1191         ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1192         if (!ret)
1193                 dev->archdata.iommu = domain;
1194         return ret;
1195 }
1196
1197 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1198                         phys_addr_t paddr, size_t size, int prot)
1199 {
1200         int ret;
1201         unsigned long flags;
1202         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1203         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1204
1205         if (!ops)
1206                 return -ENODEV;
1207
1208         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1209         ret = ops->map(ops, iova, paddr, size, prot);
1210         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1211         return ret;
1212 }
1213
1214 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1215                              size_t size)
1216 {
1217         size_t ret;
1218         unsigned long flags;
1219         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1220         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1221
1222         if (!ops)
1223                 return 0;
1224
1225         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1226         ret = ops->unmap(ops, iova, size);
1227         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1228         return ret;
1229 }
1230
1231 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1232                                               dma_addr_t iova)
1233 {
1234         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1235         struct arm_smmu_device *smmu = smmu_domain->smmu;
1236         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1237         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1238         struct device *dev = smmu->dev;
1239         void __iomem *cb_base;
1240         u32 tmp;
1241         u64 phys;
1242         unsigned long va;
1243
1244         cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1245
1246         /* ATS1 registers can only be written atomically */
1247         va = iova & ~0xfffUL;
1248         if (smmu->version == ARM_SMMU_V2)
1249                 smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1250         else
1251                 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
1252
1253         if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1254                                       !(tmp & ATSR_ACTIVE), 5, 50)) {
1255                 dev_err(dev,
1256                         "iova to phys timed out on %pad. Falling back to software table walk.\n",
1257                         &iova);
1258                 return ops->iova_to_phys(ops, iova);
1259         }
1260
1261         phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
1262         phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
1263
1264         if (phys & CB_PAR_F) {
1265                 dev_err(dev, "translation fault!\n");
1266                 dev_err(dev, "PAR = 0x%llx\n", phys);
1267                 return 0;
1268         }
1269
1270         return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1271 }
1272
1273 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1274                                         dma_addr_t iova)
1275 {
1276         phys_addr_t ret;
1277         unsigned long flags;
1278         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1279         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1280
1281         if (!ops)
1282                 return 0;
1283
1284         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1285         if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1286                         smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1287                 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1288         } else {
1289                 ret = ops->iova_to_phys(ops, iova);
1290         }
1291
1292         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1293
1294         return ret;
1295 }
1296
1297 static bool arm_smmu_capable(enum iommu_cap cap)
1298 {
1299         switch (cap) {
1300         case IOMMU_CAP_CACHE_COHERENCY:
1301                 /*
1302                  * Return true here as the SMMU can always send out coherent
1303                  * requests.
1304                  */
1305                 return true;
1306         case IOMMU_CAP_INTR_REMAP:
1307                 return true; /* MSIs are just memory writes */
1308         case IOMMU_CAP_NOEXEC:
1309                 return true;
1310         default:
1311                 return false;
1312         }
1313 }
1314
1315 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1316 {
1317         *((u16 *)data) = alias;
1318         return 0; /* Continue walking */
1319 }
1320
1321 static void __arm_smmu_release_pci_iommudata(void *data)
1322 {
1323         kfree(data);
1324 }
1325
1326 static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1327                                     struct iommu_group *group)
1328 {
1329         struct arm_smmu_master_cfg *cfg;
1330         u16 sid;
1331         int i;
1332
1333         cfg = iommu_group_get_iommudata(group);
1334         if (!cfg) {
1335                 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1336                 if (!cfg)
1337                         return -ENOMEM;
1338
1339                 iommu_group_set_iommudata(group, cfg,
1340                                           __arm_smmu_release_pci_iommudata);
1341         }
1342
1343         if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1344                 return -ENOSPC;
1345
1346         /*
1347          * Assume Stream ID == Requester ID for now.
1348          * We need a way to describe the ID mappings in FDT.
1349          */
1350         pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1351         for (i = 0; i < cfg->num_streamids; ++i)
1352                 if (cfg->streamids[i] == sid)
1353                         break;
1354
1355         /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1356         if (i == cfg->num_streamids)
1357                 cfg->streamids[cfg->num_streamids++] = sid;
1358
1359         return 0;
1360 }
1361
1362 static int arm_smmu_init_platform_device(struct device *dev,
1363                                          struct iommu_group *group)
1364 {
1365         struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1366         struct arm_smmu_master *master;
1367
1368         if (!smmu)
1369                 return -ENODEV;
1370
1371         master = find_smmu_master(smmu, dev->of_node);
1372         if (!master)
1373                 return -ENODEV;
1374
1375         iommu_group_set_iommudata(group, &master->cfg, NULL);
1376
1377         return 0;
1378 }
1379
1380 static int arm_smmu_add_device(struct device *dev)
1381 {
1382         struct iommu_group *group;
1383
1384         group = iommu_group_get_for_dev(dev);
1385         if (IS_ERR(group))
1386                 return PTR_ERR(group);
1387
1388         iommu_group_put(group);
1389         return 0;
1390 }
1391
1392 static void arm_smmu_remove_device(struct device *dev)
1393 {
1394         iommu_group_remove_device(dev);
1395 }
1396
1397 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1398 {
1399         struct iommu_group *group;
1400         int ret;
1401
1402         if (dev_is_pci(dev))
1403                 group = pci_device_group(dev);
1404         else
1405                 group = generic_device_group(dev);
1406
1407         if (IS_ERR(group))
1408                 return group;
1409
1410         if (dev_is_pci(dev))
1411                 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1412         else
1413                 ret = arm_smmu_init_platform_device(dev, group);
1414
1415         if (ret) {
1416                 iommu_group_put(group);
1417                 group = ERR_PTR(ret);
1418         }
1419
1420         return group;
1421 }
1422
1423 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1424                                     enum iommu_attr attr, void *data)
1425 {
1426         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1427
1428         switch (attr) {
1429         case DOMAIN_ATTR_NESTING:
1430                 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1431                 return 0;
1432         default:
1433                 return -ENODEV;
1434         }
1435 }
1436
1437 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1438                                     enum iommu_attr attr, void *data)
1439 {
1440         int ret = 0;
1441         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1442
1443         mutex_lock(&smmu_domain->init_mutex);
1444
1445         switch (attr) {
1446         case DOMAIN_ATTR_NESTING:
1447                 if (smmu_domain->smmu) {
1448                         ret = -EPERM;
1449                         goto out_unlock;
1450                 }
1451
1452                 if (*(int *)data)
1453                         smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1454                 else
1455                         smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1456
1457                 break;
1458         default:
1459                 ret = -ENODEV;
1460         }
1461
1462 out_unlock:
1463         mutex_unlock(&smmu_domain->init_mutex);
1464         return ret;
1465 }
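/*
 * A usage sketch (hypothetical caller, e.g. VFIO): nesting must be
 * requested through the generic attribute interface *before* any device
 * is attached, since the handler above returns -EPERM once
 * smmu_domain->smmu is set:
 *
 *	int nesting = 1;
 *
 *	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting))
 *		... fall back to a plain stage-1 domain ...
 */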
1466
1467 static struct iommu_ops arm_smmu_ops = {
1468         .capable                = arm_smmu_capable,
1469         .domain_alloc           = arm_smmu_domain_alloc,
1470         .domain_free            = arm_smmu_domain_free,
1471         .attach_dev             = arm_smmu_attach_dev,
1472         .map                    = arm_smmu_map,
1473         .unmap                  = arm_smmu_unmap,
1474         .map_sg                 = default_iommu_map_sg,
1475         .iova_to_phys           = arm_smmu_iova_to_phys,
1476         .add_device             = arm_smmu_add_device,
1477         .remove_device          = arm_smmu_remove_device,
1478         .device_group           = arm_smmu_device_group,
1479         .domain_get_attr        = arm_smmu_domain_get_attr,
1480         .domain_set_attr        = arm_smmu_domain_set_attr,
1481         .pgsize_bitmap          = -1UL, /* Restricted during device attach */
1482 };
1483
1484 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1485 {
1486         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1487         void __iomem *cb_base;
1488         int i = 0;
1489         u32 reg;
1490
1491         /* Clear the global FSR (fields are write-one-to-clear) */
1492         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1493         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1494
1495         /* Mark all SMRn as invalid and all S2CRn as bypass, or fault if the disable_bypass parameter is set */
1496         reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1497         for (i = 0; i < smmu->num_mapping_groups; ++i) {
1498                 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
1499                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
1500         }
1501
1502         /* Make sure all context banks are disabled and clear CB_FSR */
1503         for (i = 0; i < smmu->num_context_banks; ++i) {
1504                 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1505                 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1506                 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1507         }
1508
1509         /* Invalidate the TLB, just in case */
1510         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1511         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1512
1513         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1514
1515         /* Enable fault reporting */
1516         reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
1517
1518         /* Disable TLB broadcasting. */
1519         reg |= (sCR0_VMIDPNE | sCR0_PTM);
1520
1521         /* Enable client access, handling unmatched streams as appropriate */
1522         reg &= ~sCR0_CLIENTPD;
1523         if (disable_bypass)
1524                 reg |= sCR0_USFCFG;
1525         else
1526                 reg &= ~sCR0_USFCFG;
1527
1528         /* Disable forced broadcasting */
1529         reg &= ~sCR0_FB;
1530
1531         /* Don't upgrade barriers */
1532         reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1533
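	/*
	 * Note the ordering below: the tlb_sync drains the invalidations
	 * issued above, and the final write is a non-relaxed writel(), so
	 * the rest of the reset sequence is observed before the SMMU is
	 * re-enabled.
	 */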
1534         /* Push the button */
1535         __arm_smmu_tlb_sync(smmu);
1536         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1537 }
1538
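/*
 * Decode the 3-bit address-size fields of the ID registers (e.g.
 * IDR2.IAS/OAS/UBS) into bit widths; a raw field value of 2, for example,
 * means 40 bits. Values above 5 are reserved, so treat them as the
 * 48-bit maximum.
 */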
1539 static int arm_smmu_id_size_to_bits(int size)
1540 {
1541         switch (size) {
1542         case 0:
1543                 return 32;
1544         case 1:
1545                 return 36;
1546         case 2:
1547                 return 40;
1548         case 3:
1549                 return 42;
1550         case 4:
1551                 return 44;
1552         case 5:
1553         default:
1554                 return 48;
1555         }
1556 }
1557
1558 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1559 {
1560         unsigned long size;
1561         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1562         u32 id;
1563         bool cttw_dt, cttw_reg;
1564
1565         dev_notice(smmu->dev, "probing hardware configuration...\n");
1566         dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1567
1568         /* ID0 */
1569         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1570
1571         /* Restrict available stages based on module parameter */
1572         if (force_stage == 1)
1573                 id &= ~(ID0_S2TS | ID0_NTS);
1574         else if (force_stage == 2)
1575                 id &= ~(ID0_S1TS | ID0_NTS);
1576
1577         if (id & ID0_S1TS) {
1578                 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1579                 dev_notice(smmu->dev, "\tstage 1 translation\n");
1580         }
1581
1582         if (id & ID0_S2TS) {
1583                 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1584                 dev_notice(smmu->dev, "\tstage 2 translation\n");
1585         }
1586
1587         if (id & ID0_NTS) {
1588                 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1589                 dev_notice(smmu->dev, "\tnested translation\n");
1590         }
1591
1592         if (!(smmu->features &
1593                 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1594                 dev_err(smmu->dev, "\tno translation support!\n");
1595                 return -ENODEV;
1596         }
1597
1598         if ((id & ID0_S1TS) && ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
1599                 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1600                 dev_notice(smmu->dev, "\taddress translation ops\n");
1601         }
1602
1603         /*
1604          * In order for DMA API calls to work properly, we must defer to what
1605          * the DT says about coherency, regardless of what the hardware claims.
1606          * Fortunately, this also opens up a workaround for systems where the
1607          * ID register value has ended up configured incorrectly.
1608          */
1609         cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1610         cttw_reg = !!(id & ID0_CTTW);
1611         if (cttw_dt)
1612                 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1613         if (cttw_dt || cttw_reg)
1614                 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1615                            cttw_dt ? "" : "non-");
1616         if (cttw_dt != cttw_reg)
1617                 dev_notice(smmu->dev,
1618                            "\t(IDR0.CTTW overridden by dma-coherent property)\n");
1619
1620         if (id & ID0_SMS) {
1621                 u32 smr, sid, mask;
1622
1623                 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1624                 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1625                                            ID0_NUMSMRG_MASK;
1626                 if (smmu->num_mapping_groups == 0) {
1627                         dev_err(smmu->dev,
1628                                 "stream-matching supported, but no SMRs present!\n");
1629                         return -ENODEV;
1630                 }
1631
1632                 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1633                 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1634                 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1635                 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1636
1637                 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1638                 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1639                 if ((mask & sid) != sid) {
1640                         dev_err(smmu->dev,
1641                                 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1642                                 mask, sid);
1643                         return -ENODEV;
1644                 }
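		/*
		 * Worked example, assuming the 15-bit ID/MASK fields defined
		 * earlier in this file: a fully wired SMR reads back
		 * mask = sid = 0x7fff and the check above passes. If only
		 * eight mask bits were implemented, mask = 0xff and
		 * (mask & sid) != sid, so exact matching of all stream IDs
		 * cannot be guaranteed and we give up.
		 */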
1645
1646                 dev_notice(smmu->dev,
1647                            "\tstream matching with %u register groups, mask 0x%x",
1648                            smmu->num_mapping_groups, mask);
1649         } else {
1650                 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1651                                            ID0_NUMSIDB_MASK;
1652         }
1653
1654         /* ID1 */
1655         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1656         smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1657
1658         /* Check for size mismatch of SMMU address space from mapped region */
1659         size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1660         size *= 2 << smmu->pgshift;
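	/*
	 * Worked example: a 4K-page SMMU (pgshift = 12) with NUMPAGENDXB = 1
	 * gives (1 << 2) * (2 << 12) = 32K of register space.
	 */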
1661         if (smmu->size != size)
1662                 dev_warn(smmu->dev,
1663                         "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1664                         size, smmu->size);
1665
1666         smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1667         smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1668         if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1669                 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1670                 return -ENODEV;
1671         }
1672         dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1673                    smmu->num_context_banks, smmu->num_s2_context_banks);
1674
1675         /* ID2 */
1676         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1677         size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1678         smmu->ipa_size = size;
1679
1680         /* The output mask is also applied for bypass */
1681         size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1682         smmu->pa_size = size;
1683
1684         /*
1685          * What the page table walker can address actually depends on which
1686          * descriptor format is in use, but since a) we don't know that yet,
1687          * and b) it can vary per context bank, this will have to do...
1688          */
1689         if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1690                 dev_warn(smmu->dev,
1691                          "failed to set DMA mask for table walker\n");
1692
1693         if (smmu->version == ARM_SMMU_V1) {
1694                 smmu->va_size = smmu->ipa_size;
1695                 size = SZ_4K | SZ_2M | SZ_1G;
1696         } else {
1697                 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
1698                 smmu->va_size = arm_smmu_id_size_to_bits(size);
1699 #ifndef CONFIG_64BIT
1700                 smmu->va_size = min(32UL, smmu->va_size);
1701 #endif
1702                 size = 0;
1703                 if (id & ID2_PTFS_4K)
1704                         size |= SZ_4K | SZ_2M | SZ_1G;
1705                 if (id & ID2_PTFS_16K)
1706                         size |= SZ_16K | SZ_32M;
1707                 if (id & ID2_PTFS_64K)
1708                         size |= SZ_64K | SZ_512M;
1709         }
1710
1711         arm_smmu_ops.pgsize_bitmap &= size;
1712         dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1713
1714         if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1715                 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1716                            smmu->va_size, smmu->ipa_size);
1717
1718         if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1719                 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1720                            smmu->ipa_size, smmu->pa_size);
1721
1722         return 0;
1723 }
1724
1725 static const struct of_device_id arm_smmu_of_match[] = {
1726         { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
1727         { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
1728         { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
1729         { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
1730         { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
1731         { },
1732 };
1733 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
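/*
 * A hypothetical device-tree node this table would match (names, addresses
 * and stream IDs are illustrative only, in the style of the arm,smmu
 * binding document):
 *
 *	smmu: iommu@ba5e0000 {
 *		compatible = "arm,smmu-v1";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <2>;
 *		interrupts = <0 32 4>, <0 33 4>, <0 34 4>, <0 35 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd11d>;
 *	};
 */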
1734
1735 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1736 {
1737         const struct of_device_id *of_id;
1738         struct resource *res;
1739         struct arm_smmu_device *smmu;
1740         struct device *dev = &pdev->dev;
1741         struct rb_node *node;
1742         struct of_phandle_args masterspec;
1743         int num_irqs, i, err;
1744
1745         smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1746         if (!smmu) {
1747                 dev_err(dev, "failed to allocate arm_smmu_device\n");
1748                 return -ENOMEM;
1749         }
1750         smmu->dev = dev;
1751
1752         of_id = of_match_node(arm_smmu_of_match, dev->of_node);
1753         smmu->version = (enum arm_smmu_arch_version)of_id->data;
1754
1755         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1756         smmu->base = devm_ioremap_resource(dev, res);
1757         if (IS_ERR(smmu->base))
1758                 return PTR_ERR(smmu->base);
1759         smmu->size = resource_size(res);
1760
1761         if (of_property_read_u32(dev->of_node, "#global-interrupts",
1762                                  &smmu->num_global_irqs)) {
1763                 dev_err(dev, "missing #global-interrupts property\n");
1764                 return -ENODEV;
1765         }
1766
1767         num_irqs = 0;
1768         while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1769                 num_irqs++;
1770                 if (num_irqs > smmu->num_global_irqs)
1771                         smmu->num_context_irqs++;
1772         }
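	/*
	 * e.g. with #global-interrupts = <2> and eight IRQ resources in
	 * total, iterations 3..8 of the loop above bump the counter and we
	 * end up with six context interrupts.
	 */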
1773
1774         if (!smmu->num_context_irqs) {
1775                 dev_err(dev, "found %d interrupts but expected at least %d\n",
1776                         num_irqs, smmu->num_global_irqs + 1);
1777                 return -ENODEV;
1778         }
1779
1780         smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1781                                   GFP_KERNEL);
1782         if (!smmu->irqs) {
1783                 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1784                 return -ENOMEM;
1785         }
1786
1787         for (i = 0; i < num_irqs; ++i) {
1788                 int irq = platform_get_irq(pdev, i);
1789
1790                 if (irq < 0) {
1791                         dev_err(dev, "failed to get irq index %d\n", i);
1792                         return -ENODEV;
1793                 }
1794                 smmu->irqs[i] = irq;
1795         }
1796
1797         err = arm_smmu_device_cfg_probe(smmu);
1798         if (err)
1799                 return err;
1800
1801         i = 0;
1802         smmu->masters = RB_ROOT;
1803         while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1804                                            "#stream-id-cells", i,
1805                                            &masterspec)) {
1806                 err = register_smmu_master(smmu, dev, &masterspec);
1807                 if (err) {
1808                         dev_err(dev, "failed to add master %s\n",
1809                                 masterspec.np->name);
1810                         goto out_put_masters;
1811                 }
1812
1813                 i++;
1814         }
1815         dev_notice(dev, "registered %d master devices\n", i);
1816
1817         parse_driver_options(smmu);
1818
1819         if (smmu->version > ARM_SMMU_V1 &&
1820             smmu->num_context_banks != smmu->num_context_irqs) {
1821                 dev_err(dev,
1822                         "found only %d context interrupt(s) but %d required\n",
1823                         smmu->num_context_irqs, smmu->num_context_banks);
1824                 err = -ENODEV;
1825                 goto out_put_masters;
1826         }
1827
1828         for (i = 0; i < smmu->num_global_irqs; ++i) {
1829                 err = request_irq(smmu->irqs[i],
1830                                   arm_smmu_global_fault,
1831                                   IRQF_SHARED,
1832                                   "arm-smmu global fault",
1833                                   smmu);
1834                 if (err) {
1835                         dev_err(dev, "failed to request global IRQ %d (%u)\n",
1836                                 i, smmu->irqs[i]);
1837                         goto out_free_irqs;
1838                 }
1839         }
1840
1841         INIT_LIST_HEAD(&smmu->list);
1842         spin_lock(&arm_smmu_devices_lock);
1843         list_add(&smmu->list, &arm_smmu_devices);
1844         spin_unlock(&arm_smmu_devices_lock);
1845
1846         arm_smmu_device_reset(smmu);
1847         return 0;
1848
1849 out_free_irqs:
1850         while (i--)
1851                 free_irq(smmu->irqs[i], smmu);
1852
1853 out_put_masters:
1854         for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1855                 struct arm_smmu_master *master
1856                         = container_of(node, struct arm_smmu_master, node);
1857                 of_node_put(master->of_node);
1858         }
1859
1860         return err;
1861 }
1862
1863 static int arm_smmu_device_remove(struct platform_device *pdev)
1864 {
1865         int i;
1866         struct device *dev = &pdev->dev;
1867         struct arm_smmu_device *curr, *smmu = NULL;
1868         struct rb_node *node;
1869
1870         spin_lock(&arm_smmu_devices_lock);
1871         list_for_each_entry(curr, &arm_smmu_devices, list) {
1872                 if (curr->dev == dev) {
1873                         smmu = curr;
1874                         list_del(&smmu->list);
1875                         break;
1876                 }
1877         }
1878         spin_unlock(&arm_smmu_devices_lock);
1879
1880         if (!smmu)
1881                 return -ENODEV;
1882
1883         for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1884                 struct arm_smmu_master *master
1885                         = container_of(node, struct arm_smmu_master, node);
1886                 of_node_put(master->of_node);
1887         }
1888
1889         if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
1890                 dev_err(dev, "removing device with active domains!\n");
1891
1892         for (i = 0; i < smmu->num_global_irqs; ++i)
1893                 free_irq(smmu->irqs[i], smmu);
1894
1895         /* Turn the thing off */
1896         writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1897         return 0;
1898 }
1899
1900 static struct platform_driver arm_smmu_driver = {
1901         .driver = {
1902                 .name           = "arm-smmu",
1903                 .of_match_table = of_match_ptr(arm_smmu_of_match),
1904         },
1905         .probe  = arm_smmu_device_dt_probe,
1906         .remove = arm_smmu_device_remove,
1907 };
1908
1909 static int __init arm_smmu_init(void)
1910 {
1911         struct device_node *np;
1912         int ret;
1913
1914         /*
1915          * Play nice with systems that don't have an ARM SMMU by checking that
1916          * an ARM SMMU exists in the system before proceeding with the driver
1917          * and IOMMU bus operation registration.
1918          */
1919         np = of_find_matching_node(NULL, arm_smmu_of_match);
1920         if (!np)
1921                 return 0;
1922
1923         of_node_put(np);
1924
1925         ret = platform_driver_register(&arm_smmu_driver);
1926         if (ret)
1927                 return ret;
1928
1929         /* Oh, for a proper bus abstraction */
1930         if (!iommu_present(&platform_bus_type))
1931                 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1932
1933 #ifdef CONFIG_ARM_AMBA
1934         if (!iommu_present(&amba_bustype))
1935                 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1936 #endif
1937
1938 #ifdef CONFIG_PCI
1939         if (!iommu_present(&pci_bus_type))
1940                 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
1941 #endif
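	/*
	 * Note: bus_set_iommu() also walks devices already present on each
	 * bus and runs our .add_device hook for them, so masters that
	 * probed before the SMMU still get placed into IOMMU groups.
	 */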
1942
1943         return 0;
1944 }
1945
1946 static void __exit arm_smmu_exit(void)
1947 {
1948         platform_driver_unregister(&arm_smmu_driver);
1949 }
1950
1951 subsys_initcall(arm_smmu_init);
1952 module_exit(arm_smmu_exit);
1953
1954 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
1955 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
1956 MODULE_LICENSE("GPL v2");