/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "qman_priv.h"
33 /* Enable portal interupts (as opposed to polling mode) */
34 #define CONFIG_FSL_DPA_PIRQ_SLOW 1
35 #define CONFIG_FSL_DPA_PIRQ_FAST 1
37 static struct cpumask portal_cpus;
38 /* protect qman global registers and global data shared among portals */
39 static DEFINE_SPINLOCK(qman_lock);
41 static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
43 #ifdef CONFIG_FSL_PAMU
44 struct device *dev = pcfg->dev;
46 struct iommu_domain_geometry geom_attr;
47 struct pamu_stash_attribute stash_attr;
50 pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
51 if (!pcfg->iommu_domain) {
52 dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
55 geom_attr.aperture_start = 0;
56 geom_attr.aperture_end =
57 ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
58 geom_attr.force_aperture = true;
59 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
62 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
66 ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
69 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
74 stash_attr.cache = PAMU_ATTR_CACHE_L1;
75 ret = iommu_domain_set_attr(pcfg->iommu_domain,
76 DOMAIN_ATTR_FSL_PAMU_STASH,
79 dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
83 ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
84 IOMMU_READ | IOMMU_WRITE);
86 dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
90 ret = iommu_attach_device(pcfg->iommu_domain, dev);
92 dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
96 ret = iommu_domain_set_attr(pcfg->iommu_domain,
97 DOMAIN_ATTR_FSL_PAMU_ENABLE,
100 dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
102 goto out_detach_device;
107 qman_set_sdest(pcfg->channel, cpu);
111 #ifdef CONFIG_FSL_PAMU
113 iommu_detach_device(pcfg->iommu_domain, NULL);
115 iommu_domain_free(pcfg->iommu_domain);
116 pcfg->iommu_domain = NULL;
120 static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
122 struct qman_portal *p;
125 /* We need the same LIODN offset for all portals */
126 qman_liodn_fixup(pcfg->channel);
128 pcfg->iommu_domain = NULL;
129 portal_set_cpu(pcfg, pcfg->cpu);
131 p = qman_create_affine_portal(pcfg, NULL);
133 dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
134 __func__, pcfg->cpu);
138 /* Determine what should be interrupt-vs-poll driven */
139 #ifdef CONFIG_FSL_DPA_PIRQ_SLOW
140 irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
143 #ifdef CONFIG_FSL_DPA_PIRQ_FAST
144 irq_sources |= QM_PIRQ_DQRI;
146 qman_p_irqsource_add(p, irq_sources);
148 spin_lock(&qman_lock);
149 if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
150 /* all assigned portals are initialized now */
153 spin_unlock(&qman_lock);
155 dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
160 static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
163 #ifdef CONFIG_FSL_PAMU /* TODO */
164 struct pamu_stash_attribute stash_attr;
167 if (pcfg->iommu_domain) {
168 stash_attr.cpu = cpu;
169 stash_attr.cache = PAMU_ATTR_CACHE_L1;
170 ret = iommu_domain_set_attr(pcfg->iommu_domain,
171 DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
174 "Failed to update pamu stash setting\n");
179 qman_set_sdest(pcfg->channel, cpu);
182 static void qman_offline_cpu(unsigned int cpu)
184 struct qman_portal *p;
185 const struct qm_portal_config *pcfg;
187 p = affine_portals[cpu];
189 pcfg = qman_get_qm_portal_config(p);
191 irq_set_affinity(pcfg->irq, cpumask_of(0));
192 qman_portal_update_sdest(pcfg, 0);
197 static void qman_online_cpu(unsigned int cpu)
199 struct qman_portal *p;
200 const struct qm_portal_config *pcfg;
202 p = affine_portals[cpu];
204 pcfg = qman_get_qm_portal_config(p);
206 irq_set_affinity(pcfg->irq, cpumask_of(cpu));
207 qman_portal_update_sdest(pcfg, cpu);
212 static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
213 unsigned long action, void *hcpu)
215 unsigned int cpu = (unsigned long)hcpu;
219 case CPU_ONLINE_FROZEN:
220 qman_online_cpu(cpu);
222 case CPU_DOWN_PREPARE:
223 case CPU_DOWN_PREPARE_FROZEN:
224 qman_offline_cpu(cpu);
231 static struct notifier_block qman_hotplug_cpu_notifier = {
232 .notifier_call = qman_hotplug_cpu_callback,
235 static int qman_portal_probe(struct platform_device *pdev)
237 struct device *dev = &pdev->dev;
238 struct device_node *node = dev->of_node;
239 struct qm_portal_config *pcfg;
240 struct resource *addr_phys[2];
245 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
251 addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
254 dev_err(dev, "Can't get %s property 'reg::CE'\n",
259 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
262 dev_err(dev, "Can't get %s property 'reg::CI'\n",
267 channel = of_get_property(node, "cell-index", &len);
268 if (!channel || (len != 4)) {
269 dev_err(dev, "Can't get %s property 'cell-index'\n",
273 pcfg->channel = *channel;
275 irq = platform_get_irq(pdev, 0);
277 dev_err(dev, "Can't get %s IRQ\n", node->full_name);
282 va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
286 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
288 va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
289 _PAGE_GUARDED | _PAGE_NO_CACHE);
293 pcfg->addr_virt[DPAA_PORTAL_CI] = va;
295 pcfg->pools = qm_get_pools_sdqcr();
297 spin_lock(&qman_lock);
298 cpu = cpumask_next_zero(-1, &portal_cpus);
299 if (cpu >= nr_cpu_ids) {
300 /* unassigned portal, skip init */
301 spin_unlock(&qman_lock);
305 cpumask_set_cpu(cpu, &portal_cpus);
306 spin_unlock(&qman_lock);
309 if (!init_pcfg(pcfg))
312 /* clear irq affinity if assigned cpu is offline */
313 if (!cpu_online(cpu))
314 qman_offline_cpu(cpu);
319 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
321 dev_err(dev, "ioremap failed\n");
325 static const struct of_device_id qman_portal_ids[] = {
327 .compatible = "fsl,qman-portal",
331 MODULE_DEVICE_TABLE(of, qman_portal_ids);
333 static struct platform_driver qman_portal_driver = {
335 .name = KBUILD_MODNAME,
336 .of_match_table = qman_portal_ids,
338 .probe = qman_portal_probe,
341 static int __init qman_portal_driver_register(struct platform_driver *drv)
345 ret = platform_driver_register(drv);
349 register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);