 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for show_stack */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>

#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format. We shift the
	 * address and OR in the magic from the device tree. */
	start |= tbl->it_busno;

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
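
/*
 * On non-LPAR pSeries the TCE table lives in system memory and is
 * written directly: tce_build_pSeries() packs the real page number of
 * each 4K (TCE_PAGE_SIZE) page being mapped, __pa(uaddr) >> TCE_SHIFT,
 * into the bits above TCE_RPN_SHIFT together with the read/write
 * permission bits, one entry per page.  Afterwards, bridges that ask
 * for it get their MMIO software-invalidate register kicked through
 * tce_invalidate_pSeries_sw() above.
 */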
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	/* can't move this out since we might cross MEMBLOCK boundary */
	rpn = __pa(uaddr) >> TCE_SHIFT;
	*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

	uaddr += TCE_PAGE_SIZE;

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
	tces = tcep = ((__be64 *)tbl->it_base) + index;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
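
/*
 * The *_pSeriesLP variants below are used when running under a
 * hypervisor: the TCE table is owned by firmware, so entries are
 * changed through hcalls rather than direct stores.  plpar_tce_put()
 * wraps H_PUT_TCE and takes the window's LIOBN (kept in tbl->it_index)
 * plus the byte offset of the entry within the window, which is why
 * the TCE index is shifted left by 12 below.
 */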
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
	rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		tce_free_pSeriesLP(tbl, tcenum_start,
				   (npages_start - (npages + 1)));

	if (rc && printk_ratelimit()) {
		printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		printk("\ttce val = 0x%llx\n", tce);
		show_stack(current, (unsigned long *)__get_SP());
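
/*
 * When firmware advertises FW_FEATURE_MULTITCE, tce_buildmulti_pSeriesLP()
 * batches updates: TCEs are staged in a per-cpu page (tce_page) and
 * handed to the hypervisor with plpar_tce_put_indirect(), up to one
 * page worth of entries (4096/TCE_ENTRY_SIZE) per call.  If that page
 * cannot be allocated, it simply falls back to the one-at-a-time path
 * above.
 */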
static DEFINE_PER_CPU(__be64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
	long tcenum_start = tcenum, npages_start = npages;

	return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
	/* If allocation fails, fall back to the loop implementation */
	local_irq_restore(flags);
	return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
	__get_cpu_var(tce_page) = tcep;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	 * Set up the page with TCE data, looping through and setting
	limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

	for (l = 0; l < limit; l++) {
		tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

	rc = plpar_tce_put_indirect((u64)tbl->it_index,
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
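
/*
 * Freeing mirrors the build paths: tce_free_pSeriesLP() clears one
 * entry at a time by putting a zero TCE with plpar_tce_put(), while
 * tce_freemulti_pSeriesLP() uses plpar_tce_stuff() to clear a whole
 * run of npages entries with a single call.
 */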
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
	rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

	if (rc && printk_ratelimit()) {
		printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32 liobn;		/* tce table number */
	__be64 dma_base;	/* address hi,lo */
	__be32 tce_shift;	/* ilog2(tce_page_size) */
	__be32 window_shift;	/* ilog2(tce_window_size) */

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;

/* Dynamic DMA Window support */
struct ddw_query_response {
	__be32 windows_available;
	__be32 largest_available_block;
	__be32 migration_capable;

struct ddw_create_response {

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
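
/*
 * Dynamic DMA windows (DDW): when firmware exposes "ibm,ddw-applicable",
 * RTAS provides query/create/remove calls for an additional 64-bit DMA
 * window.  If a window large enough to cover all of memory can be
 * created, RAM is mapped 1:1 into it and the result is recorded in the
 * DIRECT64_PROPNAME property, so the device can use direct DMA at a
 * fixed offset instead of going through the 32-bit IOMMU window.
 */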
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	 * Set up the page with TCE data, looping through and setting
	limit = min_t(long, num_tce, 512);
	dma_offset = next + be64_to_cpu(maprange->dma_base);

	rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),

	next += limit * tce_size;
	} while (num_tce > 0 && !rc);
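
/*
 * tce_setrange_multi_pSeriesLP() is the mapping counterpart, used when
 * a direct window is first populated and again when memory is
 * hotplug-added: it builds read/write TCEs for the given pfn range in
 * the per-cpu page and pushes them with plpar_tce_put_indirect(), a
 * page of entries per call, at that memory's offset inside the window.
 */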
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
	__get_cpu_var(tce_page) = tcep;

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	 * Set up the page with TCE data, looping through and setting
	limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
	dma_offset = next + be64_to_cpu(maprange->dma_base);

	for (l = 0; l < limit; l++) {
		tcep[l] = cpu_to_be64(proto_tce | next);

	rc = plpar_tce_put_indirect(liobn,
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
					     unsigned long num_pfn, void *arg)
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
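
/*
 * iommu_table_setparms() below fills in an iommu_table for the
 * non-LPAR case from the PHB's "linux,tce-base"/"linux,tce-size"
 * properties, carving the bus's share out of the PHB's 2GB DMA space;
 * iommu_table_setparms_lpar() does the same for LPARs by parsing the
 * LIOBN, offset and size out of the firmware-provided "ibm,dma-window"
 * property.
 */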
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
	struct device_node *node;
	const unsigned long *basep, *sw_inval;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
		       "missing tce entries !\n", dn->full_name);

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	 * This property contains information on how to
	 * invalidate the TCE entry. The first property is
	 * the base MMIO address used to invalidate entries.
	 * The second property tells us the format of the TCE
	 * invalidate (whether it needs to be shifted) and
	 * some magic routing info to add to our invalidate
	tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
	tbl->it_busno = sw_inval[1]; /* overload this with magic */
	tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	/* This is not a root bus, any setup will be done for the
	 * device-side of the bridge in iommu_dev_setup_pSeries().

	/* Check if the ISA bus on the system is under
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.

	/* No ISA/IDE - just set window size and return */
	pci->phb->dma_window_size = 0x80000000ul;	/* To be divided */

	while (pci->phb->dma_window_size * children > 0x80000000ul)
		pci->phb->dma_window_size >>= 1;
	pr_debug("No ISA/IDE, window size is 0x%llx\n",
		 pci->phb->dma_window_size);
	pci->phb->dma_window_base_cur = 0;

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA

	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
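
/*
 * Example of the split above: with an ISA/IDE bridge present and
 * children == 4, the PHB keeps 256MB for ISA/IDE (a 128MB table
 * starting at the 128MB mark) and then halves the per-slot size from
 * 0x80000000 until size * children fits in the remaining 1.75GB:
 * 0x80000000 -> 0x40000000 -> 0x20000000 -> 0x10000000, i.e. each
 * child bus ends up with a 256MB DMA window.
 */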
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");

	pr_debug(" parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->iommu_table);
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
	printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

early_param("disable_ddw", disable_ddw_setup);
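
/*
 * Booting with "disable_ddw" on the kernel command line keeps every
 * device on the 32-bit IOMMU window even when the platform could offer
 * a 64-bit direct window; the flag is checked in dma_set_mask_pSeriesLP()
 * and dma_get_required_mask_pSeriesLP() below.
 */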
static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	pr_warning("%s: failed to remove DMA window: rtas returned "
		   "%d to ibm,remove-pe-dma-window(%x) %llx\n",
		   np->full_name, ret, ddw_avail[2], liobn);
	pr_debug("%s: successfully removed DMA window: rtas returned "
		 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
		 np->full_name, ret, ddw_avail[2], liobn);

static void remove_ddw(struct device_node *np)
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))

	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	pr_warning("%s failed to clear tces in window.\n",
	pr_debug("%s successfully cleared tces in window.\n",

	__remove_ddw(np, ddw_avail, liobn);

	ret = of_remove_property(np, win64);
	pr_warning("%s: failed to remove direct window property: %d\n",

static u64 find_existing_ddw(struct device_node *pdn)
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);

	spin_unlock(&direct_window_list_lock);
static void __restore_default_window(struct eeh_dev *edev,
				     u32 ddw_restore_token)
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
			BUID_HI(buid), BUID_LO(buid));
	} while (rtas_busy_delay(ret));
	pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
		ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);

static int find_existing_ddw_windows(void)
	struct device_node *pdn;
	const struct dynamic_dma_window_prop *direct64;
	const u32 *ddw_extensions;

	if (!firmware_has_feature(FW_FEATURE_LPAR))

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);

		 * We need to ensure the IOMMU table is active when we
		 * return from the IOMMU setup so that the common code
		 * can clear the table or find the holes. To that end,
		 * first, remove any existing DDW configuration.
		 * Second, if we are running on a new enough level of
		 * firmware where the restore API is present, use it to
		 * restore the 32-bit window, which was removed in
		 * If the API is not present, then create_ddw couldn't
		 * have removed the 32-bit window in the first place, so
		 * removing the DDW configuration should be sufficient.
		ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
		if (ddw_extensions && ddw_extensions[0] > 0)
			__restore_default_window(of_node_to_eeh_dev(pdn),

machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
	struct eeh_dev *edev;

	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
	struct eeh_dev *edev;

	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	/* extra outputs are LIOBN and dma-addr (hi, lo) */
	ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
			BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
static void restore_default_window(struct pci_dev *dev,
				   u32 ddw_restore_token)
	__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;

static LIST_HEAD(failed_ddw_pdn_list);

 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	struct ddw_query_response query;
	struct ddw_create_response create;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	const u32 *uninitialized_var(ddw_extensions);
	u32 ddw_restore_token = 0;
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	const void *dma_window = NULL;
	unsigned long liobn, offset, size;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);

	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))

	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))

	 * the extensions property is only required to exist in certain
	 * levels of firmware and later
	 * the ibm,ddw-extensions property is a list with the first
	 * element containing the number of extensions and each
	 * subsequent entry is a value corresponding to that extension
	ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
	if (ddw_extensions) {
		 * each new defined extension length should be added to
		 * the top of the switch so the "earlier" entries also
		switch (ddw_extensions[0]) {
		/* ibm,reset-pe-dma-windows */
			ddw_restore_token = ddw_extensions[1];

	 * Only remove the existing DMA window if we can restore back to
	 * the default state. Removing the existing window maximizes the
	 * resources available to firmware for dynamic window creation.
	if (ddw_restore_token) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
		__remove_ddw(pdn, ddw_avail, liobn);

	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
		goto out_restore_window;

	if (query.windows_available == 0) {
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_restore_window;

	if (be32_to_cpu(query.page_size) & 4) {
		page_shift = 24; /* 16MB */
	} else if (be32_to_cpu(query.page_size) & 2) {
		page_shift = 16; /* 64kB */
	} else if (be32_to_cpu(query.page_size) & 1) {
		page_shift = 12; /* 4kB */
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
		goto out_restore_window;

	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_restore_window;

	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
		"couldn't allocate property for 64bit dma window\n");
		goto out_restore_window;

	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		"couldn't allocate property name and value\n");

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);

	ddwprop->liobn = create.liobn;
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;

	ret = of_add_property(pdn, win64);
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);

	kfree(win64->value);

	if (ddw_restore_token)
		restore_default_window(dev, ddw_restore_token);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);

	list_add(&fpdn->list, &failed_ddw_pdn_list);

	mutex_unlock(&direct_window_init_mutex);
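
/*
 * In short, enable_ddw() proceeds as: check the failed-PE list, read
 * ibm,ddw-applicable (and ibm,ddw-extensions), optionally remove the
 * default 32-bit window, query for a free window, pick the largest
 * supported page size, create the window, map all of system RAM into
 * it, publish the DIRECT64_PROPNAME property and remember the window
 * on direct_window_list.  On failure it unwinds in reverse order,
 * restores the default window when possible, and records the PE on
 * failed_ddw_pdn_list so later functions of the same device don't
 * retry.
 */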
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));

	pr_debug(" parent is %s\n", pdn->full_name);

	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug(" created table: %p\n", pci->iommu_table);
	pr_debug(" found DMA window, table: %p\n", pci->iommu_table);

	set_iommu_table_base(&dev->dev, pci->iommu_table);
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;

	if (!dev_is_pci(dev))

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
		     pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);

		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);

	if (!dma_supported(dev, dma_mask))

	*dev->dma_mask = dma_mask;
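
/*
 * Drivers reach this through the generic DMA API; for example a
 * dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) call from a driver's
 * probe routine is what lets enable_ddw() run for that device.
 * Anything smaller than a full 64-bit mask (or a later downgrade of
 * the mask) keeps the device on dma_iommu_ops and the 32-bit window.
 */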
static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
			if (of_get_property(dn, "ibm,dma-window", NULL))
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
		    of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);

	return dma_iommu_ops.get_required_mask(dev);

#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries NULL
#define pci_dma_dev_setup_pSeries NULL
#define pci_dma_bus_setup_pSeriesLP NULL
#define pci_dma_dev_setup_pSeriesLP NULL
#define dma_set_mask_pSeriesLP NULL
#define dma_get_required_mask_pSeriesLP NULL
#endif /* !CONFIG_PCI */
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
	struct direct_window *window;
	struct memory_notify *arg = data;

	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		spin_unlock(&direct_window_list_lock);
	case MEM_CANCEL_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		spin_unlock(&direct_window_list_lock);

	if (ret && action != MEM_CANCEL_ONLINE)

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
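
/*
 * The memory notifier keeps the direct windows coherent across memory
 * hotplug: ranges about to come online are mapped into every
 * registered window, and the mapping is torn down again if the online
 * is cancelled.
 */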
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	case OF_RECONFIG_DETACH_NODE:
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
		spin_unlock(&direct_window_list_lock);

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
/* These are called very early. */
void iommu_init_early_pSeries(void)
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free = tce_freemulti_pSeriesLP;
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free = tce_free_pSeriesLP;
		ppc_md.tce_get = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	ppc_md.tce_build = tce_build_pSeries;
	ppc_md.tce_free = tce_free_pSeries;
	ppc_md.tce_get = tce_get_pseries;
	ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
	ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
static int __init disable_multitce(char *str)
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;

__setup("multitce=", disable_multitce);
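
/*
 * Booting with "multitce=off" therefore drops back to one H_PUT_TCE
 * call per TCE even on firmware that supports the batched interface,
 * e.g. as a debugging aid when chasing DMA mapping problems.
 */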