/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
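/*
 * Sizing check (derived from the define above): 0x10000000 is 256MB and
 * 0x1000 is the 4K TCE page size, so the window needs 256MB / 4KB =
 * 65536 TCEs; at 8 bytes each that is a 512KB table.
 */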
#define POWERNV_IOMMU_DEFAULT_LEVELS	1
#define POWERNV_IOMMU_MAX_LEVELS	5

static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);

static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%02x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);
#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
	if (!strncmp(str, "nobypass", 8)) {
		pnv_iommu_bypass_disabled = true;
		pr_info("PowerNV: IOMMU bypass window disabled.\n");
	str += strcspn(str, ",");

early_param("iommu", iommu_setup);
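/*
 * Usage note: booting with "iommu=nobypass" on the kernel command line
 * sets pnv_iommu_bypass_disabled, which keeps the 64-bit TCE bypass
 * window off in pnv_pci_ioda2_setup_default_config() below.
 */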
static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %d on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %d was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

static unsigned int pnv_ioda_alloc_pe(struct pnv_phb *phb)
	pe = find_next_zero_bit(phb->ioda.pe_alloc,
				phb->ioda.total_pe_num, 0);
	if (pe >= phb->ioda.total_pe_num)
		return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {

	/* Mark the M64 BAR assigned */
	set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);

	/*
	 * Strip off the segment used by the reserved PE, which is
	 * expected to be 0 or the last one of the PE capacity.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += phb->ioda.m64_segsize;
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= phb->ioda.m64_segsize;
		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe_idx);

	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	resource_size_t base, sgsz, start, end;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
				set_bit(segno, pe_bitmap);
				pnv_ioda_reserve_pe(phb, segno);
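/*
 * Segment arithmetic, as an example: the resource range relative to
 * m64_base is rounded out to segment boundaries, and each touched
 * segment number doubles as a PE number. With a 64GB M64 window and
 * 256 PEs, each segment is 256MB, so a BAR at base + 512MB lands in
 * segment (and PE) #2.
 */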
static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,

static unsigned int pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return IODA_INVALID_PE;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
		pr_warn("%s: Out of memory!\n",
		return IODA_INVALID_PE;

	/* Figure out the PE numbers reserved by the M64 segments */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own an M64 window; it may all be
	 * contributed by its child buses. In that case, we don't need
	 * to pick an M64-dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		return IODA_INVALID_PE;

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;

		pe->flags |= PNV_IODA_PE_MASTER;
		INIT_LIST_HEAD(&pe->slaves);

		pe->flags |= PNV_IODA_PE_SLAVE;
		pe->master = master_pe;
		list_add_tail(&pe->list, &master_pe->slaves);

	return master_pe->pe_number;
static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;

	/* FIXME: Support M64 for P7IOC */
	if (phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported on this PHB type\n");

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
		pr_info("  No <ibm,opal-m64-window> on %s\n",

	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;
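/*
 * Property layout, as implied by the reads above: "ibm,opal-m64-window"
 * holds three 2-cell values -- <PCI address> <parent address> <size> --
 * so pci_addr comes from r, the translated CPU start from r + 2 and
 * the window size from r + 4.
 */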
	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
		res->start, res->end, pci_addr);

	/* Use last M64 BAR to cover M64 window */
	phb->ioda.m64_bar_idx = 15;
	phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
		pe_no = pe->pe_number;

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
	struct pnv_ioda_pe *pe, *slave;

	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);

	if (!(pe->flags & PNV_IODA_PE_MASTER))

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
	struct pnv_ioda_pe *slave, *pe;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;

	/* Check the slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;

	 * Override the result based on the ascending
/*
 * Currently these two are only used when MSIs are enabled; this will
 * change, but in the meantime we need to protect them to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (pdn->pe_number == IODA_INVALID_PE)
	return &phb->ioda.pe_array[pdn->pe_number];
#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",

	if (!(child->flags & PNV_IODA_PE_MASTER))

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list)
			opal_pci_eeh_freeze_clear(phb->opal_id,
				OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/*
	 * Associate the PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, an error originating
	 * from the PE might contribute to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);

		pdev = pdev->bus->self;
#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;

	/* Currently we only deconfigure VF PEs. A bus PE will always be there. */

	dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
	fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
	parent = pe->pbus->self;
	if (pe->flags & PNV_IODA_PE_BUS_ALL)
		count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;

	case  1: bcomp = OpalPciBusAll;		break;
	case  2: bcomp = OpalPciBus7Bits;	break;
	case  4: bcomp = OpalPciBus6Bits;	break;
	case  8: bcomp = OpalPciBus5Bits;	break;
	case 16: bcomp = OpalPciBus4Bits;	break;
	case 32: bcomp = OpalPciBus3Bits;	break;
		dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
		/* Do an exact match only */
		bcomp = OpalPciBusAll;
	rid_end = pe->rid + (count << 8);

	if (pe->flags & PNV_IODA_PE_VF)
		parent = pe->parent_dev;
		parent = pe->pdev->bus->self;
	bcomp = OpalPciBusAll;
	dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
	fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
	rid_end = pe->rid + 1;

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = 0;

	/* Release from all parents' PELT-V */
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		parent = parent->bus->self;

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->parent_dev = NULL;

#endif /* CONFIG_PCI_IOV */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */

	dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
	fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
	parent = pe->pbus->self;
	if (pe->flags & PNV_IODA_PE_BUS_ALL)
		count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;

	case  1: bcomp = OpalPciBusAll;		break;
	case  2: bcomp = OpalPciBus7Bits;	break;
	case  4: bcomp = OpalPciBus6Bits;	break;
	case  8: bcomp = OpalPciBus5Bits;	break;
	case 16: bcomp = OpalPciBus4Bits;	break;
	case 32: bcomp = OpalPciBus3Bits;	break;
		dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
		/* Do an exact match only */
		bcomp = OpalPciBusAll;
	rid_end = pe->rid + (count << 8);

#ifdef CONFIG_PCI_IOV
	if (pe->flags & PNV_IODA_PE_VF)
		parent = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
		parent = pe->pdev->bus->self;
	bcomp = OpalPciBusAll;
	dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
	fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
	rid_end = pe->rid + 1;
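/*
 * RID compare sketch: a PE covering "count" buses spans count << 8
 * RIDs (bus << 8 | devfn), and bcomp selects how many upper bits of
 * the bus number OPAL compares; e.g. count == 2 uses OpalPciBus7Bits
 * (top 7 bits match, 2 buses covered) while count == 32 uses
 * OpalPciBus3Bits (32 buses covered).
 */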
	/*
	 * Associate the PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, an error originating
	 * from the PE might contribute to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Set up one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %d\n",

	rc = opal_pci_set_mve_enable(phb->opal_id,
				     pe->mve_number, OPAL_ENABLE_MVE);
		pe_err(pe, "OPAL error %ld enabling MVE %d\n",

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);

	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
	/*
	 * This is quite simplistic. The "base" weight of a device
	 * is 10; 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct resource *res, res2;
	resource_size_t size;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs' BARs. This check makes
		 * sure that, after shifting, the range will not overlap with
		 * another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);

	/*
	 * After doing so, there will be a "hole" in /proc/iomem when
	 * offset is positive. It looks like the device returned some
	 * MMIO back to the system that no one can actually use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
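/*
 * Shift example: if each VF BAR segment has size S and offset is 4,
 * the IOV BAR start moves up by 4 * S, so VF0's BAR now sits in the
 * M64 segment (and thus the PE) previously numbered 4; this is how the
 * VFs land in the PE range that was allocated for them.
 */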
#endif /* CONFIG_PCI_IOV */

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

		pr_err("%s: Device tree node not associated properly\n",
	if (pdn->pe_number != IODA_INVALID_PE)

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warn("%s: Not enough PE# available, disabling device\n",

	/*
	 * NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure; both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway.
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pdn->pe_number = pe_num;
	pe->flags = PNV_IODA_PE_DEV;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;

	pnv_ioda_link_pe_by_weight(phb, pe);
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

			pr_warn("%s: No device node associated with device!\n",

		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);

/*
 * There are 2 types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus, the other contains the primary PCI bus and its subordinate
 * PCI devices and buses. The second type is normally originated by a
 * PCIe-to-PCI bridge or the downstream ports of a PLX switch.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	unsigned int pe_num = IODA_INVALID_PE;

	/* Check if the PE number is determined by M64 */
	if (phb->pick_m64_pe)
		pe_num = phb->pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (pe_num == IODA_INVALID_PE)
		pe_num = pnv_ioda_alloc_pe(phb);

	if (pe_num == IODA_INVALID_PE) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(phb, pe_num);

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put the PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA-capable device exists */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;

	pnv_ioda_link_pe_by_weight(phb, pe);
static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
	int pe_num, found_pe = false, rc;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Due to a hardware erratum, PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 */
			dev_info(&npu_pdev->dev,
				 "Associating to existing PE %d\n", pe_num);
			pci_dev_get(npu_pdev);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pcidev = npu_pdev;
			npu_pdn->pe_number = pe_num;
			pe->dma_weight += pnv_ioda_dma_weight(npu_pdev);
			phb->ioda.pe_rmap[rid] = pe->pe_number;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
					     OPAL_COMPARE_RID_DEVICE_NUMBER,
					     OPAL_COMPARE_RID_FUNCTION_NUMBER,
			WARN_ON(rc != OPAL_SUCCESS);

	 * Could not find an existing PE so allocate a new
	return pnv_ioda_setup_dev_PE(npu_pdev);

static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, false);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, true);
				pnv_ioda_setup_PEs(dev->subordinate);

/*
 * Configure PEs so that the downstream PCI buses and devices
 * can have their associated PE#s. Unfortunately, we haven't
 * figured out a way to identify PLX bridges yet, so we simply
 * put the PCI bus and everything subordinate behind the root
 * port into one PE# here. This policy is expected to change
 * as soon as we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* M64 layout might affect PE allocation */
		if (phb->reserve_m64_pe)
			phb->reserve_m64_pe(hose->bus, NULL, true);

		/*
		 * On NPU PHB, we expect separate PEs for individual PCI
		 * functions. PCI bus dependent PEs are required for the
		 * remaining types of PHBs.
		 */
		if (phb->type == PNV_PHB_NPU) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);

			pnv_ioda_setup_PEs(hose->bus);
#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;

	kfree(pdn->m64_map);
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct resource *res;
	resource_size_t size, start;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)

	pdn->m64_map = kmalloc(sizeof(*pdn->m64_map) * m64_bars, GFP_KERNEL);

	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)

		for (j = 0; j < m64_bars; j++) {
			win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
					phb->ioda.m64_bar_idx + 1, 0);

			if (win >= phb->ioda.m64_bar_idx + 1)
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
						PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
				size = resource_size(res);

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);

				rc = opal_pci_set_phb_mem_window(phb->opal_id,
						OPAL_M64_WINDOW_TYPE,

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);

				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",

	pnv_pci_vf_release_m64(pdev, num_vfs);
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
	struct iommu_table *tbl;

	tbl = pe->table_group.tables[0];
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(phb, pe->pe_number);
void pnv_pci_sriov_disable(struct pci_dev *pdev)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_sriov *iov;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (pdn->pe_num_map[i] != IODA_INVALID_PE)
					pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);

			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
		/* Releasing pe_num_map */
		kfree(pdn->pe_num_map);
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);

static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		if (pdn->m64_single_mode)
			pe_num = pdn->pe_num_map[vf_index];
			pe_num = *pdn->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->flags = PNV_IODA_PE_VF;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			  pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(phb, pe_num);

		/* Put the PE on the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;

	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->vfs_expanded) {
			dev_info(&pdev->dev, "don't support this SRIOV device"
				 " with non-64bit-prefetchable IOV BAR\n");

		/*
		 * When the M64 BARs function in Single PE mode, the number
		 * of VFs that can be enabled must be less than the number
		 * of M64 BARs.
		 */
		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
			dev_info(&pdev->dev, "Not enough M64 BARs for VFs\n");

		/* Allocate pe_num_map */
		if (pdn->m64_single_mode)
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs,
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);

		if (!pdn->pe_num_map)

		if (pdn->m64_single_mode)
			for (i = 0; i < num_vfs; i++)
				pdn->pe_num_map[i] = IODA_INVALID_PE;

		/* Calculate the available PEs for the required VFs */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				pdn->pe_num_map[i] = pnv_ioda_alloc_pe(phb);
				if (pdn->pe_num_map[i] == IODA_INVALID_PE) {

			mutex_lock(&phb->ioda.pe_alloc_mutex);
			*pdn->pe_num_map = bitmap_find_next_zero_area(
				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
				mutex_unlock(&phb->ioda.pe_alloc_mutex);
				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
				kfree(pdn->pe_num_map);

			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
			mutex_unlock(&phb->ioda.pe_alloc_mutex);

		pdn->num_vfs = num_vfs;

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
			dev_info(&pdev->dev, "Not enough M64 window resources\n");

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (!pdn->m64_single_mode) {
			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);

	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	if (pdn->m64_single_mode) {
		for (i = 0; i < num_vfs; i++) {
			if (pdn->pe_num_map[i] != IODA_INVALID_PE)
				pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);

		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);

	/* Releasing pe_num_map */
	kfree(pdn->pe_num_map);

int pcibios_sriov_disable(struct pci_dev *pdev)
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
#endif /* CONFIG_PCI_IOV */

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE# hasn't been
	 * assigned yet. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
	 * for virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	bool bypass = false;
	struct pci_dev *linked_npu_dev;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
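/*
 * Example: with the bypass base at 1ull << 59 and DRAM ending at,
 * say, 512GB, "top" sits just above bit 59, so only a device
 * advertising a DMA mask of at least 60 bits (in practice
 * DMA_BIT_MASK(64)) takes the bypass path below.
 */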
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);

		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);

	*pdev->dev.dma_mask = dma_mask;

	/* Update peer npu devices */
	if (pe->flags & PNV_IODA_PE_PEER)
		for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {

			linked_npu_dev = pe->peers[i]->pdev;
			if (dma_get_mask(&linked_npu_dev->dev) != dma_mask)
				dma_set_mask(&linked_npu_dev->dev, dma_mask);

static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (!pe->tce_bypass_enabled)
		return __dma_get_required_mask(&pdev->dev);

	end = pe->tce_bypass_base + memblock_end_of_DRAM();
	mask = 1ULL << (fls64(end) - 1);
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus)
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		set_dma_offset(&dev->dev, pe->tce_bypass_base);
		iommu_add_device(&dev->dev);

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
		pe->phb->ioda.tce_inval_reg;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);

		/* Default (older HW) */

	end |= inc - 1;	/* round up end to be different from start */
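/*
 * What gets written: "start" and "end" hold the physical addresses of
 * the first and last TCE entries being invalidated (tbl->it_base is
 * the table's kernel virtual address), optionally tagged with the bus
 * number or the p7ioc "2 TCEs per write" bit depending on the flavour
 * selected above.
 */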
	mb(); /* Ensure above stores are visible */
	while (start <= end) {
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
			__raw_writeq(cpu_to_be64(start), invalidate);

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

#ifdef CONFIG_IOMMU_API
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
	long ret = pnv_tce_xchg(tbl, index, hpa, direction);

	if (!ret && (tbl->it_type &
			(TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
		pnv_pci_ioda1_tce_invalidate(tbl, index, 1, false);

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda1_tce_xchg,
	.clear = pnv_ioda1_tce_free,
static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
	/* 01xb - invalidate TCEs that match the specified PE# */
	unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *npe;

	if (!phb->ioda.tce_inval_reg)

	mb(); /* Ensure above stores are visible */
	__raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);

	if (pe->flags & PNV_IODA_PE_PEER)
		for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
			if (!npe || npe->phb->type != PNV_PHB_NPU)

			pnv_npu_tce_invalidate_entire(npe);

static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
		__be64 __iomem *invalidate, unsigned shift,
		unsigned long index, unsigned long npages)
	unsigned long start, end, inc;

	/* We'll invalidate DMA addresses in PE scope */
	start = 0x2ull << 60;
	start |= (pe_number & 0xFF);

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);

	while (start <= end) {
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
			__raw_writeq(cpu_to_be64(start), invalidate);

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
	struct iommu_table_group_link *tgl;

	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *npe;
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		__be64 __iomem *invalidate = rm ?
			(__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
			pe->phb->ioda.tce_inval_reg;

		pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
			invalidate, tbl->it_page_shift,

		if (pe->flags & PNV_IODA_PE_PEER)
			/* Invalidate PEs using the same TCE table */
			for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
				if (!npe || npe->phb->type != PNV_PHB_NPU)

				pnv_npu_tce_invalidate(npe, tbl, index,

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
#ifdef CONFIG_IOMMU_API
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
	long ret = pnv_tce_xchg(tbl, index, hpa, direction);

	if (!ret && (tbl->it_type &
			(TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

static void pnv_ioda2_table_free(struct iommu_table *tbl)
	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, "pnv");

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda2_tce_xchg,
	.clear = pnv_ioda2_tce_free,
	.free = pnv_ioda2_table_free,
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
		struct pnv_ioda_pe *pe, unsigned int base,
	struct page *tce_mem = NULL;
	struct iommu_table *tbl;

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))

	tbl = pnv_pci_table_alloc(phb->hose->node);
	iommu_register_group(&pe->table_group, phb->hose->global_number,
	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/*
	 * XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (i.e. per segment), but that's an optimization for later; it
	 * requires some added smarts with our get/put_tce implementation.
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
				__pa(addr) + TCE32_TABLE_SIZE * i,
				TCE32_TABLE_SIZE, 0x1000);
			pe_err(pe, " Failed to configure 32-bit TCE table,"

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28, IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	if (phb->ioda.tce_inval_reg)
		tbl->it_type |= (TCE_PCI_SWINV_CREATE |
				 TCE_PCI_SWINV_FREE   |
				 TCE_PCI_SWINV_PAIR);

	tbl->it_ops = &pnv_ioda1_iommu_ops;
	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
	iommu_init_table(tbl, phb->hose->node);

	if (pe->flags & PNV_IODA_PE_DEV) {
		/*
		 * Setting table base here only for carrying iommu_group
		 * further down to let iommu_add_device() do the job.
		 * pnv_pci_ioda_dma_dev_setup() will override it later anyway.
		 */
		set_iommu_table_base(&pe->pdev->dev, tbl);
		iommu_add_device(&pe->pdev->dev);
	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)

		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));

		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
		iommu_free_table(tbl, "pnv");
static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
	struct pnv_phb *phb = pe->phb;
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
		start_addr, start_addr + win_size - 1,
		IOMMU_PAGE_SIZE(tbl));

	/*
	 * Map the TCE table through the TVT. The TVE index is the PE
	 * number shifted left by 1 bit for the 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			(pe->pe_number << 1) + num,
			tbl->it_indirect_levels + 1,
			IOMMU_PAGE_SIZE(tbl));
		pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);

	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &pe->table_group);
	pnv_pci_ioda2_tce_invalidate_entire(pe);
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
	uint16_t window_id = (pe->pe_number << 1) + 1;
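/*
 * Each PE owns a pair of TVEs: index (pe_number << 1) is the 32-bit
 * TCE window and index (pe_number << 1) + 1 is this bypass window,
 * mirroring the "(pe->pe_number << 1) + num" used in
 * pnv_pci_ioda2_set_window() above.
 */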
	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->tce_bypass_base,
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->tce_bypass_base,
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	pe->tce_bypass_enabled = enable;
static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table *tbl);

static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
	int nid = pe->phb->hose->node;
	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
	struct iommu_table *tbl;

	tbl = pnv_pci_table_alloc(nid);

	ret = pnv_pci_ioda2_table_alloc_pages(nid,
			bus_offset, page_shift, window_size,
		iommu_free_table(tbl, "pnv");

	tbl->it_ops = &pnv_ioda2_iommu_ops;
	if (pe->phb->ioda.tce_inval_reg)
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
	struct iommu_table *tbl = NULL;
	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset, and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());

	/*
	 * In memory constrained environments, e.g. a kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
	const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);

	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
			IOMMU_PAGE_SHIFT_4K,
			POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",

	iommu_init_table(tbl, pe->phb->hose->node);

	rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
		pnv_ioda2_table_free(tbl);

	if (!pnv_iommu_bypass_disabled)
		pnv_pci_ioda2_set_bypass(pe, true);

	/* OPAL variant of PHB3 invalidated TCEs */
	if (pe->phb->ioda.tce_inval_reg)
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);

	/*
	 * Setting table base here only for carrying iommu_group
	 * further down to let iommu_add_device() do the job.
	 * pnv_pci_ioda_dma_dev_setup() will override it later anyway.
	 */
	if (pe->flags & PNV_IODA_PE_DEV)
		set_iommu_table_base(&pe->pdev->dev, tbl);
#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
	struct pnv_phb *phb = pe->phb;

	pe_info(pe, "Removing DMA window #%d\n", num);

	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
			(pe->pe_number << 1) + num,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
		pnv_pci_ioda2_tce_invalidate_entire(pe);

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
#ifdef CONFIG_IOMMU_API
static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
		__u64 window_size, __u32 levels)
	unsigned long bytes = 0;
	const unsigned window_shift = ilog2(window_size);
	unsigned entries_shift = window_shift - page_shift;
	unsigned table_shift = entries_shift + 3;
	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
	unsigned long direct_table_size;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
			(window_size > memory_hotplug_max()) ||
			!is_power_of_2(window_size))

	/* Calculate a direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	table_shift = entries_shift + 3;
	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
	direct_table_size = 1UL << table_shift;

	for ( ; levels; --levels) {
		bytes += _ALIGN_UP(tce_table_size, direct_table_size);

		tce_table_size /= direct_table_size;
		tce_table_size <<= 3;
		tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
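/*
 * Worked example: a 1GB window (2^30) with 4K pages needs 2^18 TCEs,
 * i.e. a 2MB table. With levels == 1 the loop above accounts a single
 * _ALIGN_UP(2MB, 2MB) = 2MB; with more levels the same entries are
 * split across smaller per-level tables plus the indirect levels.
 */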
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
	struct iommu_table *tbl = pe->table_group.tables[0];

	pnv_pci_ioda2_set_bypass(pe, false);
	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	pnv_ioda2_table_free(tbl);

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,

	pnv_pci_ioda2_setup_default_config(pe);

static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_pci_ioda2_create_table,
	.set_window = pnv_pci_ioda2_set_window,
	.unset_window = pnv_pci_ioda2_unset_window,
	.take_ownership = pnv_ioda2_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,

static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
	const __be64 *swinvp;

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);

	phb->ioda.tce_inval_reg_phys = be64_to_cpup(swinvp);
	phb->ioda.tce_inval_reg = ioremap(phb->ioda.tce_inval_reg_phys, 8);
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
		unsigned levels, unsigned long limit,
		unsigned long *current_offset, unsigned long *total_allocated)
	struct page *tce_mem = NULL;
	unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
	unsigned long allocated = 1UL << (order + PAGE_SHIFT);
	unsigned entries = 1UL << (shift - 3);

	tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
		pr_err("Failed to allocate a TCE memory, order=%d\n", order);
	addr = page_address(tce_mem);
	memset(addr, 0, allocated);
	*total_allocated += allocated;

	*current_offset += allocated;

	for (i = 0; i < entries; ++i) {
		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
				levels, limit, current_offset, total_allocated);

		addr[i] = cpu_to_be64(__pa(tmp) |
				TCE_PCI_READ | TCE_PCI_WRITE);
2342 if (*current_offset >= limit)
2349 static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2350 unsigned long size, unsigned level);
2352 static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2353 __u32 page_shift, __u64 window_size, __u32 levels,
2354 struct iommu_table *tbl)
2357 unsigned long offset = 0, level_shift, total_allocated = 0;
2358 const unsigned window_shift = ilog2(window_size);
2359 unsigned entries_shift = window_shift - page_shift;
2360 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
2361 const unsigned long tce_table_size = 1UL << table_shift;
2363 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
2366 if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))
2369 /* Adjust direct table size from window_size and levels */
2370 entries_shift = (entries_shift + levels - 1) / levels;
2371 level_shift = entries_shift + 3;
2372 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
2374 /* Allocate TCE table */
2375 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2376 levels, tce_table_size, &offset, &total_allocated);
2378 /* addr==NULL means that the first level allocation failed */
2383 * The first level was allocated but some lower level failed as
2384 * we did not allocate as much as we wanted, so
2385 * release the partially allocated table.
2387 if (offset < tce_table_size) {
2388 pnv_pci_ioda2_table_do_free_pages(addr,
2389 1ULL << (level_shift - 3), levels - 1);
2393 /* Setup linux iommu table */
2394 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
2396 tbl->it_level_size = 1ULL << (level_shift - 3);
2397 tbl->it_indirect_levels = levels - 1;
2398 tbl->it_allocated_size = total_allocated;
2400 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2401 window_size, tce_table_size, bus_offset);
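/*
 * Editor's note: the free path below mirrors the allocator. Entries
 * lacking the TCE_PCI_READ/TCE_PCI_WRITE tag were never populated and
 * are skipped; populated intermediate entries are recursed into before
 * each table is returned with free_pages().
 */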
2406 static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2407 unsigned long size, unsigned level)
2409 const unsigned long addr_ul = (unsigned long) addr &
2410 ~(TCE_PCI_READ | TCE_PCI_WRITE);
2414 u64 *tmp = (u64 *) addr_ul;
2416 for (i = 0; i < size; ++i) {
2417 unsigned long hpa = be64_to_cpu(tmp[i]);
2419 if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
2422 pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
2427 free_pages(addr_ul, get_order(size << 3));
2430 static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
2432 const unsigned long size = tbl->it_indirect_levels ?
2433 tbl->it_level_size : tbl->it_size;
2438 pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
2439 tbl->it_indirect_levels);
2442 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2443 struct pnv_ioda_pe *pe)
2447 /* We shouldn't already have a 32-bit DMA window associated */
2448 if (WARN_ON(pe->tce32_seg >= 0))
2451 /* TVE #1 is selected by PCI address bit 59 */
2452 pe->tce_bypass_base = 1ull << 59;
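/*
 * Editor's note: DMA addresses with bit 59 set are steered to TVE #1,
 * which pnv_pci_ioda2_set_bypass() can program as a direct, untranslated
 * window; addresses below 1 << 59 go through the 32-bit TCE table.
 */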
2454 iommu_register_group(&pe->table_group, phb->hose->global_number,
2457 /* The PE will reserve all possible 32-bit space */
2459 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2460 phb->ioda.m32_pci_base);
2462 /* Setup linux iommu table */
2463 pe->table_group.tce32_start = 0;
2464 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2465 pe->table_group.max_dynamic_windows_supported =
2466 IOMMU_TABLE_GROUP_MAX_TABLES;
2467 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2468 pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M;
2469 #ifdef CONFIG_IOMMU_API
2470 pe->table_group.ops = &pnv_pci_ioda2_ops;
2473 rc = pnv_pci_ioda2_setup_default_config(pe);
2475 if (pe->tce32_seg >= 0)
2480 if (pe->flags & PNV_IODA_PE_DEV)
2481 iommu_add_device(&pe->pdev->dev);
2482 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2483 pnv_ioda_setup_bus_dma(pe, pe->pbus);
2486 static void pnv_ioda_setup_dma(struct pnv_phb *phb)
2488 struct pci_controller *hose = phb->hose;
2489 unsigned int residual, remaining, segs, tw, base;
2490 struct pnv_ioda_pe *pe;
2492 /* If we have more PE# than segments available, hand out one
2493 * per PE until we run out and let the rest fail. If not,
2494 * we assign at least one segment per PE, plus more based
2495 * on the number of devices under that PE.
2497 if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
2500 residual = phb->ioda.tce32_count -
2501 phb->ioda.dma_pe_count;
2503 pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
2504 hose->global_number, phb->ioda.tce32_count);
2505 pr_info("PCI: %d PE# for a total weight of %d\n",
2506 phb->ioda.dma_pe_count, phb->ioda.dma_weight);
2508 pnv_pci_ioda_setup_opal_tce_kill(phb);
2510 /* Walk our PE list and configure their DMA segments, handing them
2511 * out one base segment plus any residual segments based on
2514 remaining = phb->ioda.tce32_count;
2515 tw = phb->ioda.dma_weight;
2517 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
2518 if (!pe->dma_weight)
2521 pe_warn(pe, "No DMA32 resources available\n");
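/*
 * Editor's note: on top of the one base segment every PE gets, this
 * hands each PE a share of the residual segments proportional to its
 * DMA weight, rounded to the nearest integer by the "+ (tw / 2)" term.
 */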
2526 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
2527 if (segs > remaining)
2532 * For an IODA2-compliant PHB3, we needn't care about the weight.
2533 * All of the available 32-bit DMA space will be assigned to
2536 if (phb->type == PNV_PHB_IODA1) {
2537 pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
2538 pe->dma_weight, segs);
2539 pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
2540 } else if (phb->type == PNV_PHB_IODA2) {
2541 pe_info(pe, "Assign DMA32 space\n");
2543 pnv_pci_ioda2_setup_dma_pe(phb, pe);
2544 } else if (phb->type == PNV_PHB_NPU) {
2546 * We initialise the DMA space for an NPU PHB
2547 * after setup of the PHB is complete as we
2548 * point the NPU TVT to the same location
2558 #ifdef CONFIG_PCI_MSI
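/*
 * Editor's note: on IODA2 an MSI EOI must additionally go through OPAL
 * (opal_pci_msi_eoi); set_msi_irq_chip() below therefore clones the
 * original irq_chip once per PHB and overrides its ->irq_eoi with this
 * wrapper.
 */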
2559 static void pnv_ioda2_msi_eoi(struct irq_data *d)
2561 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2562 struct irq_chip *chip = irq_data_get_irq_chip(d);
2563 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2567 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
2574 static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2576 struct irq_data *idata;
2577 struct irq_chip *ichip;
2579 if (phb->type != PNV_PHB_IODA2)
2582 if (!phb->ioda.irq_chip_init) {
2584 * The first time we set up an MSI IRQ, we need to set up the
2585 * corresponding IRQ chip to route it correctly.
2587 idata = irq_get_irq_data(virq);
2588 ichip = irq_data_get_irq_chip(idata);
2589 phb->ioda.irq_chip_init = 1;
2590 phb->ioda.irq_chip = *ichip;
2591 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2593 irq_set_chip(virq, &phb->ioda.irq_chip);
2596 #ifdef CONFIG_CXL_BASE
2598 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
2600 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2602 return of_node_get(hose->dn);
2604 EXPORT_SYMBOL(pnv_pci_get_phb_node);
2606 int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
2608 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2609 struct pnv_phb *phb = hose->private_data;
2610 struct pnv_ioda_pe *pe;
2613 pe = pnv_ioda_get_pe(dev);
2617 pe_info(pe, "Switching PHB to CXL\n");
2619 rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
2621 dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
2625 EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
2627 /* Find the PHB for the cxl dev and allocate MSI hwirqs.
2628 * Returns the absolute hardware IRQ number.
2630 int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
2632 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2633 struct pnv_phb *phb = hose->private_data;
2634 int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
2637 dev_warn(&dev->dev, "Failed to find a free MSI\n");
2641 return phb->msi_base + hwirq;
2643 EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
2645 void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
2647 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2648 struct pnv_phb *phb = hose->private_data;
2650 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
2652 EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
2654 void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
2655 struct pci_dev *dev)
2657 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2658 struct pnv_phb *phb = hose->private_data;
2661 for (i = 1; i < CXL_IRQ_RANGES; i++) {
2662 if (!irqs->range[i])
2664 pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
2667 hwirq = irqs->offset[i] - phb->msi_base;
2668 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
2672 EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
2674 int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
2675 struct pci_dev *dev, int num)
2677 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2678 struct pnv_phb *phb = hose->private_data;
2681 memset(irqs, 0, sizeof(struct cxl_irq_ranges));
2683 /* 0 is reserved for the multiplexed PSL DSI interrupt */
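/*
 * Editor's note: the loop spreads @num hwirqs over at most
 * CXL_IRQ_RANGES - 1 contiguous ranges, recording each range's base
 * in @irqs->offset[] and its length in @irqs->range[]; if the bitmap
 * cannot satisfy the request, everything allocated so far is undone
 * via pnv_cxl_release_hwirq_ranges() on the failure path below.
 */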
2684 for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
2687 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
2695 irqs->offset[i] = phb->msi_base + hwirq;
2696 irqs->range[i] = try;
2697 pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
2698 i, irqs->offset[i], irqs->range[i]);
2706 pnv_cxl_release_hwirq_ranges(irqs, dev);
2709 EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
2711 int pnv_cxl_get_irq_count(struct pci_dev *dev)
2713 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2714 struct pnv_phb *phb = hose->private_data;
2716 return phb->msi_bmp.irq_count;
2718 EXPORT_SYMBOL(pnv_cxl_get_irq_count);
2720 int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
2723 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2724 struct pnv_phb *phb = hose->private_data;
2725 unsigned int xive_num = hwirq - phb->msi_base;
2726 struct pnv_ioda_pe *pe;
2729 if (!(pe = pnv_ioda_get_pe(dev)))
2732 /* Assign XIVE to PE */
2733 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2735 pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
2736 "hwirq 0x%x XIVE 0x%x PE\n",
2737 pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
2740 set_msi_irq_chip(phb, virq);
2744 EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
2747 static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2748 unsigned int hwirq, unsigned int virq,
2749 unsigned int is_64, struct msi_msg *msg)
2751 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2752 unsigned int xive_num = hwirq - phb->msi_base;
2756 /* No PE assigned ? bail out ... no MSI for you ! */
2760 /* Check if we have an MVE */
2761 if (pe->mve_number < 0)
2764 /* Force 32-bit MSI on some broken devices */
2765 if (dev->no_64bit_msi)
2768 /* Assign XIVE to PE */
2769 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2771 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2772 pci_name(dev), rc, xive_num);
2779 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2782 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2786 msg->address_hi = be64_to_cpu(addr64) >> 32;
2787 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2791 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2794 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2798 msg->address_hi = 0;
2799 msg->address_lo = be32_to_cpu(addr32);
2801 msg->data = be32_to_cpu(data);
2803 set_msi_irq_chip(phb, virq);
2805 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2806 " address=%x_%08x data=%x PE# %d\n",
2807 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2808 msg->address_hi, msg->address_lo, data, pe->pe_number);
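/*
 * Editor's note: "ibm,opal-msi-ranges" (with "msi-ranges" as a legacy
 * fallback) carries two cells: the first hardware IRQ backing MSIs on
 * this PHB and the count of consecutive IRQs available; they seed the
 * msi_bitmap allocator used above.
 */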
2813 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2816 const __be32 *prop = of_get_property(phb->hose->dn,
2817 "ibm,opal-msi-ranges", NULL);
2820 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2825 phb->msi_base = be32_to_cpup(prop);
2826 count = be32_to_cpup(prop + 1);
2827 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2828 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2829 phb->hose->global_number);
2833 phb->msi_setup = pnv_pci_ioda_msi_setup;
2834 phb->msi32_support = 1;
2835 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2836 count, phb->msi_base);
2839 static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2840 #endif /* CONFIG_PCI_MSI */
2842 #ifdef CONFIG_PCI_IOV
2843 static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2845 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
2846 struct pnv_phb *phb = hose->private_data;
2847 const resource_size_t gate = phb->ioda.m64_segsize >> 2;
2848 struct resource *res;
2850 resource_size_t size, total_vf_bar_sz;
2854 if (!pdev->is_physfn || pdev->is_added)
2857 pdn = pci_get_pdn(pdev);
2858 pdn->vfs_expanded = 0;
2859 pdn->m64_single_mode = false;
2861 total_vfs = pci_sriov_get_totalvfs(pdev);
2862 mul = phb->ioda.total_pe_num;
2863 total_vf_bar_sz = 0;
2865 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2866 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2867 if (!res->flags || res->parent)
2869 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2870 dev_warn(&pdev->dev, "SR-IOV is not supported with"
2871 " non-M64 VF BAR%d: %pR\n",
2876 total_vf_bar_sz += pci_iov_resource_size(pdev,
2877 i + PCI_IOV_RESOURCES);
2880 * If bigger than a quarter of the M64 segment size, round the VF count up to a power of two.
2883 * Generally, one M64 BAR maps one IOV BAR. To avoid conflicts
2884 * with other devices, the IOV BAR size is expanded to be
2885 * (total_pe * VF_BAR_size). When VF_BAR_size is half of the M64
2886 * segment size, the expanded size would equal half of the
2887 * whole M64 space size, which would exhaust the M64 space and
2888 * limit system flexibility. This is a design decision to
2889 * set the boundary at a quarter of the M64 segment size.
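/*
 * Editor's note, a worked example of the expansion below: with
 * total_pe = 256 and a 1MB VF BAR, shared mode grows the IOV BAR to
 * 256 * 1MB = 256MB so each VF's slice lands in its own M64 segment
 * (hence its own PE). Once total_vf_bar_sz crosses the gate, single
 * PE mode with roundup_pow_of_two(total_vfs) slices is used instead.
 */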
2891 if (total_vf_bar_sz > gate) {
2892 mul = roundup_pow_of_two(total_vfs);
2893 dev_info(&pdev->dev,
2894 "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
2895 total_vf_bar_sz, gate, mul);
2896 pdn->m64_single_mode = true;
2901 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2902 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2903 if (!res->flags || res->parent)
2906 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2908 * On PHB3, the minimum size alignment of an M64 BAR in single mode is 32MB.
2911 if (pdn->m64_single_mode && (size < SZ_32M))
2913 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2914 res->end = res->start + size * mul - 1;
2915 dev_dbg(&pdev->dev, " %pR\n", res);
2916 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)\n",
2919 pdn->vfs_expanded = mul;
2924 /* To save MMIO space, IOV BAR is truncated. */
2925 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2926 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2928 res->end = res->start - 1;
2931 #endif /* CONFIG_PCI_IOV */
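/*
 * Editor's note: IODA slices each I/O and M32 window into
 * total_pe_num equal segments. The helper below converts a resource
 * into window-relative addresses and then claims every segment the
 * resource overlaps for the owning PE, one
 * opal_pci_map_pe_mmio_window() call per segment index.
 */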
2933 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2934 struct resource *res)
2936 struct pnv_phb *phb = pe->phb;
2937 struct pci_bus_region region;
2941 if (!res || !res->flags || res->start > res->end)
2944 if (res->flags & IORESOURCE_IO) {
2945 region.start = res->start - phb->ioda.io_pci_base;
2946 region.end = res->end - phb->ioda.io_pci_base;
2947 index = region.start / phb->ioda.io_segsize;
2949 while (index < phb->ioda.total_pe_num &&
2950 region.start <= region.end) {
2951 phb->ioda.io_segmap[index] = pe->pe_number;
2952 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2953 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2954 if (rc != OPAL_SUCCESS) {
2955 pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n",
2956 __func__, rc, index, pe->pe_number);
2960 region.start += phb->ioda.io_segsize;
2963 } else if ((res->flags & IORESOURCE_MEM) &&
2964 !pnv_pci_is_mem_pref_64(res->flags)) {
2965 region.start = res->start -
2966 phb->hose->mem_offset[0] -
2967 phb->ioda.m32_pci_base;
2968 region.end = res->end -
2969 phb->hose->mem_offset[0] -
2970 phb->ioda.m32_pci_base;
2971 index = region.start / phb->ioda.m32_segsize;
2973 while (index < phb->ioda.total_pe_num &&
2974 region.start <= region.end) {
2975 phb->ioda.m32_segmap[index] = pe->pe_number;
2976 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2977 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2978 if (rc != OPAL_SUCCESS) {
2979 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d\n",
2980 __func__, rc, index, pe->pe_number);
2984 region.start += phb->ioda.m32_segsize;
2991 * This function is supposed to be called on a per-PE basis, from
2992 * top to bottom, so the I/O or MMIO segment assigned to a
2993 * parent PE can be overridden by its child PEs if necessary.
2995 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
2997 struct pci_dev *pdev;
3001 * NOTE: We only care about PCI-bus-based PEs for now. PCI
3002 * device based PEs, for example SR-IOV-sensitive VFs, should
3003 * be figured out later.
3005 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
3007 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
3008 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
3009 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
3012 * If the PE contains all subordinate PCI buses, the
3013 * windows of the child bridges should be mapped to
3016 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
3018 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
3019 pnv_ioda_setup_pe_res(pe,
3020 &pdev->resource[PCI_BRIDGE_RESOURCES + i]);
3024 static void pnv_pci_ioda_setup_seg(void)
3026 struct pci_controller *tmp, *hose;
3027 struct pnv_phb *phb;
3028 struct pnv_ioda_pe *pe;
3030 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3031 phb = hose->private_data;
3033 /* NPU PHB does not support IO or MMIO segmentation */
3034 if (phb->type == PNV_PHB_NPU)
3037 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
3038 pnv_ioda_setup_pe_seg(pe);
3043 static void pnv_pci_ioda_setup_DMA(void)
3045 struct pci_controller *hose, *tmp;
3046 struct pnv_phb *phb;
3048 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3049 pnv_ioda_setup_dma(hose->private_data);
3051 /* Mark the PHB initialization done */
3052 phb = hose->private_data;
3053 phb->initialized = 1;
3057 static void pnv_pci_ioda_create_dbgfs(void)
3059 #ifdef CONFIG_DEBUG_FS
3060 struct pci_controller *hose, *tmp;
3061 struct pnv_phb *phb;
3064 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3065 phb = hose->private_data;
3067 sprintf(name, "PCI%04x", hose->global_number);
3068 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
3070 pr_warning("%s: Error creating debugfs on PHB#%x\n",
3071 __func__, hose->global_number);
3073 #endif /* CONFIG_DEBUG_FS */
3076 static void pnv_npu_ioda_fixup(void)
3079 struct pci_controller *hose, *tmp;
3080 struct pnv_phb *phb;
3081 struct pnv_ioda_pe *pe;
3083 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3084 phb = hose->private_data;
3085 if (phb->type != PNV_PHB_NPU)
3088 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
3089 enable_bypass = dma_get_mask(&pe->pdev->dev) ==
3091 pnv_npu_init_dma_pe(pe);
3092 pnv_npu_dma_set_bypass(pe, enable_bypass);
3097 static void pnv_pci_ioda_fixup(void)
3099 pnv_pci_ioda_setup_PEs();
3100 pnv_pci_ioda_setup_seg();
3101 pnv_pci_ioda_setup_DMA();
3103 pnv_pci_ioda_create_dbgfs();
3107 eeh_addr_cache_build();
3110 /* Link NPU IODA tables to their PCI devices. */
3111 pnv_npu_ioda_fixup();
3115 * Returns the alignment for I/O or memory windows for P2P
3116 * bridges. That actually depends on how PEs are segmented.
3117 * For now, we return the I/O or M32 segment size for PE-sensitive
3118 * P2P bridges. Otherwise, the default values (4KiB for I/O,
3119 * 1MiB for memory) will be returned.
3121 * The current PCI bus might be put into one PE, which was
3122 * created against the parent PCI bridge. In that case, we
3123 * needn't enlarge the alignment so that we can save some resources.
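/*
 * Editor's note, an illustrative case: with a 2GB M32 window and 256
 * PEs, m32_segsize is 8MB, so the memory window of a PE-sensitive P2P
 * bridge is aligned to 8MB and thus always starts on a segment (PE)
 * boundary.
 */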
3126 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3129 struct pci_dev *bridge;
3130 struct pci_controller *hose = pci_bus_to_host(bus);
3131 struct pnv_phb *phb = hose->private_data;
3132 int num_pci_bridges = 0;
3136 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
3138 if (num_pci_bridges >= 2)
3142 bridge = bridge->bus->self;
3145 /* We fall back to M32 if M64 isn't supported */
3146 if (phb->ioda.m64_segsize &&
3147 pnv_pci_is_mem_pref_64(type))
3148 return phb->ioda.m64_segsize;
3149 if (type & IORESOURCE_MEM)
3150 return phb->ioda.m32_segsize;
3152 return phb->ioda.io_segsize;
3155 #ifdef CONFIG_PCI_IOV
3156 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
3159 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3160 struct pnv_phb *phb = hose->private_data;
3161 struct pci_dn *pdn = pci_get_pdn(pdev);
3162 resource_size_t align;
3165 * On the PowerNV platform, an IOV BAR is mapped by an M64 BAR to
3166 * enable SR-IOV, while from the hardware perspective the range
3167 * mapped by an M64 BAR must be size-aligned.
3169 * When the IOV BAR is mapped with an M64 BAR in Single PE mode, that
3170 * powernv-specific hardware restriction is gone. But if we just used
3171 * the VF BAR size as the alignment, the PF BAR / VF BAR might be
3172 * allocated within one segment of M64 #15, which would introduce a PE
3173 * conflict between PF and VF. The minimum alignment of an IOV BAR is therefore the M64 segment size.
3176 * This function returns the total IOV BAR size if the M64 BAR is in
3177 * Shared PE mode, or just the VF BAR size if not.
3178 * If the M64 BAR is in Single PE mode, return the VF BAR size, or the
3179 * M64 segment size if the IOV BAR size is less than that.
3181 align = pci_iov_resource_size(pdev, resno);
3182 if (!pdn->vfs_expanded)
3184 if (pdn->m64_single_mode)
3185 return max(align, (resource_size_t)phb->ioda.m64_segsize);
3187 return pdn->vfs_expanded * align;
3189 #endif /* CONFIG_PCI_IOV */
3191 /* Prevent enabling devices for which we couldn't properly
3194 static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
3196 struct pci_controller *hose = pci_bus_to_host(dev->bus);
3197 struct pnv_phb *phb = hose->private_data;
3200 /* The function is probably called while the PEs have
3201 * not been created yet, for example during resource
3202 * reassignment in the PCI probe period. We just skip the check if
3205 if (!phb->initialized)
3208 pdn = pci_get_pdn(dev);
3209 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3215 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3217 struct pnv_phb *phb = hose->private_data;
3219 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
3223 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3224 .dma_dev_setup = pnv_pci_dma_dev_setup,
3225 .dma_bus_setup = pnv_pci_dma_bus_setup,
3226 #ifdef CONFIG_PCI_MSI
3227 .setup_msi_irqs = pnv_setup_msi_irqs,
3228 .teardown_msi_irqs = pnv_teardown_msi_irqs,
3230 .enable_device_hook = pnv_pci_enable_device_hook,
3231 .window_alignment = pnv_pci_window_alignment,
3232 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3233 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
3234 .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
3235 .shutdown = pnv_pci_ioda_shutdown,
3238 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
3239 .dma_dev_setup = pnv_pci_dma_dev_setup,
3240 #ifdef CONFIG_PCI_MSI
3241 .setup_msi_irqs = pnv_setup_msi_irqs,
3242 .teardown_msi_irqs = pnv_teardown_msi_irqs,
3244 .enable_device_hook = pnv_pci_enable_device_hook,
3245 .window_alignment = pnv_pci_window_alignment,
3246 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3247 .dma_set_mask = pnv_npu_dma_set_mask,
3248 .shutdown = pnv_pci_ioda_shutdown,
3251 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
3252 u64 hub_id, int ioda_type)
3254 struct pci_controller *hose;
3255 struct pnv_phb *phb;
3256 unsigned long size, m64map_off, m32map_off, pemap_off, iomap_off = 0;
3257 const __be64 *prop64;
3258 const __be32 *prop32;
3265 pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
3267 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
3269 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
3272 phb_id = be64_to_cpup(prop64);
3273 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
3275 phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
3277 /* Allocate PCI controller */
3278 phb->hose = hose = pcibios_alloc_controller(np);
3280 pr_err(" Can't allocate PCI controller for %s\n",
3282 memblock_free(__pa(phb), sizeof(struct pnv_phb));
3286 spin_lock_init(&phb->lock);
3287 prop32 = of_get_property(np, "bus-range", &len);
3288 if (prop32 && len == 8) {
3289 hose->first_busno = be32_to_cpu(prop32[0]);
3290 hose->last_busno = be32_to_cpu(prop32[1]);
3292 pr_warn(" Broken <bus-range> on %s\n", np->full_name);
3293 hose->first_busno = 0;
3294 hose->last_busno = 0xff;
3296 hose->private_data = phb;
3297 phb->hub_id = hub_id;
3298 phb->opal_id = phb_id;
3299 phb->type = ioda_type;
3300 mutex_init(&phb->ioda.pe_alloc_mutex);
3302 /* Detect specific models for error handling */
3303 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3304 phb->model = PNV_PHB_MODEL_P7IOC;
3305 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
3306 phb->model = PNV_PHB_MODEL_PHB3;
3307 else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
3308 phb->model = PNV_PHB_MODEL_NPU;
3310 phb->model = PNV_PHB_MODEL_UNKNOWN;
3312 /* Parse 32-bit and IO ranges (if any) */
3313 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
3316 phb->regs = of_iomap(np, 0);
3317 if (phb->regs == NULL)
3318 pr_err(" Failed to map registers !\n");
3320 /* Initialize more IODA stuff */
3321 phb->ioda.total_pe_num = 1;
3322 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
3324 phb->ioda.total_pe_num = be32_to_cpup(prop32);
3325 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3327 phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
3329 /* Parse 64-bit MMIO range */
3330 pnv_ioda_parse_m64_window(phb);
3332 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
3333 /* FW has already carved the top 64K (MSI space) off of the M32 space */
3334 phb->ioda.m32_size += 0x10000;
3336 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
3337 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
3338 phb->ioda.io_size = hose->pci_io_size;
3339 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
3340 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3342 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
3343 size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
3345 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
3347 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
3348 if (phb->type == PNV_PHB_IODA1) {
3350 size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
3353 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
3354 aux = memblock_virt_alloc(size, 0);
3355 phb->ioda.pe_alloc = aux;
3356 phb->ioda.m64_segmap = aux + m64map_off;
3357 phb->ioda.m32_segmap = aux + m32map_off;
3358 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3359 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
3360 phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
3362 if (phb->type == PNV_PHB_IODA1) {
3363 phb->ioda.io_segmap = aux + iomap_off;
3364 for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3365 phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
3367 phb->ioda.pe_array = aux + pemap_off;
3368 set_bit(phb->ioda.reserved_pe_idx, phb->ioda.pe_alloc);
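/*
 * Editor's note on the layout above: one memblock allocation is carved
 * into the PE allocation bitmap (offset 0, hence pe_alloc = aux), the
 * M64/M32 (and, on IODA1, I/O) segment maps, and finally the PE array,
 * using the *_off offsets computed while sizing. The reserved PE is
 * marked used in the bitmap up front so it is never handed out.
 */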
3370 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
3371 INIT_LIST_HEAD(&phb->ioda.pe_list);
3372 mutex_init(&phb->ioda.pe_list_mutex);
3374 /* Calculate how many 32-bit TCE segments we have */
3375 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
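/*
 * Editor's note: each 32-bit TCE segment covers 256MB (1 << 28), so
 * the segment count is simply the 32-bit DMA space divided by 256MB.
 */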
3377 #if 0 /* We should really do that ... */
3378 rc = opal_pci_set_phb_mem_window(opal->phb_id,
3381 starting_real_address,
3382 starting_pci_address,
3386 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
3387 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
3388 phb->ioda.m32_size, phb->ioda.m32_segsize);
3389 if (phb->ioda.m64_size)
3390 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
3391 phb->ioda.m64_size, phb->ioda.m64_segsize);
3392 if (phb->ioda.io_size)
3393 pr_info(" IO: 0x%x [segment=0x%x]\n",
3394 phb->ioda.io_size, phb->ioda.io_segsize);
3397 phb->hose->ops = &pnv_pci_ops;
3398 phb->get_pe_state = pnv_ioda_get_pe_state;
3399 phb->freeze_pe = pnv_ioda_freeze_pe;
3400 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
3403 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
3405 /* Setup MSI support */
3406 pnv_pci_init_ioda_msis(phb);
3409 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
3410 * to let the PCI core do resource assignment. It is expected
3411 * that the PCI core will do correct I/O and MMIO alignment
3412 * for the P2P bridge BARs so that each PCI bus (excluding
3413 * the child P2P bridges) can form an individual PE.
3415 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
3417 if (phb->type == PNV_PHB_NPU)
3418 hose->controller_ops = pnv_npu_ioda_controller_ops;
3420 hose->controller_ops = pnv_pci_ioda_controller_ops;
3422 #ifdef CONFIG_PCI_IOV
3423 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
3424 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
3427 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
3429 /* Reset IODA tables to a clean state */
3430 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
3432 pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
3434 /* If we're running in a kdump kernel, the previous kernel never
3435 * shut down PCI devices correctly. The IODA table has already
3436 * been cleaned out, so we have to issue a PHB reset to stop all PCI
3437 * transactions from the previous kernel.
3439 if (is_kdump_kernel()) {
3440 pr_info(" Issue PHB reset ...\n");
3441 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3442 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
3445 /* Remove M64 resource if we can't configure it successfully */
3446 if (!phb->init_m64 || phb->init_m64(phb))
3447 hose->mem_resources[1].flags = 0;
3450 void __init pnv_pci_init_ioda2_phb(struct device_node *np)
3452 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
3455 void __init pnv_pci_init_npu_phb(struct device_node *np)
3457 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
3460 void __init pnv_pci_init_ioda_hub(struct device_node *np)
3462 struct device_node *phbn;
3463 const __be64 *prop64;
3466 pr_info("Probing IODA IO-Hub %s\n", np->full_name);
3468 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3470 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3473 hub_id = be64_to_cpup(prop64);
3474 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3476 /* Count child PHBs */
3477 for_each_child_of_node(np, phbn) {
3478 /* Look for IODA1 PHBs */
3479 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
3480 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);