powerpc/pci: Delay populating pdn
[cascardo/linux.git] arch/powerpc/platforms/powernv/pci.c
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

#ifdef CONFIG_PCI_MSI
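/*
 * Allocate one hardware MSI from the PHB's MSI bitmap for every MSI
 * descriptor on the device, map it to a Linux virq and have the
 * PHB-specific msi_setup() callback compose the MSI message before it
 * is written to the device.
 */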
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
        unsigned int virq;
        int rc;

        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;

        if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;

        for_each_pci_msi_entry(entry, pdev) {
                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
                        pr_warn("%s: Supports only 64-bit MSIs\n",
                                pci_name(pdev));
                        return -ENXIO;
                }
                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
                if (hwirq < 0) {
                        pr_warn("%s: Failed to find a free MSI\n",
                                pci_name(pdev));
                        return -ENOSPC;
                }
                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
                if (virq == NO_IRQ) {
                        pr_warn("%s: Failed to map MSI to linux irq\n",
                                pci_name(pdev));
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return -ENOMEM;
                }
                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
                                    virq, entry->msi_attrib.is_64, &msg);
                if (rc) {
                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
                        irq_dispose_mapping(virq);
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return rc;
                }
                irq_set_msi_desc(virq, entry);
                pci_write_msi_msg(virq, &msg);
        }
        return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
        irq_hw_number_t hwirq;

        if (WARN_ON(!phb))
                return;

        for_each_pci_msi_entry(entry, pdev) {
                if (entry->irq == NO_IRQ)
                        continue;
                hwirq = virq_to_hw(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
                irq_dispose_mapping(entry->irq);
                msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
        }
}
#endif /* CONFIG_PCI_MSI */

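/*
 * Decode and print the OPAL diag-data blob for a P7IOC PHB.  Registers
 * are stored big-endian by firmware, so every field is byte-swapped
 * before printing; all-zero register groups are skipped to keep the
 * log readable.
 */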
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
                                         struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoP7IOCPhbErrorData *data;
        int i;

        data = (struct OpalIoP7IOCPhbErrorData *)common;
        pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));

        if (data->brdgCtl)
                pr_info("brdgCtl:     %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts:      %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus   ||
            data->linkStatus   || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus   || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts:  %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->p7iocPlssr || data->p7iocCsr)
                pr_info("PhbSts:      %016llx %016llx\n",
                        be64_to_cpu(data->p7iocPlssr),
                        be64_to_cpu(data->p7iocCsr));
        if (data->lemFir)
                pr_info("Lem:         %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
                                        struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoPhb3ErrorData *data;
        int i;

        data = (struct OpalIoPhb3ErrorData *)common;
        pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));
        if (data->brdgCtl)
                pr_info("brdgCtl:     %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts:      %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus   ||
            data->linkStatus   || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts:  %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog:  %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->nFir)
                pr_info("nFir:        %016llx %016llx %016llx\n",
                        be64_to_cpu(data->nFir),
                        be64_to_cpu(data->nFirMask),
                        be64_to_cpu(data->nFirWOF));
        if (data->phbPlssr || data->phbCsr)
                pr_info("PhbSts:      %016llx %016llx\n",
                        be64_to_cpu(data->phbPlssr),
                        be64_to_cpu(data->phbCsr));
        if (data->lemFir)
                pr_info("Lem:         %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
                                unsigned char *log_buff)
{
        struct OpalIoPhbErrorCommon *common;

        if (!hose || !log_buff)
                return;

        common = (struct OpalIoPhbErrorCommon *)log_buff;
        switch (be32_to_cpu(common->ioType)) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                pnv_pci_dump_p7ioc_diag_data(hose, common);
                break;
        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
                pnv_pci_dump_phb3_diag_data(hose, common);
                break;
        default:
                pr_warn("%s: Unrecognized ioType %d\n",
                        __func__, be32_to_cpu(common->ioType));
        }
}

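/*
 * Called when a config access finds a frozen PE: grab the PHB diag-data,
 * clear the freeze (through the compound-PE callback when the PHB
 * provides one) and dump the diag buffer only if the clear failed.
 */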
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
        unsigned long flags, rc;
        int has_diag, ret = 0;

        spin_lock_irqsave(&phb->lock, flags);

        /* Fetch PHB diag-data */
        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        has_diag = (rc == OPAL_SUCCESS);

        /* If the PHB supports compound PEs, clear the freeze through it */
        if (phb->unfreeze_pe) {
                ret = phb->unfreeze_pe(phb,
                                       pe_no,
                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        } else {
                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
                                               pe_no,
                                               OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
                if (rc) {
                        pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
                                __func__, rc, phb->hose->global_number,
                                pe_no);
                        ret = -EIO;
                }
        }

        /*
         * For now, let's only display the diag buffer when we fail to clear
         * the EEH status. We'll do more sensible things later when we have
         * proper EEH support. We need to make sure we don't pollute ourselves
         * with the normal errors generated when probing empty slots.
         */
        if (has_diag && ret)
                pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

        spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u8      fstate;
        __be16  pcierr;
        unsigned int pe_no;
        s64     rc;

        /*
         * Get the PE#. During the PCI probe stage, the PE# might not
         * have been set up yet, so map all ER errors to the reserved PE.
         */
        pe_no = pdn->pe_number;
        if (pe_no == IODA_INVALID_PE)
                pe_no = phb->ioda.reserved_pe_idx;

        /*
         * Fetch the frozen state. If the PHB supports compound PEs,
         * we need to handle that case.
         */
        if (phb->get_pe_state) {
                fstate = phb->get_pe_state(phb, pe_no);
        } else {
                rc = opal_pci_eeh_freeze_status(phb->opal_id,
                                                pe_no,
                                                &fstate,
                                                &pcierr,
                                                NULL);
                if (rc) {
                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
                                __func__, rc, phb->hose->global_number, pe_no);
                        return;
                }
        }

        pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
                 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

        /* Clear the frozen state if applicable */
        if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
            fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
            fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
                /*
                 * If the PHB supports compound PEs, freeze it for
                 * consistency.
                 */
                if (phb->freeze_pe)
                        phb->freeze_pe(phb, pe_no);

                pnv_pci_handle_eeh_config(phb, pe_no);
        }
}

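/*
 * Raw config space accessors backed by OPAL calls.  A read that fails
 * in OPAL returns all-ones in *val, the usual value seen for an
 * aborted config cycle, so callers can tell the access did not complete.
 */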
int pnv_pci_cfg_read(struct pci_dn *pdn,
                     int where, int size, u32 *val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
        s64 rc;

        switch (size) {
        case 1: {
                u8 v8;
                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
                break;
        }
        case 2: {
                __be16 v16;
                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                    &v16);
                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
                __be32 v32;
                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                 __func__, pdn->busno, pdn->devfn, where, size, *val);
        return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
                      int where, int size, u32 val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;

        pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                 __func__, pdn->busno, pdn->devfn, where, size, val);
        switch (size) {
        case 1:
                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
                break;
        case 2:
                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
                break;
        case 4:
                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
                break;
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        struct eeh_dev *edev = NULL;
        struct pnv_phb *phb = pdn->phb->private_data;

        /* EEH not enabled ? */
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                return true;

        /* PE reset or device removed ? */
        edev = pdn->edev;
        if (edev) {
                if (edev->pe &&
                    (edev->pe->state & EEH_PE_CFG_BLOCKED))
                        return false;

                if (edev->mode & EEH_DEV_REMOVED)
                        return false;
        }

        return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
                               unsigned int devfn,
                               int where, int size, u32 *val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        *val = 0xFFFFFFFF;
        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_read(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
                if (*val == EEH_IO_ERROR_VALUE(size) &&
                    eeh_dev_check_failure(pdn->edev))
                        return PCIBIOS_DEVICE_NOT_FOUND;
        } else {
                pnv_pci_config_check_eeh(pdn);
        }

        return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
                                unsigned int devfn,
                                int where, int size, u32 val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_write(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                pnv_pci_config_check_eeh(pdn);

        return ret;
}

struct pci_ops pnv_pci_ops = {
        .read  = pnv_pci_read_config,
        .write = pnv_pci_write_config,
};

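/*
 * Return a pointer to the TCE for @idx, walking the indirect levels if
 * the table is multi-level: each non-leaf entry holds the physical
 * address of the next level OR'ed with the read/write permission bits,
 * which are masked off before the address is dereferenced.
 */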
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
        __be64 *tmp = ((__be64 *)tbl->it_base);
        int  level = tbl->it_indirect_levels;
        const long shift = ilog2(tbl->it_level_size);
        unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

        while (level) {
                int n = (idx & mask) >> (level * shift);
                unsigned long tce = be64_to_cpu(tmp[n]);

                tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
                idx &= ~mask;
                mask >>= shift;
                --level;
        }

        return tmp + idx;
}

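/*
 * Fill @npages TCE entries starting at @index so they map the
 * physically contiguous kernel buffer at @uaddr with the permissions
 * implied by @direction (write access always implies read).
 */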
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                unsigned long uaddr, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        u64 proto_tce = iommu_direction_to_tce_perm(direction);
        u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
        long i;

        if (proto_tce & TCE_PCI_WRITE)
                proto_tce |= TCE_PCI_READ;

        for (i = 0; i < npages; i++) {
                unsigned long newtce = proto_tce |
                        ((rpn + i) << tbl->it_page_shift);
                unsigned long idx = index - tbl->it_offset + i;

                *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
        }

        return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
                unsigned long *hpa, enum dma_data_direction *direction)
{
        u64 proto_tce = iommu_direction_to_tce_perm(*direction);
        unsigned long newtce = *hpa | proto_tce, oldtce;
        unsigned long idx = index - tbl->it_offset;

        BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

        if (newtce & TCE_PCI_WRITE)
                newtce |= TCE_PCI_READ;

        oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
        *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        *direction = iommu_tce_direction(oldtce);

        return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
        long i;

        for (i = 0; i < npages; i++) {
                unsigned long idx = index - tbl->it_offset + i;

                *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
        }
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
        return *(pnv_tce(tbl, index - tbl->it_offset));
}

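/* Allocate an iommu_table on @nid with an empty RCU group list. */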
struct iommu_table *pnv_pci_table_alloc(int nid)
{
        struct iommu_table *tbl;

        tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
        INIT_LIST_HEAD_RCU(&tbl->it_group_list);

        return tbl;
}

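/*
 * Attach @tbl as table @num of @table_group: allocate a group link on
 * @node, add it to the table's RCU-protected list of attached groups
 * and publish the table pointer in the group's tables[] array.
 */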
long pnv_pci_link_table_and_group(int node, int num,
                struct iommu_table *tbl,
                struct iommu_table_group *table_group)
{
        struct iommu_table_group_link *tgl = NULL;

        if (WARN_ON(!tbl || !table_group))
                return -EINVAL;

        tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
                        node);
        if (!tgl)
                return -ENOMEM;

        tgl->table_group = table_group;
        list_add_rcu(&tgl->next, &tbl->it_group_list);

        table_group->tables[num] = tbl;

        return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
        struct iommu_table_group_link *tgl = container_of(head,
                        struct iommu_table_group_link, rcu);

        kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
                struct iommu_table_group *table_group)
{
        long i;
        bool found;
        struct iommu_table_group_link *tgl;

        if (!tbl || !table_group)
                return;

        /* Remove link to a group from table's list of attached groups */
        found = false;
        list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
                if (tgl->table_group == table_group) {
                        list_del_rcu(&tgl->next);
                        call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
                        found = true;
                        break;
                }
        }
        if (WARN_ON(!found))
                return;

        /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
        found = false;
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (table_group->tables[i] == tbl) {
                        table_group->tables[i] = NULL;
                        found = true;
                        break;
                }
        }
        WARN_ON(!found);
}

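/*
 * Initialise an iommu_table over a pre-allocated block of TCE memory.
 * it_size is expressed in TCEs (8 bytes each), hence the >> 3, and
 * it_offset is the DMA window start expressed in IOMMU pages.
 */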
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset, unsigned page_shift)
{
        tbl->it_blocksize = 16;
        tbl->it_base = (unsigned long)tce_mem;
        tbl->it_page_shift = page_shift;
        tbl->it_offset = dma_offset >> tbl->it_page_shift;
        tbl->it_index = 0;
        tbl->it_size = tce_size >> 3;
        tbl->it_busno = 0;
        tbl->it_type = TCE_PCI;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
        struct pnv_ioda_pe *pe;
        struct pci_dn *pdn;

        /* Fix the VF pdn PE number */
        if (pdev->is_virtfn) {
                pdn = pci_get_pdn(pdev);
                WARN_ON(pdn->pe_number != IODA_INVALID_PE);
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        if (pe->rid == ((pdev->bus->number << 8) |
                            (pdev->devfn & 0xff))) {
                                pdn->pe_number = pe->pe_number;
                                pe->pdev = pdev;
                                break;
                        }
                }
        }
#endif /* CONFIG_PCI_IOV */

        if (phb && phb->dma_dev_setup)
                phb->dma_dev_setup(phb, pdev);
}

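/*
 * Re-associate @bus with the bus-level PE whose RID covers this bus
 * number, e.g. after the bus structure has been recreated.
 */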
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
        struct pci_controller *hose = bus->sysdata;
        struct pnv_phb *phb = hose->private_data;
        struct pnv_ioda_pe *pe;

        list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
                        continue;

                if (!pe->pbus)
                        continue;

                if (bus->number == ((pe->rid >> 8) & 0xFF)) {
                        pe->pbus = bus;
                        break;
                }
        }
}

void pnv_pci_shutdown(void)
{
        struct pci_controller *hose;

        list_for_each_entry(hose, &hose_list, list_node)
                if (hose->controller_ops.shutdown)
                        hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
        struct device_node *np;

        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

        /* If we don't have OPAL, eg. in sim, just skip PCI probe */
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        /* Look for IODA IO-Hubs. */
        for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
                pnv_pci_init_ioda_hub(np);
        }

        /* Look for ioda2 built-in PHB3's */
        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                pnv_pci_init_ioda2_phb(np);

        /* Look for NPU PHBs */
        for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
                pnv_pci_init_npu_phb(np);

        /* Configure IOMMU DMA hooks */
        set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);