/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>
/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
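
/*
 * The arithmetic behind the constants above: a 256 MB window with
 * 4 KB pages needs 64K ioptes; at 4 bytes per iopte that is 256 KB
 * of table, i.e. 64 pages, which is what order 6 buys from the page
 * allocator.
 */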
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
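
/*
 * An iopte stores the target physical address shifted right by 4, so
 * MKIOPTE() places the pfn at bit 8 (pfn << (PAGE_SHIFT - 4)), masks
 * it into the IOPTE_PAGE field, ors in the permission bits, and clears
 * the must-be-written-as-zero bits covered by IOPTE_WAZ.
 */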
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;
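	/*
	 * With RNGE set to 256 MB, the window the hardware decodes is
	 * 0xF0000000..0xFFFFFFFF, which is why start/end are fixed here.
	 */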
	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;
	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long)iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);
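
	/* The allocation map has one bit per iopte, hence IOMMU_NPTES >> 3 bytes. */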
	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
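
/*
 * Allocate a run of DVMA pages and point them at @page..@page+@npages-1,
 * returning the bus address of the first one.  The pfn is passed to the
 * bitmap allocator as the color so that, on HyperSparc, the DVMA address
 * gets the same cache color as the physical address (see the num_colors
 * setup above).
 */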
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
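
/*
 * The two flavors below differ only in cache flushing strategy:
 * "gflush" issues a single flush_page_for_dma(0) on CPUs whose flush
 * routine ignores the address and flushes everything, while "pflush"
 * walks the buffer and flushes it page by page.
 */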
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
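
/*
 * Undo iommu_get_one(): clear each iopte, kick the corresponding IOTLB
 * entry, and return the range to the allocation bitmap.
 */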
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}
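
/*
 * Set up a consistent ("DVMA") mapping, as used for coherent
 * allocations: the pages backing @va are flushed from the CPU cache,
 * remapped at @addr with dvma_prot (uncacheable unless DVMA snoops,
 * see ld_mmu_iommu() below), and entered into the IOMMU table with
 * ioperm_noc; the bus address comes back through @pba.
 */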
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
				    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
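
/*
 * Inverse of iommu_map_dma_area(): clear the ioptes, then flush the
 * CPU TLB and the IOTLB before handing the range back to the bitmap.
 */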
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
};
static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
};
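
/*
 * Choose the DMA flushing strategy and precompute the mapping flags.
 * Viking with MXCC and HyperSparc keep DVMA coherent, so consistent
 * mappings may stay cacheable there; everything else gets uncached
 * ptes and ioptes without IOPTE_CACHE.
 */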
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter what page it is */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}