static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
+/*
+ * For dynamic growth, the aperture size is split into ranges of 128MB
+ * of DMA address space each. This struct represents one such range.
+ */
+struct aperture_range {
+
+ spinlock_t bitmap_lock;
+
+ /* address allocation bitmap */
+ unsigned long *bitmap;
+
+ /*
+ * Array of PTE pages for the aperture. In this array we save all the
+ * leaf pages of the domain page table used for the aperture. This way
+ * we don't need to walk the page table to find a specific PTE. We can
+ * just calculate its address in constant time.
+ */
+ u64 *pte_pages[64];
+
+ /* offset of this range within the overall aperture */
+ unsigned long offset;
+};
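+
+/*
+ * Sketch of the constant-time lookup this array enables (assuming 4K
+ * pages and the APERTURE_* helpers from amd_iommu_types.h): each leaf
+ * page holds 512 PTEs and so covers 2MB, which is why a 128MB range
+ * needs exactly the 64 entries above. The leaf PTE for a DMA address
+ * 'addr' is then
+ *
+ *     range = dom->aperture[APERTURE_RANGE_INDEX(addr)];
+ *     pte   = &range->pte_pages[APERTURE_PAGE_INDEX(addr)]
+ *                              [PM_LEVEL_INDEX(0, addr)];
+ *
+ * and no page-table walk is required.
+ */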
+
+/*
+ * Data container for a dma_ops-specific protection domain
+ */
+struct dma_ops_domain {
+ /* generic protection domain information */
+ struct protection_domain domain;
+
+ /* size of the aperture for the mappings */
+ unsigned long aperture_size;
+
+ /* address at which we start searching for free addresses */
+ unsigned long next_address;
+
+ /* the aperture ranges that make up the address space */
+ struct aperture_range *aperture[APERTURE_MAX_RANGES];
+
+ /* This will be set to true when the TLB needs to be flushed */
+ bool need_flush;
+};
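+
+/*
+ * With 128MB per range and APERTURE_MAX_RANGES entries (32 in
+ * amd_iommu_types.h at the time of this patch), a dma_ops domain can
+ * grow its aperture to at most 4GB of DMA address space.
+ */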
+
/****************************************************************************
*
* Helper functions
dma_dom->aperture[index]->offset = dma_dom->aperture_size;
+ spin_lock_init(&dma_dom->aperture[index]->bitmap_lock);
+
if (populate) {
unsigned long address = dma_dom->aperture_size;
int i, num_ptes = APERTURE_RANGE_PAGES / 512;
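+ /*
+  * With 512 PTEs per leaf page and 4K pages this works out to
+  * APERTURE_RANGE_PAGES / 512 == 64 leaf pages per 128MB range,
+  * matching the pte_pages[64] array in struct aperture_range.
+  */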
unsigned long boundary_size, mask;
unsigned long address = -1;
unsigned long limit;
+ unsigned long flags;
next_bit >>= PAGE_SHIFT;
limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
dma_mask >> PAGE_SHIFT);
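+ /*
+  * The per-range bitmap_lock serializes allocators and the free
+  * path. Note that 'offset' (the range's start, in pages) is now
+  * passed as the shift argument, so iommu_area_alloc() performs its
+  * segment-boundary check on absolute DMA page addresses rather
+  * than range-relative ones.
+  */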
+ spin_lock_irqsave(&dom->aperture[i]->bitmap_lock, flags);
address = iommu_area_alloc(dom->aperture[i]->bitmap,
- limit, next_bit, pages, 0,
+ limit, next_bit, pages, offset,
boundary_size, align_mask);
+ spin_unlock_irqrestore(&dom->aperture[i]->bitmap_lock, flags);
if (address != -1) {
address = dom->aperture[i]->offset +
(address << PAGE_SHIFT);
{
unsigned i = address >> APERTURE_RANGE_SHIFT;
struct aperture_range *range = dom->aperture[i];
+ unsigned long flags;
BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
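+ /* the bitmap may be accessed concurrently by dma_ops_area_alloc() */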
+ spin_lock_irqsave(&range->bitmap_lock, flags);
bitmap_clear(range->bitmap, address, pages);
+ spin_unlock_irqrestore(&range->bitmap_lock, flags);
}
else if (direction == DMA_BIDIRECTIONAL)
__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
- WARN_ON(*pte);
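+ /* warn only once so a large mapping cannot flood the kernel log */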
+ WARN_ON_ONCE(*pte);
*pte = __pte;
pte += PM_LEVEL_INDEX(0, address);
- WARN_ON(!*pte);
+ WARN_ON_ONCE(!*pte);
*pte = 0ULL;
}
dma_ops_domain_unmap(dma_dom, start);
}
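+
+ /* flush stale IO/TLB entries before the addresses can be reused */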
+ domain_flush_pages(&dma_dom->domain, address, size);
+
dma_ops_free_addresses(dma_dom, address, pages);
return DMA_ERROR_CODE;
start += PAGE_SIZE;
}
- SUB_STATS_COUNTER(alloced_io_mem, size);
-
- dma_ops_free_addresses(dma_dom, dma_addr, pages);
-
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
domain_flush_pages(&dma_dom->domain, flush_addr, size);
dma_dom->need_flush = false;
}
+
+ SUB_STATS_COUNTER(alloced_io_mem, size);
+
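+ /*
+  * Free the addresses only after the IO/TLB flush above, so that
+  * they cannot be reallocated while stale translations still exist.
+  */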
+ dma_ops_free_addresses(dma_dom, dma_addr, pages);
}
/*