#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
*/
#define flush_agp_cache() wbinvd()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
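The pattern removed above repeats across the per-architecture headers:
phys_to_gart() and gart_to_phys() were identity macros in every one of
them, which is why the call-site hunks below can substitute plain
page_to_phys() and virt_to_phys() without changing behaviour.

	/* Removed from each arch header (identical identity definitions):
	 *	#define phys_to_gart(x)	(x)
	 *	#define gart_to_phys(x)	(x)
	 */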
void (*agp_enable)(struct agp_bridge_data *, u32);
void (*cleanup)(void);
void (*tlb_flush)(struct agp_memory *);
- unsigned long (*mask_memory)(struct agp_bridge_data *, struct page *, int);
+ unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
void (*cache_flush)(void);
int (*create_gatt_table)(struct agp_bridge_data *);
int (*free_gatt_table)(struct agp_bridge_data *);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
+
+ int (*agp_map_page)(struct page *page, dma_addr_t *ret);
+ void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
+ int (*agp_map_memory)(struct agp_memory *mem);
+ void (*agp_unmap_memory)(struct agp_memory *mem);
};
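The four new hooks let a chipset driver route AGP pages through the PCI
DMA API instead of handing raw physical addresses to the GART; a bridge
that leaves them all NULL keeps the old page_to_phys() behaviour. A
minimal sketch of the per-page pair, with hypothetical "foo" names
(only the hook signatures come from this patch):

	static struct pci_dev *foo_pdev;	/* hypothetical device handle */

	static int foo_agp_map_page(struct page *page, dma_addr_t *ret)
	{
		*ret = pci_map_page(foo_pdev, page, 0, PAGE_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(foo_pdev, *ret))
			return -EINVAL;
		return 0;
	}

	static void foo_agp_unmap_page(struct page *page, dma_addr_t dma)
	{
		pci_unmap_page(foo_pdev, dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
	}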
struct agp_bridge_data {
u32 __iomem *gatt_table;
u32 *gatt_table_real;
unsigned long scratch_page;
- unsigned long scratch_page_real;
+ struct page *scratch_page_page;
+ dma_addr_t scratch_page_dma;
unsigned long gart_bus_addr;
unsigned long gatt_bus_addr;
u32 mode;
void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type);
+ dma_addr_t phys, int type);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
#define AGP_GENERIC_SIZES_ENTRIES 11
extern const struct aper_size_info_16 agp3_generic_sizes[];
-#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
-#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
-
extern int agp_off;
extern int agp_try_unsupported_boot;
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
return page;
}
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
}
agp_generic_destroy_page(page, flags);
}
#ifndef CONFIG_X86
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
- page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
+ writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(agp_bridge,
- mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ page_to_phys(mem->pages[i]),
+ mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type);
+ page_to_phys(mem->pages[i]),
+ mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
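The PTE packing is worth a worked example: the GART holds a 40-bit bus
address in a 32-bit entry by folding address bits 32-39 into PTE bits
4-11.

	/* tmp == 0x200000000ULL (address bit 33 set)
	 * (tmp & 0x000000ff00000000ULL) >> 28 == 0x20	-> PTE bit 5
	 *
	 * The BUG_ON() above rejects values the format cannot hold
	 * (anything with bits 2-11 or 40-63 set). */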
static int amd_8151_configure(void)
{
- unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
+ unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
int i;
/* Configure AGP regs in each x86-64 host bridge. */
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
- release_mem_region(virt_to_gart(bridge->gatt_table_real),
+ release_mem_region(virt_to_phys(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mem->type),
+ page_to_phys(mem->pages[i]),
+ mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
+ writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
return -ENOMEM;
}
- bridge->scratch_page_real = phys_to_gart(page_to_phys(page));
- bridge->scratch_page =
- bridge->driver->mask_memory(bridge, page, 0);
+ bridge->scratch_page_page = page;
+ if (bridge->driver->agp_map_page) {
+ if (bridge->driver->agp_map_page(page,
+ &bridge->scratch_page_dma)) {
+ dev_err(&bridge->dev->dev,
+ "unable to dma-map scratch page\n");
+ rc = -ENOMEM;
+ goto err_out_nounmap;
+ }
+ } else {
+ bridge->scratch_page_dma = page_to_phys(page);
+ }
+
+ bridge->scratch_page = bridge->driver->mask_memory(bridge,
+ bridge->scratch_page_dma, 0);
}
size_value = bridge->driver->fetch_size();
return 0;
err_out:
+ if (bridge->driver->needs_scratch_page &&
+ bridge->driver->agp_unmap_page) {
+ bridge->driver->agp_unmap_page(bridge->scratch_page_page,
+ bridge->scratch_page_dma);
+ }
+err_out_nounmap:
if (bridge->driver->needs_scratch_page) {
- void *va = gart_to_virt(bridge->scratch_page_real);
+ void *va = page_address(bridge->scratch_page_page);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page) {
- void *va = gart_to_virt(bridge->scratch_page_real);
+ void *va = page_address(bridge->scratch_page_page);
+
+ if (bridge->driver->agp_unmap_page)
+ bridge->driver->agp_unmap_page(bridge->scratch_page_page,
+ bridge->scratch_page_dma);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(struct page *page)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
+ unsigned long addr = page_to_phys(page);
return addr | 0x00000001;
}
efficeon_private.l1_table[index] = page;
- value = virt_to_gart((unsigned long *)page) | pati | present | index;
+ value = virt_to_phys((unsigned long *)page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
curr->bridge->driver->cache_flush();
curr->is_flushed = true;
}
+
+ if (curr->bridge->driver->agp_map_memory) {
+ ret_val = curr->bridge->driver->agp_map_memory(curr);
+ if (ret_val)
+ return ret_val;
+ }
ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
if (ret_val != 0)
if (ret_val != 0)
return ret_val;
+ if (curr->bridge->driver->agp_unmap_memory)
+ curr->bridge->driver->agp_unmap_memory(curr);
+
curr->is_bound = false;
curr->pg_start = 0;
spin_lock(&curr->bridge->mapped_lock);
set_memory_uc((unsigned long)table, 1 << page_order);
bridge->gatt_table = (void *)table;
#else
- bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+ bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
return -ENOMEM;
}
- bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
+ writel(bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type),
bridge->gatt_table+j);
}
readl(bridge->gatt_table+j-1); /* PCI Posting. */
EXPORT_SYMBOL(global_cache_flush);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
/* memory type is ignored in the generic routine */
if (bridge->driver->masks)
return addr | bridge->driver->masks[0].mask;
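The generic routine now takes the DMA address directly: the GATT entry
is just that address with the chipset's first mask ORed in. A worked
example, assuming a hypothetical bridge whose masks[0].mask is a lone
valid bit:

	/* masks[0].mask == 0x00000001, page DMA-mapped at 0x12345000
	 * => agp_generic_mask_memory() returns 0x12345001, i.e. the
	 *    bus address with the valid bit set. */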
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+ hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
- writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
}
static unsigned long
-hp_zx1_mask_memory (struct agp_bridge_data *bridge,
- struct page *page, int type)
+hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
return HP_ZX1_PDIR_VALID_BIT | addr;
}
#define WR_FLUSH_GATT(index) RD_GATT(index)
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- unsigned long addr, int type);
+ dma_addr_t addr, int type);
static struct {
void *gatt; /* ioremap'd GATT area */
io_page_size = 1UL << I460_IO_PAGE_SHIFT;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- paddr = phys_to_gart(page_to_phys(mem->pages[i]));
+ paddr = page_to_phys(mem->pages[i]);
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
}
return -ENOMEM;
}
- lp->paddr = phys_to_gart(page_to_phys(lp->page));
+ lp->paddr = page_to_phys(lp->page);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
#endif /* I460_LARGE_IO_PAGES */
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ dma_addr_t addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
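A worked example of the encoding, assuming 4 KB I/O pages
(I460_IO_PAGE_SHIFT == 12):

	/* addr == 0x12345678
	 * addr & ~((1 << 12) - 1)		== 0x12345000
	 * (0x12345000 & 0xfffff000) >> 12	== 0x00012345
	 *
	 * so the GATT entry is masks[0].mask | 0x12345. */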
-static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
-{
- unsigned long addr = phys_to_gart(page_to_phys(page));
- return i460_mask_memory(bridge, addr, type);
-}
-
const struct agp_bridge_driver intel_i460_driver = {
.owner = THIS_MODULE,
.aperture_sizes = i460_sizes,
.fetch_size = i460_fetch_size,
.cleanup = i460_cleanup,
.tlb_flush = i460_tlb_flush,
- .mask_memory = i460_page_mask_memory,
+ .mask_memory = i460_mask_memory,
.masks = i460_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
#include <linux/agp_backend.h>
#include "agp.h"
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
int resource_valid;
} intel_private;
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+ *ret = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+ return -EINVAL;
+ return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+ pci_unmap_page(intel_private.pcidev, dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+ struct sg_table st;
+
+ st.sgl = mem->sg_list;
+ st.orig_nents = st.nents = mem->page_count;
+
+ sg_free_table(&st);
+
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ return -ENOMEM;
+
+ mem->sg_list = sg = st.sgl;
+
+ for (i = 0; i < mem->page_count; i++, sg = sg_next(sg))
+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!mem->num_sg)) {
+ intel_agp_free_sglist(mem);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ intel_agp_free_sglist(mem);
+}
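+
+/* Taken together, these two helpers bracket the life of a bound
+ * region: agp_bind_memory() calls agp_map_memory() (building sg_list
+ * and DMA-mapping it) before insert_memory() writes the GTT entries,
+ * and agp_unbind_memory() calls agp_unmap_memory() after the entries
+ * are torn down.
+ */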
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ struct scatterlist *sg;
+ int i, j;
+
+ j = pg_start;
+
+ WARN_ON(!mem->num_sg);
+
+ if (mem->num_sg == mem->page_count) {
+ for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg), mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ } else {
+ /* sg may merge pages, but we have to separate
+ * per-page addresses for the GTT */
+ unsigned int len, m;
+
+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+ len = sg_dma_len(sg) / PAGE_SIZE;
+ for (m = 0; m < len; m++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg) + m * PAGE_SIZE,
+ mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
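+
+/* Example of the merged case above: if pci_map_sg() coalesced four
+ * pages into one scatterlist entry (sg_dma_len(sg) == 4 * PAGE_SIZE),
+ * the inner loop still emits four GTT entries, at sg_dma_address(sg)
+ * plus 0, 1, 2 and 3 times PAGE_SIZE.
+ */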
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ int i, j;
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.gtt+j);
+ }
+
+ readl(intel_private.gtt+j-1);
+}
+
+#endif
+
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i],
- mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
}
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
intel_i9xx_setup_flush();
+#ifdef USE_PCI_DMA_API
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+#endif
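+
+/* A 36-bit mask is presumably the ceiling of what the GTT entry format
+ * can encode on these chipsets: intel_i965_mask_memory() below folds
+ * address bits 32-35 into PTE bits 4-7, so a stricter DMA mask keeps
+ * every mapped address representable. */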
+
return 0;
}
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
- int i, j, num_entries;
+ int num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
if ((pg_start + mem->page_count) > num_entries)
goto out_err;
- /* The i915 can't check the GTT for entries since its read only,
+ /* The i915 can't check the GTT for entries since it's read only;
* depend on the caller to make the correct offset decisions.
*/
if (!mem->is_flushed)
global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type), intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
+ intel_agp_insert_sg_entries(mem, pg_start, mask_type);
agp_bridge->driver->tlb_flush(mem);
out:
* this conditional.
*/
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- dma_addr_t addr = phys_to_gart(page_to_phys(page));
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
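A worked example of the fold, for a 36-bit bus address:

	/* addr == 0x200000000ULL (address bit 33 set)
	 * (addr >> 28) & 0xf0 == 0x20	-> PTE bit 5
	 *
	 * PTE bits 4-7 thus carry address bits 32-35, which is how a
	 * 36-bit address fits in a 32-bit GTT entry. */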
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static const struct agp_bridge_driver intel_i965_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static const struct agp_bridge_driver intel_7505_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static int find_gmch(u16 device)
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
- pci_restore_state(pdev);
-
- /* We should restore our graphics device's config space,
- * as host bridge (00:00) resumes before graphics device (02:00),
- * then our access to its pci space can work right.
- */
- if (intel_private.pcidev)
- pci_restore_state(intel_private.pcidev);
-
if (bridge->driver == &intel_generic_driver)
intel_configure();
else if (bridge->driver == &intel_850_driver)
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
}
#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type);
static struct _parisc_agp_info {
}
static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type)
{
return SBA_PDIR_VALID_BIT | addr;
}
-static unsigned long
-parisc_agp_page_mask_memory(struct agp_bridge_data *bridge, struct page *page,
- int type)
-{
- unsigned long addr = phys_to_gart(page_to_phys(page));
- return SBA_PDIR_VALID_BIT | addr;
-}
-
static void
parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
* entry.
*/
static unsigned long
-sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
return tioca_physpage_to_gart(addr);
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
table[j] =
- bridge->driver->mask_memory(bridge, mem->pages[i],
+ bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
mem->type);
}
/* Create a fake scratch directory */
for (i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
- writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++)
- writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge, mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
return 0;
if (is_u3) {
pci_write_config_dword(agp_bridge->dev,
UNI_N_CFG_GART_DUMMY_PAGE,
- agp_bridge->scratch_page_real >> 12);
+ page_to_phys(agp_bridge->scratch_page_page) >> 12);
}
return 0;
bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table;
- bridge->gatt_bus_addr = virt_to_gart(table);
+ bridge->gatt_bus_addr = virt_to_phys(table);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;
u32 physical;
bool is_bound;
bool is_flushed;
- bool vmalloc_flag;
+ bool vmalloc_flag;
/* list of agp_memory mapped to the aperture */
struct list_head mapped_list;
+ /* DMA-mapped addresses */
+ struct scatterlist *sg_list;
+ int num_sg;
};
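A note on the two new fields, matching how the Intel hooks above use
them:

	/* sg_list is built with one entry per page by agp_map_memory();
	 * num_sg is the count returned by pci_map_sg() and may be
	 * smaller than page_count when the IOMMU merges adjacent pages
	 * (intel_agp_insert_sg_entries() copes with both cases). */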
#define AGP_NORMAL_MEMORY 0