6d238cc4dc
This patch converts various users of change_page_attr() to the new, more intent driven set_page_*/set_memory_* API set. Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
40 lines
1.2 KiB
C
40 lines
1.2 KiB
C
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/*
 * Functions to keep the agpgart mappings coherent with the MMU. The
 * GART gives the CPU a physical alias of pages in memory. The alias
 * region is mapped uncacheable. Make sure there are no conflicting
 * mappings with different cachability attributes for the same
 * page. This avoids data corruption on some CPUs.
 */

/*
 * Caller's responsibility to call global_flush_tlb() for performance
 * reasons
 */
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
#define flush_agp_mappings() global_flush_tlb()

/*
 * Could use CLFLUSH here if the cpu supports it. But then it would
 * need to be called for each cacheline of the whole page so it may
 * not be worth it. Would need a page for it.
 */
#define flush_agp_cache() wbinvd()

/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
	((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
	free_pages((unsigned long)(table), (order))

#endif /* _ASM_X86_AGP_H */