e57778a1e3
Xen has a pte update function which will update a pte while preserving its accessed and dirty bits. This means that ptep_modify_prot_start() can be implemented as a simple read of the pte value. The hardware may update the pte in the meantime, but ptep_modify_prot_commit() updates it while preserving any changes that may have happened in the meantime. The updates in ptep_modify_prot_commit() are batched if we're currently in lazy mmu mode. The mmu_update hypercall can take a batch of updates to perform, but this code doesn't make particular use of that feature, in favour of using generic multicall batching to get them all into the hypervisor. The net effect of this is that each mprotect pte update turns from two expensive trap-and-emulate faults into the hypervisor into a single hypercall whose cost is amortized in a batched multicall. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
59 lines
1.8 KiB
C
59 lines
1.8 KiB
C
#ifndef _XEN_MMU_H
/*
 * Define the guard macro so repeated inclusion is a no-op.  Without this
 * #define the #ifndef above never becomes false, and a second #include
 * of this header would redefine enum pt_level — a hard compile error.
 */
#define _XEN_MMU_H

#include <linux/linkage.h>
#include <asm/page.h>
/*
 * Which level of the pagetable hierarchy an operation acts on, ordered
 * from the root of the hierarchy (PGD) down to leaf entries (PTE).
 */
enum pt_level {
	PT_PGD,		/* page global directory (root) */
	PT_PUD,		/* page upper directory */
	PT_PMD,		/* page middle directory */
	PT_PTE		/* leaf page-table entry */
};
|
|
|
|
/*
 * Page-directory addresses above 4GB do not fit into architectural %cr3.
 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
 * must use the following accessor macros to pack/unpack valid MFNs.
 *
 * Note that Xen is using the fact that the pagetable base is always
 * page-aligned, and putting the 12 MSB of the address into the 12 LSB
 * of cr3.
 */
/* Pack: rotate the 32-bit MFN left by 12 — its low 20 bits land in the
 * high 20 bits of cr3, its top 12 bits in cr3's page-offset bits. */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
/* Unpack: the inverse rotation (right by 12) recovers the MFN. */
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
|
|
|
|
|
|
/* Install a pte mapping frame @pfn with protection @flags at @vaddr. */
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

/* Hooks invoked on mm lifecycle events: switch-in, fork-time dup, exit. */
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
void xen_exit_mmap(struct mm_struct *mm);

void xen_pgd_pin(pgd_t *pgd);
//void xen_pgd_unpin(pgd_t *pgd);

/* Convert architecture pte/pmd/pgd types to their raw integer values. */
pteval_t xen_pte_val(pte_t);
pmdval_t xen_pmd_val(pmd_t);
pgdval_t xen_pgd_val(pgd_t);

/* ...and construct the typed entries back from raw values. */
pte_t xen_make_pte(pteval_t);
pmd_t xen_make_pmd(pmdval_t);
pgd_t xen_make_pgd(pgdval_t);

/*
 * Pagetable entry updates routed through Xen rather than written
 * directly.  NOTE(review): the _hyper variants presumably target
 * entries already pinned by the hypervisor — confirm against mmu.c.
 */
void xen_set_pte(pte_t *ptep, pte_t pteval);
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval);
void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
void xen_set_pud(pud_t *ptr, pud_t val);
void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
void xen_set_pud_hyper(pud_t *ptr, pud_t val);
void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_pmd_clear(pmd_t *pmdp);

/*
 * Two-phase pte update for mprotect-style changes.  Because Xen's pte
 * update preserves the accessed/dirty bits, _start is a simple read of
 * the pte (no atomic exchange needed); _commit then writes the new
 * value while keeping any hardware A/D changes made in between, and is
 * batched via the generic multicall machinery when in lazy MMU mode.
 */
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte);

#endif	/* _XEN_MMU_H */
|