5b6985ce8e
The current Intel IOMMU code assumes that both the host page size and the Intel IOMMU page size are 4KiB. This patch supports variable page size, which provides support for IA64 and its multiple page sizes. It also adds some other code hooks for the IA64 platform, including a DMAR_OPERATION_TIMEOUT definition.

[dwmw2: some cleanup]

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
156 lines
4.1 KiB
C
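A minimal sketch of what the page-size decoupling means in practice (illustrative only, not kernel code; host_page_shift stands in for the kernel's PAGE_SHIFT, and the VTD_* names mirror the macros defined in the header below): on IA64 with 16KiB host pages, one host page spans four 4KiB VT-d pages, so IOMMU mappings must be built in VTD_PAGE_SIZE steps rather than PAGE_SIZE steps.

#include <stdio.h>

#define VTD_PAGE_SHIFT	12			/* VT-d hardware is always 4KiB */
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)

int main(void)
{
	unsigned long host_page_shift = 14;	/* assumption: IA64 with 16KiB pages */
	unsigned long host_page_size = 1UL << host_page_shift;

	/* one host page needs this many VT-d PTEs */
	printf("%lu VT-d pages per host page\n",
	       host_page_size >> VTD_PAGE_SHIFT);	/* prints: 4 */
	return 0;
}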
#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
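
/*
 * Note: IOVA_PFN() below is in units of the host page size (PAGE_SHIFT),
 * while the VTD_PAGE_* macros above are fixed at the 4KiB VT-d hardware
 * page size; keeping these two shifts distinct is what allows hosts with
 * larger page sizes, such as IA64, to drive the IOMMU.
 */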
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
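/* A root table fills exactly one VT-d page: 4096 / 16 = 256 entries,
 * indexed by PCI bus number. */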
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
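/* OR the (page-aligned) context-table address into the entry rather than
 * replacing it; callers are expected to start from a cleared entry. */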
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

struct context_entry;
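/* Return the kernel virtual address of the context table referenced by
 * this root entry, or NULL if the entry is not present. */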
static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
#define context_address_width(c) ((c).hi & 7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))

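/* The setters below assume a cleared entry except where they mask first:
 * context_set_fault_enable() clears bit 1 (fault processing disable), and
 * context_set_translation_type() clears bits 2-3 before setting them. */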
#define context_set_present(c) do {(c).lo |= 1;} while (0)
#define context_set_fault_enable(c) \
	do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
#define context_set_translation_type(c, val) \
	do { \
		(c).lo &= (((u64)-1) << 4) | 3; \
		(c).lo |= ((val) & 3) << 2; \
	} while (0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define context_set_address_root(c, val) \
	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
#define context_set_domain_id(c, val) \
	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
#define dma_clear_pte(p)	do {(p).val = 0;} while (0)

#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)

#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
#define dma_set_pte_prot(p, prot) \
		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
#define dma_set_pte_addr(p, addr) do {\
		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
#define dma_pte_present(p) (((p).val & 3) != 0)
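
/* Illustrative only (the real mapping path lives in intel-iommu.c): a leaf
 * PTE is composed by clearing it, ORing in a VTD_PAGE_MASK-aligned host
 * physical address, then setting the permission bits. Here phys_addr is a
 * stand-in for a real host physical address:
 *
 *	struct dma_pte pte;
 *	dma_clear_pte(pte);
 *	dma_set_pte_addr(pte, phys_addr);
 *	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
 */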

struct intel_iommu;

struct dmar_domain {
	int	id;			/* domain id */
	struct intel_iommu *iommu;	/* back pointer to owning iommu */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
	int		flags;
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* NULL for a PCIe-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};

extern int init_dmars(void);
extern void free_dmar_iommu(struct intel_iommu *iommu);

extern int dmar_disabled;

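/* With CONFIG_DMAR_GFX_WA unset, the graphics mapping workaround collapses
 * to a no-op stub; the real implementation lives in intel-iommu.c. */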
#ifndef CONFIG_DMAR_GFX_WA
static inline void iommu_prepare_gfx_mapping(void)
{
	return;
}
#endif /* !CONFIG_DMAR_GFX_WA */

#endif /* _DMA_REMAPPING_H */