author | Joerg Roedel <joerg.roedel@amd.com> | 2011-11-17 14:18:46 +0100
---|---|---
committer | Joerg Roedel <joerg.roedel@amd.com> | 2011-12-12 14:55:13 +0100
commit | 132bd68f180dd5de9176e20532910503f6393f14 (patch) |
tree | 1fc1a0f0417427a47c6d352a3969ca1e1bf9f5fc /drivers/iommu/amd_iommu.c |
parent | 72e1dcc4192288ad5e37888aa1dbb23b3ef4aa9a (diff) |
download | linux-next-132bd68f180dd5de9176e20532910503f6393f14.tar.gz |
iommu/amd: Add amd_iommu_domain_direct_map function
This function can be used to switch a domain into
paging-mode 0. In this mode, all devices attached to the
domain access physical system memory directly, without any
remapping.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
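
For illustration only (not part of this commit): a minimal sketch of how a caller might use the new export to put a domain into direct-map mode before attaching a device. It assumes the generic IOMMU API of this kernel era and that the prototype is declared in <linux/amd-iommu.h> as elsewhere in this series; the helper name example_setup_direct_map and the error handling are invented for the example.

```c
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/amd-iommu.h>	/* assumed to carry the new prototype */

/* Hypothetical caller: allocate a domain, drop it to paging-mode 0,
 * then attach the device so its DMA hits physical memory 1:1. */
static struct iommu_domain *example_setup_direct_map(struct pci_dev *pdev)
{
	struct iommu_domain *dom;

	dom = iommu_domain_alloc(&pci_bus_type);
	if (!dom)
		return NULL;

	/* Switch to PAGE_MODE_NONE before any mappings exist */
	amd_iommu_domain_direct_map(dom);

	if (iommu_attach_device(dom, &pdev->dev)) {
		iommu_domain_free(dom);
		return NULL;
	}

	return dom;
}
```

After this call, amd_iommu_map()/amd_iommu_unmap() return -EINVAL for the domain, since there is no page table left to modify.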
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r-- | drivers/iommu/amd_iommu.c | 38 |
1 file changed, 36 insertions(+), 2 deletions(-)
```diff
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index db9b788c28ba..6ed536769102 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1684,9 +1684,12 @@ static bool dma_ops_domain(struct protection_domain *domain)
 
 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 {
-	u64 pte_root = virt_to_phys(domain->pt_root);
+	u64 pte_root = 0;
 	u64 flags = 0;
 
+	if (domain->mode != PAGE_MODE_NONE)
+		pte_root = virt_to_phys(domain->pt_root);
+
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
@@ -2782,7 +2785,8 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	BUG_ON(domain->dev_cnt != 0);
 
-	free_pagetable(domain);
+	if (domain->mode != PAGE_MODE_NONE)
+		free_pagetable(domain);
 
 	protection_domain_free(domain);
 
@@ -2846,6 +2850,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	int prot = 0;
 	int ret;
 
+	if (domain->mode == PAGE_MODE_NONE)
+		return -EINVAL;
+
 	if (iommu_prot & IOMMU_READ)
 		prot |= IOMMU_PROT_IR;
 	if (iommu_prot & IOMMU_WRITE)
@@ -2864,6 +2871,9 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	struct protection_domain *domain = dom->priv;
 	unsigned long page_size, unmap_size;
 
+	if (domain->mode == PAGE_MODE_NONE)
+		return -EINVAL;
+
 	page_size = 0x1000UL << gfp_order;
 
 	mutex_lock(&domain->api_lock);
@@ -2883,6 +2893,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	phys_addr_t paddr;
 	u64 *pte, __pte;
 
+	if (domain->mode == PAGE_MODE_NONE)
+		return iova;
+
 	pte = fetch_pte(domain, iova);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2976,3 +2989,24 @@ int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
 	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
 }
 EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	/* Update data structure */
+	domain->mode    = PAGE_MODE_NONE;
+	domain->updated = true;
+
+	/* Make changes visible to IOMMUs */
+	update_domain(domain);
+
+	/* Page-table is not visible to IOMMU anymore, so free it */
+	free_pagetable(domain);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
```