author     Mark McLoughlin <markmc@redhat.com>        2008-11-21 16:56:53 +0000
committer  Joerg Roedel <joerg.roedel@amd.com>        2009-01-03 11:57:36 +0100
commit     19c239ce3d089fee339d1ab7e97b43d6f0557ce5 (patch)
tree       712a8dd5e3a42d2e4c2a4514faa649a11e50a658 /drivers/pci/intel-iommu.c
parent     c07e7d217bef198422b7eface456ecfd4bb1ab87 (diff)
download   linux-next-19c239ce3d089fee339d1ab7e97b43d6f0557ce5.tar.gz
intel-iommu: trivially inline DMA PTE macros
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 71
1 file changed, 48 insertions(+), 23 deletions(-)
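The conversion is mechanical: each do-while(0) macro that poked (p).val directly becomes a static inline function taking a struct dma_pte *, so callers now pass pte rather than *pte, and every call site gets full type checking, single evaluation of its argument, and a symbol the debugger can see. The sketch below is a user-space illustration of that before/after pattern, not kernel code; DMA_PTE_READ, DMA_PTE_WRITE, and VTD_PAGE_MASK here are simplified stand-in values, not the kernel's definitions.

        /*
         * User-space sketch of the macro-to-inline conversion applied by the
         * patch below. NOT the kernel's code: the constants are simplified
         * stand-ins chosen for illustration.
         */
        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DMA_PTE_READ  1ULL           /* stand-in read-permission bit */
        #define DMA_PTE_WRITE 2ULL           /* stand-in write-permission bit */
        #define VTD_PAGE_MASK (~0xfffULL)    /* stand-in: drop the low 12 bits */

        struct dma_pte {
                uint64_t val;
        };

        /* Old style: no type checking; works on any lvalue with a .val member. */
        #define dma_clear_pte_macro(p) do { (p).val = 0; } while (0)

        /* New style: the compiler verifies the argument is a struct dma_pte *. */
        static inline void dma_clear_pte(struct dma_pte *pte)
        {
                pte->val = 0;
        }

        static inline void dma_set_pte_addr(struct dma_pte *pte, uint64_t addr)
        {
                pte->val |= (addr & VTD_PAGE_MASK);  /* ORs bits in, as the macro did */
        }

        static inline bool dma_pte_present(struct dma_pte *pte)
        {
                return (pte->val & 3) != 0;          /* low two bits are R/W flags */
        }

        int main(void)
        {
                struct dma_pte pte = { 0 };

                dma_set_pte_addr(&pte, 0x12345678);  /* low 12 bits masked off */
                pte.val |= DMA_PTE_READ | DMA_PTE_WRITE;
                printf("present=%d val=%#llx\n", dma_pte_present(&pte),
                       (unsigned long long)pte.val);

                dma_clear_pte(&pte);                 /* callers pass pte, not *pte */
                printf("present=%d\n", dma_pte_present(&pte));
                return 0;
        }

Note that dma_set_pte_addr keeps the old macro's |= semantics, only ORing address bits into the entry; callers are expected to start from a cleared PTE, which is why domain_page_mapping in the patch BUG_ON()s if the PTE already holds an address.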
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3be931b3bf98..213a5c87fde2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -164,16 +164,41 @@ static inline void context_clear_entry(struct context_entry *context)
 
 struct dma_pte {
         u64 val;
 };
-#define dma_clear_pte(p)        do {(p).val = 0;} while (0)
-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
-                do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
-#define dma_set_pte_addr(p, addr) do {\
-                (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+        pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+        pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+        pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+        pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+        return (pte->val & VTD_PAGE_MASK);
+}
+
+static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+{
+        pte->val |= (addr & VTD_PAGE_MASK);
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+        return (pte->val & 3) != 0;
+}
 
 struct dmar_domain {
         int id;                 /* domain id */
@@ -487,7 +512,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                 if (level == 1)
                         break;
 
-                if (!dma_pte_present(*pte)) {
+                if (!dma_pte_present(pte)) {
                         tmp_page = alloc_pgtable_page();
 
                         if (!tmp_page) {
@@ -497,16 +522,16 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                         }
                         __iommu_flush_cache(domain->iommu, tmp_page,
                                         PAGE_SIZE);
-                        dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+                        dma_set_pte_addr(pte, virt_to_phys(tmp_page));
                         /*
                          * high level table always sets r/w, last level page
                          * table control read/write
                          */
-                        dma_set_pte_readable(*pte);
-                        dma_set_pte_writable(*pte);
+                        dma_set_pte_readable(pte);
+                        dma_set_pte_writable(pte);
                         __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
                 }
-                parent = phys_to_virt(dma_pte_addr(*pte));
+                parent = phys_to_virt(dma_pte_addr(pte));
                 level--;
         }
 
@@ -529,9 +554,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
                 if (level == total)
                         return pte;
 
-                if (!dma_pte_present(*pte))
+                if (!dma_pte_present(pte))
                         break;
-                parent = phys_to_virt(dma_pte_addr(*pte));
+                parent = phys_to_virt(dma_pte_addr(pte));
                 total--;
         }
         return NULL;
@@ -546,7 +571,7 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
         pte = dma_addr_level_pte(domain, addr, 1);
 
         if (pte) {
-                dma_clear_pte(*pte);
+                dma_clear_pte(pte);
                 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
         }
 }
@@ -593,8 +618,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                         pte = dma_addr_level_pte(domain, tmp, level);
                         if (pte) {
                                 free_pgtable_page(
-                                        phys_to_virt(dma_pte_addr(*pte)));
-                                dma_clear_pte(*pte);
+                                        phys_to_virt(dma_pte_addr(pte)));
+                                dma_clear_pte(pte);
                                 __iommu_flush_cache(domain->iommu, pte,
                                                 sizeof(*pte));
                         }
@@ -1421,9 +1446,9 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
                 /* We don't need lock here, nobody else
                  * touches the iova range
                  */
-                BUG_ON(dma_pte_addr(*pte));
-                dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
-                dma_set_pte_prot(*pte, prot);
+                BUG_ON(dma_pte_addr(pte));
+                dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
+                dma_set_pte_prot(pte, prot);
                 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
                 start_pfn++;
                 index++;
@@ -2584,7 +2609,7 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
         pte = addr_to_dma_pte(domain, iova);
 
         if (pte)
-                pfn = dma_pte_addr(*pte);
+                pfn = dma_pte_addr(pte);
 
         return pfn >> VTD_PAGE_SHIFT;
 }