path: root/drivers/iommu/amd_iommu_init.c
author      Joerg Roedel <joerg.roedel@amd.com>    2012-06-15 18:03:31 +0200
committer   Joerg Roedel <joerg.roedel@amd.com>    2012-09-28 17:31:41 +0200
commit      33f28c59e18d83fd2aeef258d211be66b9b80eb3 (patch)
tree        37076d60ce5e1b7830edad4ece164361d3185ecb /drivers/iommu/amd_iommu_init.c
parent      eb1eb7ae65a9d32f6c16a90419caf01221f94734 (diff)
download    linux-next-33f28c59e18d83fd2aeef258d211be66b9b80eb3.tar.gz
iommu/amd: Split device table initialization into irq and dma part
When the IOMMU is enabled very early (as it is with irq-remapping), some devices are still under BIOS control. Blocking DMA that early can cause lots of IO_PAGE_FAULTs, so delay the DMA initialization and do it right before the dma_ops are initialized.

To be secure, block all interrupts by default when irq-remapping is enabled in the system; they will be re-enabled on demand later. Without blocking interrupts by default, devices can issue arbitrary interrupts by sending special DMA packets to the CPU that look like MSI messages. This is especially dangerous when a device is assigned to a KVM guest, because the guest can then DoS the host.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
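For context, below is a minimal standalone sketch of the ordering this patch establishes, not the kernel code itself: the interrupt-remap-table bit is set for every device ID at early init, while the DMA-blocking bits are applied only later, right before dma_ops setup. The helpers, flag values, and the amd_iommu_irq_remap / amd_iommu_last_bdf stand-ins are simplified assumptions for illustration; the real DMA-side bits are not shown in this diff and are modeled here as generic valid/translation flags.

/*
 * Standalone model of the split. The real set_dev_entry_bit(),
 * DEV_ENTRY_* flags and amd_iommu_last_bdf live in the kernel;
 * here they are mocked so only the ordering is shown.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_BDF        0xFFFF          /* stand-in for amd_iommu_last_bdf  */
#define ENTRY_VALID     (1u << 0)       /* DMA: entry is valid              */
#define ENTRY_TRANSLATE (1u << 1)       /* DMA: translation enabled         */
#define ENTRY_IRQ_TBL   (1u << 2)       /* IRQ: remap table required        */

static uint32_t dev_table[LAST_BDF + 1];
static bool irq_remap_enabled = true;   /* stand-in for amd_iommu_irq_remap */

/* Early init: only force interrupts through the (not yet populated) remap
 * table; leave DMA alone so devices still owned by the BIOS do not flood
 * the log with IO_PAGE_FAULTs. */
static void init_device_table_irq(void)
{
	uint32_t devid;

	if (!irq_remap_enabled)
		return;

	for (devid = 0; devid <= LAST_BDF; ++devid)
		dev_table[devid] |= ENTRY_IRQ_TBL;
}

/* Late init (right before dma_ops are set up): now it is safe to mark every
 * entry valid with translation on, which blocks untranslated DMA. */
static void init_device_table_dma(void)
{
	uint32_t devid;

	for (devid = 0; devid <= LAST_BDF; ++devid)
		dev_table[devid] |= ENTRY_VALID | ENTRY_TRANSLATE;
}

int main(void)
{
	init_device_table_irq();        /* early, alongside irq-remapping setup */
	/* ... BIOS hands devices over, driver core comes up ... */
	init_device_table_dma();        /* deferred into amd_iommu_init_dma()   */

	printf("devid 0 flags: 0x%x\n", (unsigned)dev_table[0]);
	return 0;
}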
Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
-rw-r--r--    drivers/iommu/amd_iommu_init.c    19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f8a222b0ac3f..8a7f1971d633 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1381,7 +1381,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  * Init the device table to not allow DMA access for devices and
  * suppress all page faults
  */
-static void init_device_table(void)
+static void init_device_table_dma(void)
 {
 	u32 devid;
 
@@ -1391,6 +1391,17 @@ static void init_device_table(void)
 	}
 }
 
+static void init_device_table(void)
+{
+	u32 devid;
+
+	if (!amd_iommu_irq_remap)
+		return;
+
+	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+}
+
 static void iommu_init_flags(struct amd_iommu *iommu)
 {
 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1781,8 +1792,14 @@ static bool detect_ivrs(void)
 
 static int amd_iommu_init_dma(void)
 {
+	struct amd_iommu *iommu;
 	int ret;
 
+	init_device_table_dma();
+
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else