| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 21:03:26 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 21:03:26 -0700 |
| commit | bb0fd7ab0986105765d11baa82e619c618a235aa (patch) | |
| tree | 6a0585ece827e1025aa48819959d02155a871be9 /arch/arm/mm/dma-mapping.c | |
| parent | bdfa54dfd9eea001274dbcd622657a904fe43b81 (diff) | |
| parent | 4b2f8838479eb2abe042e094f7d2cced6d5ea772 (diff) | |
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
"Included in this update are both some long term fixes and some new
features.
Fixes:
- An integer overflow in the calculation of ELF_ET_DYN_BASE.
- Avoiding OOMs for high-order IOMMU allocations
- SMP requires the data cache to be enabled for synchronisation
primitives to work, so prevent the CPU_DCACHE_DISABLE option from
being visible on SMP builds.
- A bug going back 10+ years in the noMMU ARM94* CPU support code,
where it corrupts registers. Found by folk getting Linux running
on their cameras.
- Versatile Express needs an errata workaround enabled for CPU
hot-unplug to work.
Features:
- Clean up module linker by handling out of range relocations
separately from relocation cases we don't handle.
- Fix a long-standing bug in the pci_mmap_page_range() code, which we
hope won't impact userspace (we hope there are no users of the
existing broken interface).
- Don't map DMA coherent allocations when we don't have a MMU.
- Drop experimental status for SMP_ON_UP.
- Warn when DT doesn't specify ePAPR mandatory cache properties.
- Add documentation concerning how we find the start of physical
memory for AUTO_ZRELADDR kernels, detailing why we have chosen the
mask and the implications of changing it (the mask arithmetic is
sketched just after this message).
- Updates from Ard Biesheuvel to address some issues with large
kernels (such as allyesconfig) failing to link.
- Allow hibernation to work on modern (ARMv7) CPUs - this appears to
have never worked in the past on these CPUs.
- Enable IRQ_SHOW_LEVEL, which changes the /proc/interrupts output
format (hopefully without userspace breaking... let's hope that if
it causes someone a problem, they tell us.)
- Fix tegra-ahb DT offsets.
- Rework the ARM errata 643719 code (and the ARMv7 flush_cache_louis()/
flush_dcache_all() code) to be more efficient, and enable this
errata workaround by default for ARMv7+SMP CPUs. This complements
the Versatile Express fix above.
- Rework ARMv7 context code for errata 430973, so that only Cortex A8
CPUs are impacted by the branch target buffer flush when this
errata is enabled. Also update the help text to indicate that all
r1p* A8 CPUs are impacted.
- Switch ARM to the generic show_mem() implementation; it conveys all
the information which we were already reporting.
- Prevent slow timer sources from being used for udelay() - timers
running at less than 1MHz are not useful for this, and can cause
udelay() to return immediately, without any wait. Using such a slow
timer is silly (the rounding is sketched after the commit list below).
- VDSO support for 32-bit ARM, mainly for gettimeofday() using the
ARM architected timer.
- Perf support for Scorpion performance monitoring units"
vdso semantic conflict fixed up as per linux-next.
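The AUTO_ZRELADDR item above is easier to see with the arithmetic written out. The sketch below is a stand-alone illustration, not the decompressor itself: the 0xf8000000 mask and the 0x8000 TEXT_OFFSET are assumptions taken from the typical arch/arm/boot/compressed/head.S configuration of this era, where the running program counter is rounded down to a 128MiB boundary to guess the start of physical RAM.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values; the authoritative ones live in the kernel source and
 * in the documentation added by this merge. */
#define AUTO_ZRELADDR_MASK 0xf8000000u /* round PC down to a 128MiB boundary */
#define TEXT_OFFSET        0x00008000u /* typical kernel image text offset */

/* Guess the physical load address the way the decompressor does: mask
 * the current program counter, then add the text offset. */
static uint32_t zreladdr(uint32_t pc)
{
	return (pc & AUTO_ZRELADDR_MASK) + TEXT_OFFSET;
}

int main(void)
{
	/* A zImage executing at 0x80208000 places the kernel at
	 * 0x80008000. RAM starting on anything other than a 128MiB
	 * boundary would make this guess wrong, which is why the
	 * choice of mask matters. */
	printf("0x%08x\n", (unsigned)zreladdr(0x80208000u));
	return 0;
}
```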
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (52 commits)
ARM: update errata 430973 documentation to cover Cortex A8 r1p*
ARM: ensure delay timer has sufficient accuracy for delays
ARM: switch to use the generic show_mem() implementation
ARM: proc-v7: avoid errata 430973 workaround for non-Cortex A8 CPUs
ARM: enable ARM errata 643719 workaround by default
ARM: cache-v7: optimise test for Cortex A9 r0pX devices
ARM: cache-v7: optimise branches in v7_flush_cache_louis
ARM: cache-v7: consolidate initialisation of cache level index
ARM: cache-v7: shift CLIDR to extract appropriate field before masking
ARM: cache-v7: use movw/movt instructions
ARM: allow 16-bit instructions in ALT_UP()
ARM: proc-arm94*.S: fix setup function
ARM: vexpress: fix CPU hotplug with CT9x4 tile.
ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP
ARM: 8335/1: Documentation: DT bindings: Tegra AHB: document the legacy base address
ARM: 8334/1: amba: tegra-ahb: detect and correct bogus base address
ARM: 8333/1: amba: tegra-ahb: fix register offsets in the macros
ARM: 8339/1: Enable CONFIG_GENERIC_IRQ_SHOW_LEVEL
ARM: 8338/1: kexec: Relax SMP validation to improve DT compatibility
ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations
...
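On the delay-timer change in the list above ("ensure delay timer has sufficient accuracy for delays"): converting microseconds to timer ticks truncates, so a timer slower than 1MHz, where one tick is longer than 1us, can turn a short udelay() into zero ticks. The sketch below is a stand-alone illustration of that rounding, not the kernel's delay code; us_to_ticks() is a hypothetical helper.

```c
#include <stdint.h>
#include <stdio.h>

/* Convert a microsecond delay to timer ticks, truncating the way a
 * naive timer-based udelay() would. */
static uint32_t us_to_ticks(uint32_t us, uint32_t timer_hz)
{
	return (uint32_t)(((uint64_t)us * timer_hz) / 1000000u);
}

int main(void)
{
	/* At 32.768 kHz one tick is ~30.5 us, so a 10 us delay truncates
	 * to zero ticks and the "delay" returns immediately. */
	printf("10 us @ 32768 Hz -> %u ticks\n", us_to_ticks(10, 32768));

	/* At 1 MHz and above, every microsecond maps to at least one
	 * tick, which is the accuracy bar the fix enforces when a delay
	 * timer is registered. */
	printf("10 us @ 1 MHz    -> %u ticks\n", us_to_ticks(10, 1000000));
	return 0;
}
```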
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 116 |
1 file changed, 73 insertions, 43 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e315dfe3af1b..09c5fe3d30c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                      pgprot_t prot, struct page **ret_page,
-                                     const void *caller);
+                                     const void *caller, bool want_vaddr);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
-                                 const void *caller);
+                                 const void *caller, bool want_vaddr);
 
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
 
         if (dev_get_cma_area(NULL))
                 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-                                              &page, atomic_pool_init);
+                                              &page, atomic_pool_init, true);
         else
                 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
-                                           &page, atomic_pool_init);
+                                           &page, atomic_pool_init, true);
         if (ptr) {
                 int ret;
 
@@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
-                                 const void *caller)
+                                 const void *caller, bool want_vaddr)
 {
         struct page *page;
-        void *ptr;
+        void *ptr = NULL;
         page = __dma_alloc_buffer(dev, size, gfp);
         if (!page)
                 return NULL;
+        if (!want_vaddr)
+                goto out;
 
         ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
         if (!ptr) {
@@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                 return NULL;
         }
 
+ out:
         *ret_page = page;
         return ptr;
 }
@@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                      pgprot_t prot, struct page **ret_page,
-                                     const void *caller)
+                                     const void *caller, bool want_vaddr)
 {
         unsigned long order = get_order(size);
         size_t count = size >> PAGE_SHIFT;
         struct page *page;
-        void *ptr;
+        void *ptr = NULL;
 
         page = dma_alloc_from_contiguous(dev, count, order);
         if (!page)
@@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
         __dma_clear_buffer(page, size);
 
+        if (!want_vaddr)
+                goto out;
+
         if (PageHighMem(page)) {
                 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
                 if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
                 __dma_remap(page, size, prot);
                 ptr = page_address(page);
         }
+
+ out:
         *ret_page = page;
         return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                   void *cpu_addr, size_t size)
+                                   void *cpu_addr, size_t size, bool want_vaddr)
 {
-        if (PageHighMem(page))
-                __dma_free_remap(cpu_addr, size);
-        else
-                __dma_remap(page, size, PAGE_KERNEL);
+        if (want_vaddr) {
+                if (PageHighMem(page))
+                        __dma_free_remap(cpu_addr, size);
+                else
+                        __dma_remap(page, size, PAGE_KERNEL);
+        }
         dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define nommu()                                                 1
 
-#define __get_dma_pgprot(attrs, prot)                           __pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)      NULL
+#define __get_dma_pgprot(attrs, prot)                           __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)  NULL
 #define __alloc_from_pool(size, ret_page)                       NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c)        NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)    NULL
 #define __free_from_pool(cpu_addr, size)                        0
-#define __free_from_contiguous(dev, page, cpu_addr, size)       do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv)   do { } while (0)
 #define __dma_free_remap(cpu_addr, size)                        do { } while (0)
 
 #endif  /* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-                         gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+                         gfp_t gfp, pgprot_t prot, bool is_coherent,
+                         struct dma_attrs *attrs, const void *caller)
 {
         u64 mask = get_coherent_dma_mask(dev);
         struct page *page = NULL;
         void *addr;
+        bool want_vaddr;
 
 #ifdef CONFIG_DMA_API_DEBUG
         u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
         *handle = DMA_ERROR_CODE;
         size = PAGE_ALIGN(size);
+        want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
         if (is_coherent || nommu())
                 addr = __alloc_simple_buffer(dev, size, gfp, &page);
         else if (!(gfp & __GFP_WAIT))
                 addr = __alloc_from_pool(size, &page);
         else if (!dev_get_cma_area(dev))
-                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
         else
-                addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+                addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 
-        if (addr)
+        if (page)
                 *handle = pfn_to_dma(dev, page_to_pfn(page));
 
-        return addr;
+        return want_vaddr ? addr : page;
 }
 
 /*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                 return memory;
 
         return __dma_alloc(dev, size, handle, gfp, prot, false,
-                           __builtin_return_address(0));
+                           attrs, __builtin_return_address(0));
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
                 return memory;
 
         return __dma_alloc(dev, size, handle, gfp, prot, true,
-                           __builtin_return_address(0));
+                           attrs, __builtin_return_address(0));
 }
 
 /*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                            bool is_coherent)
 {
         struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+        bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
         if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                 return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
         } else if (__free_from_pool(cpu_addr, size)) {
                 return;
         } else if (!dev_get_cma_area(dev)) {
-                __dma_free_remap(cpu_addr, size);
+                if (want_vaddr)
+                        __dma_free_remap(cpu_addr, size);
                 __dma_free_buffer(page, size);
         } else {
                 /*
                  * Non-atomic allocations cannot be freed with IRQs disabled
                  */
                 WARN_ON(irqs_disabled());
-                __free_from_contiguous(dev, page, cpu_addr, size);
+                __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
         }
 }
 
@@ -1135,13 +1150,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
         gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
         while (count) {
-                int j, order = __fls(count);
+                int j, order;
+
+                for (order = __fls(count); order > 0; --order) {
+                        /*
+                         * We do not want OOM killer to be invoked as long
+                         * as we can fall back to single pages, so we force
+                         * __GFP_NORETRY for orders higher than zero.
+                         */
+                        pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+                        if (pages[i])
+                                break;
+                }
 
-                pages[i] = alloc_pages(gfp, order);
-                while (!pages[i] && order)
-                        pages[i] = alloc_pages(gfp, --order);
-                if (!pages[i])
-                        goto error;
+                if (!pages[i]) {
+                        /*
+                         * Fall back to single page allocation.
+                         * Might invoke OOM killer as last resort.
+                         */
+                        pages[i] = alloc_pages(gfp, 0);
+                        if (!pages[i])
+                                goto error;
+                }
 
                 if (order) {
                         split_page(pages[i], order);
@@ -1206,7 +1236,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 static dma_addr_t
 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
         dma_addr_t dma_addr, iova;
         int i, ret = DMA_ERROR_CODE;
@@ -1242,7 +1272,7 @@ fail:
 
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 
         /*
          * add optional in-page offset from iova to size and align
@@ -1457,7 +1487,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                           enum dma_data_direction dir, struct dma_attrs *attrs,
                           bool is_coherent)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t iova, iova_base;
         int ret = 0;
         unsigned int count;
@@ -1678,7 +1708,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
              unsigned long offset, size_t size, enum dma_data_direction dir,
              struct dma_attrs *attrs)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t dma_addr;
         int ret, prot, len = PAGE_ALIGN(size + offset);
 
@@ -1731,7 +1761,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir,
                 struct dma_attrs *attrs)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t iova = handle & PAGE_MASK;
         int offset = handle & ~PAGE_MASK;
         int len = PAGE_ALIGN(size + offset);
@@ -1756,7 +1786,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir,
                 struct dma_attrs *attrs)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t iova = handle & PAGE_MASK;
         struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
         int offset = handle & ~PAGE_MASK;
@@ -1775,7 +1805,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
                 dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t iova = handle & PAGE_MASK;
         struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
         unsigned int offset = handle & ~PAGE_MASK;
@@ -1789,7 +1819,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 static void arm_iommu_sync_single_for_device(struct device *dev,
                 dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
         dma_addr_t iova = handle & PAGE_MASK;
         struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
         unsigned int offset = handle & ~PAGE_MASK;
@@ -1950,7 +1980,7 @@ static int __arm_iommu_attach_device(struct device *dev,
                 return err;
 
         kref_get(&mapping->kref);
-        dev->archdata.mapping = mapping;
+        to_dma_iommu_mapping(dev) = mapping;
 
         pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
         return 0;
@@ -1995,7 +2025,7 @@ static void __arm_iommu_detach_device(struct device *dev)
         iommu_detach_device(mapping->domain, dev);
         kref_put(&mapping->kref, release_iommu_mapping);
-        dev->archdata.mapping = NULL;
+        to_dma_iommu_mapping(dev) = NULL;
 
         pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
@@ -2053,7 +2083,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
-        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 
         if (!mapping)
                 return;
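The want_vaddr plumbing in the diff above is driven by DMA_ATTR_NO_KERNEL_MAPPING. Below is a minimal sketch of how a driver of this era might opt in, assuming a valid struct device; example_alloc() is a hypothetical helper, and per __dma_alloc()'s "return want_vaddr ? addr : page" above, the returned pointer is an opaque cookie rather than a usable virtual address.

```c
#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical example: allocate a DMA buffer the CPU never touches,
 * so no kernel virtual mapping is created for it. */
static int example_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);
	void *cookie;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* With DMA_ATTR_NO_KERNEL_MAPPING set, the return value is an
	 * opaque cookie (on this code path, the struct page pointer);
	 * keep it only for the matching dma_free_attrs() call. */
	cookie = dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;

	/* ... program *handle into the device; never dereference cookie ... */

	dma_free_attrs(dev, size, cookie, *handle, &attrs);
	return 0;
}
```

Skipping the kernel mapping avoids spending vmalloc address space on large buffers that only the device ever accesses, which is the point of the attribute.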