author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 14:35:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 14:35:59 -0700
commit		12f03ee606914317e7e6a0815e53a48205c31dae
tree		f8579bf77d29b3921e1877e0ae12ec65b5ebc738	/include/linux/pmem.h
parent		d9241b22b58e012f26dd2244508d9f4837402af0
parent		004f1afbe199e6ab20805b95aefd83ccd24bc5c7
Merge tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
"This update has successfully completed a 0day-kbuild run and has
appeared in a linux-next release. The changes outside of the typical
drivers/nvdimm/ and drivers/acpi/nfit.[ch] paths are related to the
removal of IORESOURCE_CACHEABLE, the introduction of memremap(), and
the introduction of ZONE_DEVICE + devm_memremap_pages().
Summary:
- Introduce ZONE_DEVICE and devm_memremap_pages() as a generic
mechanism for adding device-driver-discovered memory regions to the
kernel's direct map.
This facility is used by the pmem driver to enable pfn_to_page()
operations on the page frames returned by DAX ('direct_access' in
'struct block_device_operations').
For now, the 'memmap' allocation for these "device" pages comes
from "System RAM"; support for allocating the memmap from device
memory will arrive in a later kernel. (A usage sketch of
devm_memremap_pages() follows this message.)
- Introduce memremap() to replace usages of ioremap_cache() and
ioremap_wt(). memremap() drops the __iomem annotation for these
mappings, since the memory behind them has no i/o side effects. The
replacement of ioremap_cache() with memremap() is limited to the
pmem driver to ease merging the api change in v4.3; completing the
conversion is targeted for v4.4. (A memremap() sketch follows the
commit list below.)
- Similar to the usage of memcpy_to_pmem() + wmb_pmem() in the pmem
driver, update the VFS DAX implementation and PMEM api to provide
persistence guarantees for kernel operations on a DAX mapping. (A
sketch of the pattern follows the diff at the end of this page.)
- Convert the ACPI NFIT 'BLK' driver to map the block apertures as
cacheable to improve performance.
- Miscellaneous updates and fixes to libnvdimm including support for
issuing "address range scrub" commands, clarifying the optimal
'sector size' of pmem devices, a clarification of the usage of the
ACPI '_STA' (status) property for DIMM devices, and other minor
fixes"
* tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (34 commits)
libnvdimm, pmem: direct map legacy pmem by default
libnvdimm, pmem: 'struct page' for pmem
libnvdimm, pfn: 'struct page' provider infrastructure
x86, pmem: clarify that ARCH_HAS_PMEM_API implies PMEM mapped WB
add devm_memremap_pages
mm: ZONE_DEVICE for "device memory"
mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h
dax: drop size parameter to ->direct_access()
nd_blk: change aperture mapping from WC to WB
nvdimm: change to use generic kvfree()
pmem, dax: have direct_access use __pmem annotation
dax: update I/O path to do proper PMEM flushing
pmem: add copy_from_iter_pmem() and clear_pmem()
pmem, x86: clean up conditional pmem includes
pmem: remove layer when calling arch_has_wmb_pmem()
pmem, x86: move x86 PMEM API to new pmem.h header
libnvdimm, e820: make CONFIG_X86_PMEM_LEGACY a tristate option
pmem: switch to devm_ allocations
devres: add devm_memremap
libnvdimm, btt: write and validate parent_uuid
...
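
As an illustration of the memremap() conversion mentioned in the pull message (hedged: the function below and its caller are hypothetical, not from this series; memremap() and memunmap() are the interfaces this merge introduces, and the flags argument lists acceptable mapping types in order of preference):

#include <linux/io.h>

/*
 * Hypothetical caller: map a register-free memory range that would
 * previously have used ioremap_cache()/ioremap_wt().  No __iomem
 * annotation is needed because the memory has no i/o side effects.
 */
static void *example_map_table(resource_size_t offset, size_t size)
{
	/* prefer a write-back mapping, fall back to write-through */
	return memremap(offset, size, MEMREMAP_WB | MEMREMAP_WT);
}

The returned pointer is a plain (non-__iomem) address that may be dereferenced directly; it is released with memunmap().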
Diffstat (limited to 'include/linux/pmem.h')
-rw-r--r--	include/linux/pmem.h | 115
1 file changed, 84 insertions(+), 31 deletions(-)
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a6c4..85f810b33917 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
 #define __PMEM_H__
 
 #include <linux/io.h>
+#include <linux/uio.h>
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
-#include <asm/cacheflush.h>
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
 #else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
+{
+	return false;
+}
+
 static inline void arch_wmb_pmem(void)
 {
 	BUG();
 }
 
-static inline bool __arch_has_wmb_pmem(void)
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+		size_t n)
 {
-	return false;
+	BUG();
 }
 
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-	unsigned long size)
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
 {
-	return NULL;
+	BUG();
+	return 0;
 }
 
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 {
 	BUG();
 }
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 
 /*
  * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
- * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
  */
-
 static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
 {
 	memcpy(dst, (void __force const *) src, size);
 }
 
-static inline void memunmap_pmem(void __pmem *addr)
+static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
+{
+	devm_memunmap(dev, (void __force *) addr);
+}
+
+static inline bool arch_has_pmem_api(void)
 {
-	iounmap((void __force __iomem *) addr);
+	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }
 
 /**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
  */
 static inline bool arch_has_wmb_pmem(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
-		return __arch_has_wmb_pmem();
-	return false;
-}
-
-static inline bool arch_has_pmem_api(void)
-{
-	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+	return arch_has_pmem_api() && __arch_has_wmb_pmem();
 }
 
 /*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
 * making data durable relative to i/o completion.
  */
-static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t size)
 {
 	memcpy((void __force *) dst, src, size);
 }
 
-static void __pmem *default_memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+		size_t bytes, struct iov_iter *i)
+{
+	return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
 {
-	return (void __pmem __force *)ioremap_wt(offset, size);
+	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+		clear_page((void __force *)addr);
+	else
+		memset((void __force *)addr, 0, size);
 }
 
 /**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
  * wmb_pmem() arrange for the data to be written through the
  * cache to persistent media.
  */
-static inline void __pmem *memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline void __pmem *memremap_pmem(struct device *dev,
+		resource_size_t offset, unsigned long size)
 {
-	if (arch_has_pmem_api())
-		return arch_memremap_pmem(offset, size);
-	return default_memremap_pmem(offset, size);
+	return (void __pmem *) devm_memremap(dev, offset, size,
+			ARCH_MEMREMAP_PMEM);
 }
 
 /**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
  */
 static inline void wmb_pmem(void)
 {
-	if (arch_has_pmem_api())
+	if (arch_has_wmb_pmem())
 		arch_wmb_pmem();
+	else
+		wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	if (arch_has_pmem_api())
+		return arch_copy_from_iter_pmem(addr, bytes, i);
+	return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+	if (arch_has_pmem_api())
+		arch_clear_pmem(addr, size);
+	else
+		default_clear_pmem(addr, size);
 }
 #endif /* __PMEM_H__ */
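
To tie the header above together, a hedged sketch of the intended calling pattern (the helper and its surrounding context are hypothetical; only copy_from_iter_pmem(), clear_pmem() and wmb_pmem() come from the header in this patch):

#include <linux/pmem.h>
#include <linux/uio.h>

/*
 * Hypothetical write helper: copy user data into a pmem-backed buffer,
 * zero any uncopied tail, then make the writes durable.
 */
static size_t example_pmem_write(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	size_t copied = copy_from_iter_pmem(addr, bytes, i);

	if (copied < bytes)
		clear_pmem(addr + copied, bytes - copied);
	wmb_pmem();	/* order the writes above to persistent media */
	return copied;
}

Note that copy_from_iter_pmem() and clear_pmem() are unordered on their own; per their kernel-doc above, the data only becomes durable after the explicit wmb_pmem().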