-rw-r--r-- | .mailmap | 1
-rw-r--r-- | MAINTAINERS | 13
-rw-r--r-- | drivers/acpi/nfit/core.c | 24
-rw-r--r-- | drivers/acpi/nfit/nfit.h | 1
-rw-r--r-- | drivers/dax/pmem.c | 12
-rw-r--r-- | drivers/dax/super.c | 3
-rw-r--r-- | drivers/md/dm-writecache.c | 3
-rw-r--r-- | drivers/nvdimm/bus.c | 4
-rw-r--r-- | drivers/nvdimm/dimm.c | 24
-rw-r--r-- | drivers/nvdimm/dimm_devs.c | 31
-rw-r--r-- | drivers/nvdimm/namespace_devs.c | 29
-rw-r--r-- | drivers/nvdimm/nd-core.h | 8
-rw-r--r-- | drivers/nvdimm/nd.h | 1
-rw-r--r-- | drivers/nvdimm/pmem.c | 7
-rw-r--r-- | drivers/nvdimm/region_devs.c | 40
-rw-r--r-- | drivers/s390/block/dcssblk.c | 8
-rw-r--r-- | fs/dax.c | 13
-rw-r--r-- | tools/testing/nvdimm/pmem-dax.c | 12
-rw-r--r-- | tools/testing/nvdimm/test/nfit.c | 126
19 files changed, 270 insertions, 90 deletions
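
The drivers/nvdimm/region_devs.c hunks below add a read-only max_available_extent region attribute, backed by the new nd_region_allocatable_dpa() helper, so userspace can see the largest contiguous allocatable extent rather than only the total available_size. A minimal userspace sketch of consuming it follows; the /sys/bus/nd/devices/region0 path is an illustrative assumption and not something defined by this diff.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* assumed example path; enumerate /sys/bus/nd/devices/ for real regions */
	const char *path = "/sys/bus/nd/devices/region0/max_available_extent";
	unsigned long long extent;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%llu", &extent) != 1) {
		fprintf(stderr, "could not parse %s\n", path);
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	/* the attribute reports a byte count, mirroring available_size */
	printf("region0 max contiguous allocatable extent: %llu bytes\n", extent);
	return EXIT_SUCCESS;
}
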
@@ -159,6 +159,7 @@ Ralf Wildenhues <Ralf.Wildenhues@gmx.de> Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net> RĂ©mi Denis-Courmont <rdenis@simphalempin.com> Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com> +Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com> Rudolf Marek <R.Marek@sh.cvut.cz> Rui Saraiva <rmps@joel.ist.utl.pt> Sachin P Sant <ssant@in.ibm.com> diff --git a/MAINTAINERS b/MAINTAINERS index e20e7c42347b..a5b256b25905 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4364,7 +4364,8 @@ F: drivers/i2c/busses/i2c-diolan-u2c.c FILESYSTEM DIRECT ACCESS (DAX) M: Matthew Wilcox <mawilcox@microsoft.com> -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> +M: Jan Kara <jack@suse.cz> L: linux-fsdevel@vger.kernel.org S: Supported F: fs/dax.c @@ -4374,7 +4375,7 @@ F: include/trace/events/fs_dax.h DEVICE DIRECT ACCESS (DAX) M: Dan Williams <dan.j.williams@intel.com> M: Dave Jiang <dave.jiang@intel.com> -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> M: Vishal Verma <vishal.l.verma@intel.com> L: linux-nvdimm@lists.01.org S: Supported @@ -8303,7 +8304,7 @@ S: Maintained F: tools/lib/lockdep/ LIBNVDIMM BLK: MMIO-APERTURE DRIVER -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> M: Dan Williams <dan.j.williams@intel.com> M: Vishal Verma <vishal.l.verma@intel.com> M: Dave Jiang <dave.jiang@intel.com> @@ -8316,7 +8317,7 @@ F: drivers/nvdimm/region_devs.c LIBNVDIMM BTT: BLOCK TRANSLATION TABLE M: Vishal Verma <vishal.l.verma@intel.com> M: Dan Williams <dan.j.williams@intel.com> -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ @@ -8324,7 +8325,7 @@ S: Supported F: drivers/nvdimm/btt* LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> M: Dan Williams <dan.j.williams@intel.com> M: Vishal Verma <vishal.l.verma@intel.com> M: Dave Jiang <dave.jiang@intel.com> @@ -8343,7 +8344,7 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM M: Dan Williams <dan.j.williams@intel.com> -M: Ross Zwisler <ross.zwisler@linux.intel.com> +M: Ross Zwisler <zwisler@kernel.org> M: Vishal Verma <vishal.l.verma@intel.com> M: Dave Jiang <dave.jiang@intel.com> L: linux-nvdimm@lists.01.org diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7c479002e798..b072cfc5f20e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1699,7 +1699,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, { struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; - unsigned long dsm_mask; + unsigned long dsm_mask, label_mask; const guid_t *guid; int i; int family = -1; @@ -1771,6 +1771,16 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1ULL << i)) set_bit(i, &nfit_mem->dsm_mask); + /* + * Prefer the NVDIMM_FAMILY_INTEL label read commands if present + * due to their better semantics handling locked capacity. 
+ */ + label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA + | 1 << ND_CMD_SET_CONFIG_DATA; + if (family == NVDIMM_FAMILY_INTEL + && (dsm_mask & label_mask) == label_mask) + return 0; + if (acpi_nvdimm_has_method(adev_dimm, "_LSI") && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); @@ -2559,7 +2569,12 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc, test_bit(ARS_SHORT, &nfit_spa->ars_state) ? "short" : "long"); clear_bit(ARS_SHORT, &nfit_spa->ars_state); - set_bit(ARS_DONE, &nfit_spa->ars_state); + if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) { + set_bit(ARS_SHORT, &nfit_spa->ars_state); + set_bit(ARS_REQ, &nfit_spa->ars_state); + dev_dbg(dev, "ARS: processing scrub request received while in progress\n"); + } else + set_bit(ARS_DONE, &nfit_spa->ars_state); } static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) @@ -3256,9 +3271,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; - if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) + if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) { busy++; - else { + set_bit(ARS_REQ_REDO, &nfit_spa->ars_state); + } else { if (test_bit(ARS_SHORT, &flags)) set_bit(ARS_SHORT, &nfit_spa->ars_state); scheduled++; diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index a97ff42fe311..d1274ea2d251 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -119,6 +119,7 @@ enum nfit_dimm_notifiers { enum nfit_ars_state { ARS_REQ, + ARS_REQ_REDO, ARS_DONE, ARS_SHORT, ARS_FAILED, diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index fd49b24fd6af..99e2aace8078 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev) if (rc) return rc; - rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit, - &dax_pmem->ref); - if (rc) + rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); + if (rc) { + percpu_ref_exit(&dax_pmem->ref); return rc; + } dax_pmem->pgmap.ref = &dax_pmem->ref; addr = devm_memremap_pages(dev, &dax_pmem->pgmap); - if (IS_ERR(addr)) + if (IS_ERR(addr)) { + devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); + percpu_ref_exit(&dax_pmem->ref); return PTR_ERR(addr); + } rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, &dax_pmem->ref); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 45276abf03aa..6e928f37d084 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -89,7 +89,6 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) struct request_queue *q; pgoff_t pgoff; int err, id; - void *kaddr; pfn_t pfn; long len; char buf[BDEVNAME_SIZE]; @@ -122,7 +121,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) } id = dax_read_lock(); - len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); + len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn); dax_read_unlock(id); put_dax(dax_dev); diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 3a28a68f184c..5f1f80d424dd 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -268,9 +268,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - void *dummy_addr; daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, - &dummy_addr, &pfn); + NULL, &pfn); if (daa <= 0) { r = daa ? 
daa : -EINVAL; goto err3; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 27902a8799b1..8aae6dcc839f 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, * overshoots the remainder by 4 bytes, assume it was * including 'status'. */ - if (out_field[1] - 8 == remainder) + if (out_field[1] - 4 == remainder) return remainder; - return out_field[1] - 4; + return out_field[1] - 8; } else if (cmd == ND_CMD_CALL) { struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 233907889f96..6c8fb7590838 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -34,6 +34,9 @@ static int nvdimm_probe(struct device *dev) return rc; } + /* reset locked, to be validated below... */ + nvdimm_clear_locked(dev); + ndd = kzalloc(sizeof(*ndd), GFP_KERNEL); if (!ndd) return -ENOMEM; @@ -48,12 +51,30 @@ static int nvdimm_probe(struct device *dev) get_device(dev); kref_init(&ndd->kref); + /* + * EACCES failures reading the namespace label-area-properties + * are interpreted as the DIMM capacity being locked but the + * namespace labels themselves being accessible. + */ rc = nvdimm_init_nsarea(ndd); - if (rc == -EACCES) + if (rc == -EACCES) { + /* + * See nvdimm_namespace_common_probe() where we fail to + * allow namespaces to probe while the DIMM is locked, + * but we do allow for namespace enumeration. + */ nvdimm_set_locked(dev); + rc = 0; + } if (rc) goto err; + /* + * EACCES failures reading the namespace label-data are + * interpreted as the label area being locked in addition to the + * DIMM capacity. We fail the dimm probe to prevent regions from + * attempting to parse the label area. + */ rc = nvdimm_init_config_data(ndd); if (rc == -EACCES) nvdimm_set_locked(dev); @@ -72,7 +93,6 @@ static int nvdimm_probe(struct device *dev) if (rc == 0) nvdimm_set_aliasing(dev); } - nvdimm_clear_locked(dev); nvdimm_bus_unlock(dev); if (rc) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 8d348b22ba45..863cabc35215 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -537,6 +537,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) } /** + * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max + * contiguous unallocated dpa range. 
+ * @nd_region: constrain available space check to this reference region + * @nd_mapping: container of dpa-resource-root + labels + */ +resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, + struct nd_mapping *nd_mapping) +{ + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nvdimm_bus *nvdimm_bus; + resource_size_t max = 0; + struct resource *res; + + /* if a dimm is disabled the available capacity is zero */ + if (!ndd) + return 0; + + nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); + if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm)) + return 0; + for_each_dpa_resource(ndd, res) { + if (strcmp(res->name, "pmem-reserve") != 0) + continue; + if (resource_size(res) > max) + max = resource_size(res); + } + release_free_pmem(nvdimm_bus, nd_mapping); + return max; +} + +/** * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa * @nd_mapping: container of dpa-resource-root + labels * @nd_region: constrain available space check to this reference region diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 28afdd668905..4a4266250c28 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region, return 0; } -static int __reserve_free_pmem(struct device *dev, void *data) +int __reserve_free_pmem(struct device *dev, void *data) { struct nvdimm *nvdimm = data; struct nd_region *nd_region; @@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data) return 0; } -static void release_free_pmem(struct nvdimm_bus *nvdimm_bus, +void release_free_pmem(struct nvdimm_bus *nvdimm_bus, struct nd_mapping *nd_mapping) { struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) allocated += nvdimm_allocated_dpa(ndd, &label_id); } - available = nd_region_available_dpa(nd_region); + available = nd_region_allocatable_dpa(nd_region); if (val > available + allocated) return -ENOSPC; @@ -1144,6 +1144,26 @@ resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) } EXPORT_SYMBOL(nvdimm_namespace_capacity); +bool nvdimm_namespace_locked(struct nd_namespace_common *ndns) +{ + int i; + bool locked = false; + struct device *dev = &ndns->dev; + struct nd_region *nd_region = to_nd_region(dev->parent); + + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + if (test_bit(NDD_LOCKED, &nvdimm->flags)) { + dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm)); + locked = true; + } + } + return locked; +} +EXPORT_SYMBOL(nvdimm_namespace_locked); + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1695,6 +1715,9 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) } } + if (nvdimm_namespace_locked(ndns)) + return ERR_PTR(-EACCES); + size = nvdimm_namespace_capacity(ndns); if (size < ND_MIN_NAMESPACE_SIZE) { dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n", diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 79274ead54fb..ac68072fb8cd 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -100,6 +100,14 @@ struct nd_region; struct nvdimm_drvdata; struct nd_mapping; void nd_mapping_free_labels(struct nd_mapping *nd_mapping); + +int __reserve_free_pmem(struct device *dev, void *data); +void release_free_pmem(struct nvdimm_bus 
*nvdimm_bus, + struct nd_mapping *nd_mapping); + +resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, + struct nd_mapping *nd_mapping); +resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t *overlap); resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 6ee7fd7e4bbd..98317e7ce5b5 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -357,6 +357,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id, resource_size_t start, resource_size_t n); resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns); +bool nvdimm_namespace_locked(struct nd_namespace_common *ndns); struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev); int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index dd17acd8fe68..c23649867696 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -226,8 +226,11 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512, PFN_PHYS(nr_pages)))) return -EIO; - *kaddr = pmem->virt_addr + offset; - *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + + if (kaddr) + *kaddr = pmem->virt_addr + offset; + if (pfn) + *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); /* * If badblocks are present, limit known good range to the diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index ec3543b83330..fa37afcd43ff 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) return available; } +resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) +{ + resource_size_t available = 0; + int i; + + if (is_memory(&nd_region->dev)) + available = PHYS_ADDR_MAX; + + WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + + if (is_memory(&nd_region->dev)) + available = min(available, + nd_pmem_max_contiguous_dpa(nd_region, + nd_mapping)); + else if (is_nd_blk(&nd_region->dev)) + available += nd_blk_available_dpa(nd_region); + } + if (is_memory(&nd_region->dev)) + return available * nd_region->ndr_mappings; + return available; +} + static ssize_t available_size_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -410,6 +434,21 @@ static ssize_t available_size_show(struct device *dev, } static DEVICE_ATTR_RO(available_size); +static ssize_t max_available_extent_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + unsigned long long available = 0; + + nvdimm_bus_lock(dev); + wait_nvdimm_bus_probe_idle(dev); + available = nd_region_allocatable_dpa(nd_region); + nvdimm_bus_unlock(dev); + + return sprintf(buf, "%llu\n", available); +} +static DEVICE_ATTR_RO(max_available_extent); + static ssize_t init_namespaces_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -561,6 +600,7 @@ static struct attribute *nd_region_attributes[] = { &dev_attr_read_only.attr, &dev_attr_set_cookie.attr, 
&dev_attr_available_size.attr, + &dev_attr_max_available_extent.attr, &dev_attr_namespace_seed.attr, &dev_attr_init_namespaces.attr, &dev_attr_badblocks.attr, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index ed607288e696..23e526cda5c1 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -922,9 +922,11 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, unsigned long dev_sz; dev_sz = dev_info->end - dev_info->start + 1; - *kaddr = (void *) dev_info->start + offset; - *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), - PFN_DEV|PFN_SPECIAL); + if (kaddr) + *kaddr = (void *) dev_info->start + offset; + if (pfn) + *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), + PFN_DEV|PFN_SPECIAL); return (dev_sz - offset) / PAGE_SIZE; } @@ -655,7 +655,6 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, { void *vto, *kaddr; pgoff_t pgoff; - pfn_t pfn; long rc; int id; @@ -664,7 +663,7 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, return rc; id = dax_read_lock(); - rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn); + rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; @@ -975,7 +974,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, { const sector_t sector = dax_iomap_sector(iomap, pos); pgoff_t pgoff; - void *kaddr; int id, rc; long length; @@ -984,7 +982,7 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size, return rc; id = dax_read_lock(); length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), - &kaddr, pfnp); + NULL, pfnp); if (length < 0) { rc = length; goto out; @@ -1060,15 +1058,13 @@ int __dax_zero_page_range(struct block_device *bdev, pgoff_t pgoff; long rc, id; void *kaddr; - pfn_t pfn; rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); if (rc) return rc; id = dax_read_lock(); - rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, - &pfn); + rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; @@ -1124,7 +1120,6 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ssize_t map_len; pgoff_t pgoff; void *kaddr; - pfn_t pfn; if (fatal_signal_pending(current)) { ret = -EINTR; @@ -1136,7 +1131,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, break; map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), - &kaddr, &pfn); + &kaddr, NULL); if (map_len < 0) { ret = map_len; break; diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c index b53596ad601b..2e7fd8227969 100644 --- a/tools/testing/nvdimm/pmem-dax.c +++ b/tools/testing/nvdimm/pmem-dax.c @@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, if (get_nfit_res(pmem->phys_addr + offset)) { struct page *page; - *kaddr = pmem->virt_addr + offset; + if (kaddr) + *kaddr = pmem->virt_addr + offset; page = vmalloc_to_page(pmem->virt_addr + offset); - *pfn = page_to_pfn_t(page); + if (pfn) + *pfn = page_to_pfn_t(page); pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n", __func__, pmem, pgoff, page_to_pfn(page)); return 1; } - *kaddr = pmem->virt_addr + offset; - *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + if (kaddr) + *kaddr = pmem->virt_addr + offset; + if (pfn) + *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); /* * If badblocks are 
present, limit known good range to the diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index e2926f72a821..cffc2c5a778d 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -142,6 +142,28 @@ static u32 handle[] = { static unsigned long dimm_fail_cmd_flags[NUM_DCR]; static int dimm_fail_cmd_code[NUM_DCR]; +static const struct nd_intel_smart smart_def = { + .flags = ND_INTEL_SMART_HEALTH_VALID + | ND_INTEL_SMART_SPARES_VALID + | ND_INTEL_SMART_ALARM_VALID + | ND_INTEL_SMART_USED_VALID + | ND_INTEL_SMART_SHUTDOWN_VALID + | ND_INTEL_SMART_MTEMP_VALID + | ND_INTEL_SMART_CTEMP_VALID, + .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH, + .media_temperature = 23 * 16, + .ctrl_temperature = 25 * 16, + .pmic_temperature = 40 * 16, + .spares = 75, + .alarm_flags = ND_INTEL_SMART_SPARE_TRIP + | ND_INTEL_SMART_TEMP_TRIP, + .ait_status = 1, + .life_used = 5, + .shutdown_state = 0, + .vendor_size = 0, + .shutdown_count = 100, +}; + struct nfit_test_fw { enum intel_fw_update_state state; u32 context; @@ -752,15 +774,30 @@ static int nfit_test_cmd_smart_inject( if (buf_len != sizeof(*inj)) return -EINVAL; - if (inj->mtemp_enable) - smart->media_temperature = inj->media_temperature; - if (inj->spare_enable) - smart->spares = inj->spares; - if (inj->fatal_enable) - smart->health = ND_INTEL_SMART_FATAL_HEALTH; - if (inj->unsafe_shutdown_enable) { - smart->shutdown_state = 1; - smart->shutdown_count++; + if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) { + if (inj->mtemp_enable) + smart->media_temperature = inj->media_temperature; + else + smart->media_temperature = smart_def.media_temperature; + } + if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) { + if (inj->spare_enable) + smart->spares = inj->spares; + else + smart->spares = smart_def.spares; + } + if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) { + if (inj->fatal_enable) + smart->health = ND_INTEL_SMART_FATAL_HEALTH; + else + smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH; + } + if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) { + if (inj->unsafe_shutdown_enable) { + smart->shutdown_state = 1; + smart->shutdown_count++; + } else + smart->shutdown_state = 0; } inj->status = 0; smart_notify(bus_dev, dimm_dev, smart, thresh); @@ -884,6 +921,16 @@ static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t, return 0; } +static int override_return_code(int dimm, unsigned int func, int rc) +{ + if ((1 << func) & dimm_fail_cmd_flags[dimm]) { + if (dimm_fail_cmd_code[dimm]) + return dimm_fail_cmd_code[dimm]; + return -EIO; + } + return rc; +} + static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) { int i; @@ -894,13 +941,6 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) break; if (i >= ARRAY_SIZE(handle)) return -ENXIO; - - if ((1 << func) & dimm_fail_cmd_flags[i]) { - if (dimm_fail_cmd_code[i]) - return dimm_fail_cmd_code[i]; - return -EIO; - } - return i; } @@ -939,48 +979,59 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, switch (func) { case ND_INTEL_ENABLE_LSS_STATUS: - return nd_intel_test_cmd_set_lss_status(t, + rc = nd_intel_test_cmd_set_lss_status(t, buf, buf_len); + break; case ND_INTEL_FW_GET_INFO: - return nd_intel_test_get_fw_info(t, buf, + rc = nd_intel_test_get_fw_info(t, buf, buf_len, i - t->dcr_idx); + break; case ND_INTEL_FW_START_UPDATE: - return nd_intel_test_start_update(t, buf, + rc = nd_intel_test_start_update(t, buf, buf_len, i - t->dcr_idx); + break; case ND_INTEL_FW_SEND_DATA: - return nd_intel_test_send_data(t, buf, + rc 
= nd_intel_test_send_data(t, buf, buf_len, i - t->dcr_idx); + break; case ND_INTEL_FW_FINISH_UPDATE: - return nd_intel_test_finish_fw(t, buf, + rc = nd_intel_test_finish_fw(t, buf, buf_len, i - t->dcr_idx); + break; case ND_INTEL_FW_FINISH_QUERY: - return nd_intel_test_finish_query(t, buf, + rc = nd_intel_test_finish_query(t, buf, buf_len, i - t->dcr_idx); + break; case ND_INTEL_SMART: - return nfit_test_cmd_smart(buf, buf_len, + rc = nfit_test_cmd_smart(buf, buf_len, &t->smart[i - t->dcr_idx]); + break; case ND_INTEL_SMART_THRESHOLD: - return nfit_test_cmd_smart_threshold(buf, + rc = nfit_test_cmd_smart_threshold(buf, buf_len, &t->smart_threshold[i - t->dcr_idx]); + break; case ND_INTEL_SMART_SET_THRESHOLD: - return nfit_test_cmd_smart_set_threshold(buf, + rc = nfit_test_cmd_smart_set_threshold(buf, buf_len, &t->smart_threshold[i - t->dcr_idx], &t->smart[i - t->dcr_idx], &t->pdev.dev, t->dimm_dev[i]); + break; case ND_INTEL_SMART_INJECT: - return nfit_test_cmd_smart_inject(buf, + rc = nfit_test_cmd_smart_inject(buf, buf_len, &t->smart_threshold[i - t->dcr_idx], &t->smart[i - t->dcr_idx], &t->pdev.dev, t->dimm_dev[i]); + break; default: return -ENOTTY; } + return override_return_code(i, func, rc); } if (!test_bit(cmd, &cmd_mask) @@ -1006,6 +1057,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, default: return -ENOTTY; } + return override_return_code(i, func, rc); } else { struct ars_state *ars_state = &t->ars_state; struct nd_cmd_pkg *call_pkg = buf; @@ -1302,29 +1354,9 @@ static void smart_init(struct nfit_test *t) .ctrl_temperature = 30 * 16, .spares = 5, }; - const struct nd_intel_smart smart_data = { - .flags = ND_INTEL_SMART_HEALTH_VALID - | ND_INTEL_SMART_SPARES_VALID - | ND_INTEL_SMART_ALARM_VALID - | ND_INTEL_SMART_USED_VALID - | ND_INTEL_SMART_SHUTDOWN_VALID - | ND_INTEL_SMART_MTEMP_VALID, - .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH, - .media_temperature = 23 * 16, - .ctrl_temperature = 25 * 16, - .pmic_temperature = 40 * 16, - .spares = 75, - .alarm_flags = ND_INTEL_SMART_SPARE_TRIP - | ND_INTEL_SMART_TEMP_TRIP, - .ait_status = 1, - .life_used = 5, - .shutdown_state = 0, - .vendor_size = 0, - .shutdown_count = 100, - }; for (i = 0; i < t->num_dcr; i++) { - memcpy(&t->smart[i], &smart_data, sizeof(smart_data)); + memcpy(&t->smart[i], &smart_def, sizeof(smart_def)); memcpy(&t->smart_threshold[i], &smart_t_data, sizeof(smart_t_data)); } |
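
Several hunks above (drivers/dax/super.c, drivers/md/dm-writecache.c, fs/dax.c, drivers/nvdimm/pmem.c, drivers/s390/block/dcssblk.c, tools/testing/nvdimm/pmem-dax.c) convert dax_direct_access() providers and callers so that a NULL kaddr or pfn argument is tolerated when only one of the two outputs is needed. A minimal kernel-side sketch of the resulting calling convention follows; the helper name dax_probe_pfn() and its use of bdev_dax_pgoff() are illustrative assumptions, not code from this merge.

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>

static long dax_probe_pfn(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, pfn_t *pfn)
{
	pgoff_t pgoff;
	long len;
	int id, rc;

	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	/* only the pfn is wanted here, so kaddr is passed as NULL */
	len = dax_direct_access(dax_dev, pgoff, 1, NULL, pfn);
	dax_read_unlock(id);

	return len;	/* number of mappable pages, or a negative errno */
}
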