author:    Alison Schofield <alison.schofield@intel.com>  2023-04-18 20:26:27 -0700
committer: Dan Williams <dan.j.williams@intel.com>  2023-04-23 11:46:22 -0700
commit:    0a105ab28a4de44eb738ce64e9ac74946aa5133b (patch)
tree:      a83773bd9431d5cde703a4a8cdec64ab728e43cf /drivers/cxl
parent:    9690b07748d18ac667036a68442081c4aea33ba7 (diff)
download:  linux-0a105ab28a4de44eb738ce64e9ac74946aa5133b.tar.gz
cxl/memdev: Warn of poison inject or clear to a mapped region
Inject and clear poison capabilities are intended for debug usage only. In order to be useful in debug environments, the driver needs to allow inject and clear operations on DPAs that are mapped in regions. Emit a dev_warn_once() when either operation targets a mapped DPA.

Signed-off-by: Alison Schofield <alison.schofield@intel.com>
Link: https://lore.kernel.org/r/f911ca5277c9d0f9757b72d7e6842871bfff4fa2.1681874357.git.alison.schofield@intel.com
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
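For illustration, one way this warning might be exercised from userspace, assuming the companion debugfs attributes for poison inject/clear are present for the memdev (the debugfs paths, device name, and DPA value below are assumptions, not defined by this patch):

    # Inject poison at a DPA; if that DPA is mapped in a committed region,
    # the driver now emits a one-time warning naming the region.
    echo 0x40000000 > /sys/kernel/debug/cxl/mem0/inject_poison
    # Clearing poison on a mapped DPA warns in the same way.
    echo 0x40000000 > /sys/kernel/debug/cxl/mem0/clear_poison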
Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/core/memdev.c | 59
1 file changed, 59 insertions, 0 deletions
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 813fd1eeba3d..40ce74f5500a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -160,6 +160,50 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+struct cxl_dpa_to_region_context {
+ struct cxl_region *cxlr;
+ u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+ struct cxl_dpa_to_region_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ u64 dpa = ctx->dpa;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return 0;
+
+ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ return 0;
+
+ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+ dev_name(&cxled->cxld.region->dev));
+
+ ctx->cxlr = cxled->cxld.region;
+
+ return 1;
+}
+
+static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ struct cxl_dpa_to_region_context ctx;
+ struct cxl_port *port;
+
+ ctx = (struct cxl_dpa_to_region_context) {
+ .dpa = dpa,
+ };
+ port = dev_get_drvdata(&cxlmd->dev);
+ if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+ return ctx.cxlr;
+}
+
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -189,6 +233,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_mbox_inject_poison inject;
struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_region *cxlr;
int rc;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -209,6 +254,14 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.payload_in = &inject,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ if (rc)
+ goto out;
+
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ dev_warn_once(cxlds->dev,
+ "poison inject dpa:%#llx region: %s\n", dpa,
+ dev_name(&cxlr->dev));
out:
up_read(&cxl_dpa_rwsem);
@@ -221,6 +274,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_mbox_clear_poison clear;
struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_region *cxlr;
int rc;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -252,6 +306,11 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc)
goto out;
+
+ cxlr = cxl_dpa_to_region(cxlmd, dpa);
+ if (cxlr)
+ dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
+ dpa, dev_name(&cxlr->dev));
out:
up_read(&cxl_dpa_rwsem);
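Based on the format strings added above, a successful inject to a mapped DPA would log a one-time warning along these lines (the device prefix, DPA, and region name here are made-up examples, not output captured from this patch):

    cxl_pci 0000:35:00.0: poison inject dpa:0x40000000 region: region0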