From 4a20bc3e207488064e08fc5d7220d6acf95c80dd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 8 Dec 2022 09:02:00 -0800 Subject: cxl/pci: Move tracepoint definitions to drivers/cxl/core/ CXL is using tracepoints for reporting RAS capability register payloads for AER events, and has plans to use tracepoints for the output payload of Get Poison List and Get Event Records commands. For organization purposes it would be nice to keep those all under a single, local CXL trace system. This organization also potentially helps in the future when CXL drivers expand beyond generic memory expanders; however, that would also entail a move away from the expander-specific cxl_dev_state context, so save that for later. Note that the powerpc-specific drivers/misc/cxl/ also defines a 'cxl' trace system; however, it is unlikely that a single platform will ever load both drivers simultaneously. Cc: Steven Rostedt Tested-by: Alison Schofield Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/167051869176.436579.9728373544811641087.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/Kbuild | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools') diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 0805f08af8b3..12af1c9270ff 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -17,6 +17,7 @@ CXL_SRC := $(DRIVERS)/cxl CXL_CORE_SRC := $(DRIVERS)/cxl/core ccflags-y := -I$(srctree)/drivers/cxl/ ccflags-y += -D__mock=__weak +ccflags-y += -DTRACE_INCLUDE_PATH=$(CXL_CORE_SRC) -I$(srctree)/drivers/cxl/core/ obj-m += cxl_acpi.o @@ -49,6 +50,7 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o cxl_core-y += $(CXL_CORE_SRC)/mbox.o cxl_core-y += $(CXL_CORE_SRC)/pci.o cxl_core-y += $(CXL_CORE_SRC)/hdm.o +cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o cxl_core-y += config_check.o -- cgit v1.2.3-70-g09d2 From 8c149eb011be23679b3320d3939f4e3c8271969c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 13 Dec 2022 08:44:24 -0800 Subject: tools/testing/cxl: Prevent cxl_test from confusing production modules The cxl_test machinery builds modified versions of the modules in drivers/cxl/ and intercepts some of their calls to allow cxl_test to inject mock CXL topologies for test. However, if cxl_test attempts the same with production modules, fireworks ensue as Luis discovered [1]. Prevent that scenario by arranging for cxl_test to check for a "watermark" symbol in each of the modules it expects to be modified before the test can run. This turns undefined runtime behavior or crashes into a safer failure to load the cxl_test module.
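For reference, the watermark amounts to one trivially exported symbol per mocked module; as a sketch, cxl_test_watermark(cxl_acpi) from the new watermark.h below expands to roughly:

/* Expansion sketch of cxl_test_watermark(cxl_acpi): cxl_test_init() calls
 * cxl_acpi_test(), so this symbol must resolve at cxl_test load time, and
 * only the mocked cxl_acpi module provides it.
 */
int cxl_acpi_test(void)
{
        pr_debug("%s for cxl_test\n", KBUILD_MODNAME);
        return 0;
}
EXPORT_SYMBOL(cxl_acpi_test);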
Link: http://lore.kernel.org/r/20221209062919.1096779-1-mcgrof@kernel.org [1] Reported-by: Luis Chamberlain Signed-off-by: Dan Williams --- tools/testing/cxl/Kbuild | 6 ++++++ tools/testing/cxl/cxl_acpi_test.c | 6 ++++++ tools/testing/cxl/cxl_core_test.c | 6 ++++++ tools/testing/cxl/cxl_mem_test.c | 6 ++++++ tools/testing/cxl/cxl_pmem_test.c | 6 ++++++ tools/testing/cxl/cxl_port_test.c | 6 ++++++ tools/testing/cxl/test/cxl.c | 8 ++++++++ tools/testing/cxl/watermark.h | 25 +++++++++++++++++++++++++ 8 files changed, 69 insertions(+) create mode 100644 tools/testing/cxl/cxl_acpi_test.c create mode 100644 tools/testing/cxl/cxl_core_test.c create mode 100644 tools/testing/cxl/cxl_mem_test.c create mode 100644 tools/testing/cxl/cxl_pmem_test.c create mode 100644 tools/testing/cxl/cxl_port_test.c create mode 100644 tools/testing/cxl/watermark.h (limited to 'tools') diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 12af1c9270ff..37f77ac9b917 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -24,22 +24,27 @@ obj-m += cxl_acpi.o cxl_acpi-y := $(CXL_SRC)/acpi.o cxl_acpi-y += mock_acpi.o cxl_acpi-y += config_check.o +cxl_acpi-y += cxl_acpi_test.o obj-m += cxl_pmem.o cxl_pmem-y := $(CXL_SRC)/pmem.o cxl_pmem-y += $(CXL_SRC)/security.o cxl_pmem-y += config_check.o +cxl_pmem-y += cxl_pmem_test.o obj-m += cxl_port.o cxl_port-y := $(CXL_SRC)/port.o cxl_port-y += config_check.o +cxl_port-y += cxl_port_test.o + obj-m += cxl_mem.o cxl_mem-y := $(CXL_SRC)/mem.o cxl_mem-y += config_check.o +cxl_mem-y += cxl_mem_test.o obj-m += cxl_core.o @@ -53,5 +58,6 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o cxl_core-y += config_check.o +cxl_core-y += cxl_core_test.o obj-m += test/ diff --git a/tools/testing/cxl/cxl_acpi_test.c b/tools/testing/cxl/cxl_acpi_test.c new file mode 100644 index 000000000000..8602dc27c81c --- /dev/null +++ b/tools/testing/cxl/cxl_acpi_test.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ + +#include "watermark.h" + +cxl_test_watermark(cxl_acpi); diff --git a/tools/testing/cxl/cxl_core_test.c b/tools/testing/cxl/cxl_core_test.c new file mode 100644 index 000000000000..464a9255e4d6 --- /dev/null +++ b/tools/testing/cxl/cxl_core_test.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ + +#include "watermark.h" + +cxl_test_watermark(cxl_core); diff --git a/tools/testing/cxl/cxl_mem_test.c b/tools/testing/cxl/cxl_mem_test.c new file mode 100644 index 000000000000..ba7fb8a44288 --- /dev/null +++ b/tools/testing/cxl/cxl_mem_test.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ + +#include "watermark.h" + +cxl_test_watermark(cxl_mem); diff --git a/tools/testing/cxl/cxl_pmem_test.c b/tools/testing/cxl/cxl_pmem_test.c new file mode 100644 index 000000000000..3fd884fae537 --- /dev/null +++ b/tools/testing/cxl/cxl_pmem_test.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. 
*/ + +#include "watermark.h" + +cxl_test_watermark(cxl_pmem); diff --git a/tools/testing/cxl/cxl_port_test.c b/tools/testing/cxl/cxl_port_test.c new file mode 100644 index 000000000000..be183917a9f6 --- /dev/null +++ b/tools/testing/cxl/cxl_port_test.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ + +#include "watermark.h" + +cxl_test_watermark(cxl_port); diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 30ee680d38ff..920bd969c554 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -9,6 +9,8 @@ #include #include #include + +#include "../watermark.h" #include "mock.h" static int interleave_arithmetic; @@ -1119,6 +1121,12 @@ static __init int cxl_test_init(void) { int rc, i; + cxl_acpi_test(); + cxl_core_test(); + cxl_mem_test(); + cxl_pmem_test(); + cxl_port_test(); + register_cxl_mock_ops(&cxl_mock_ops); cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE); diff --git a/tools/testing/cxl/watermark.h b/tools/testing/cxl/watermark.h new file mode 100644 index 000000000000..9d81d4a5f6be --- /dev/null +++ b/tools/testing/cxl/watermark.h @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ +#ifndef _TEST_CXL_WATERMARK_H_ +#define _TEST_CXL_WATERMARK_H_ +#include +#include + +int cxl_acpi_test(void); +int cxl_core_test(void); +int cxl_mem_test(void); +int cxl_pmem_test(void); +int cxl_port_test(void); + +/* + * dummy routine for cxl_test to validate it is linking to the properly + * mocked module and not the standard one from the base tree. + */ +#define cxl_test_watermark(x) \ +int x##_test(void) \ +{ \ + pr_debug("%s for cxl_test\n", KBUILD_MODNAME); \ + return 0; \ +} \ +EXPORT_SYMBOL(x##_test) +#endif /* _TEST_CXL_WATERMARK_H_ */ -- cgit v1.2.3-70-g09d2 From f45d63c1218636f77b9c3c53318c56ecd27dc8ec Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Mon, 19 Dec 2022 11:50:50 -0800 Subject: tools/testing/cxl: require 64-bit size_t is limited to 32-bits and so the gen_pool_alloc() using the size of SZ_64G would map to 0, triggering a low allocation which is not expected. Force the dependency on 64-bit for cxl_test as that is what it was designed for. This issue was found by build test reports when converting this driver as a proper upstream driver. Signed-off-by: Luis Chamberlain Link: https://lore.kernel.org/r/20221219195050.325959-1-mcgrof@kernel.org Signed-off-by: Dan Williams --- tools/testing/cxl/config_check.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c index c4c457e59841..99b56b5f6edf 100644 --- a/tools/testing/cxl/config_check.c +++ b/tools/testing/cxl/config_check.c @@ -7,6 +7,7 @@ void check(void) * These kconfig symbols must be set to "m" for cxl_test to load * and operate. */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT)); BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS)); BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI)); BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM)); -- cgit v1.2.3-70-g09d2 From 66f3cb7993c2729b72e20313f8dc6e0716416186 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Thu, 26 Jan 2023 09:05:55 -0800 Subject: tools/testing/cxl: Remove cxl_test module math loading message Commit "tools/testing/cxl: Add XOR Math support to cxl_test" added a module parameter to cxl_test for the interleave_arithmetic option. 
In doing so, it also added this dev_dbg() message describing which option cxl_test used during load: "[ 111.743246] (NULL device *): cxl_test loading modulo math option" That "(NULL device *)" has raised needless user concern. Remove the dev_dbg() message and make the module_param readable via sysfs for users that need to know which math option is active. Suggested-by: Dan Williams Reviewed-by: Vishal Verma Link: https://lore.kernel.org/r/20230126170555.701240-1-alison.schofield@intel.com Signed-off-by: Alison Schofield Signed-off-by: Dan Williams --- tools/testing/cxl/test/cxl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 920bd969c554..a65305218c90 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -1143,11 +1143,9 @@ static __init int cxl_test_init(void) if (interleave_arithmetic == 1) { cfmws_start = CFMWS_XOR_ARRAY_START; cfmws_end = CFMWS_XOR_ARRAY_END; - dev_dbg(NULL, "cxl_test loading xor math option\n"); } else { cfmws_start = CFMWS_MOD_ARRAY_START; cfmws_end = CFMWS_MOD_ARRAY_END; - dev_dbg(NULL, "cxl_test loading modulo math option\n"); } rc = populate_cedt(); @@ -1334,7 +1332,7 @@ static __exit void cxl_test_exit(void) unregister_cxl_mock_ops(&cxl_mock_ops); } -module_param(interleave_arithmetic, int, 0000); +module_param(interleave_arithmetic, int, 0444); MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1"); module_init(cxl_test_init); module_exit(cxl_test_exit); -- cgit v1.2.3-70-g09d2 From d1dca858f058f53f68aeacb6db0e1cb3568fa6ef Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 17 Jan 2023 21:53:41 -0800 Subject: cxl/test: Add generic mock events Facilitate testing basic Get/Clear Event functionality by creating multiple logs and generic events with made up UUID's. Data is completely made up with data patterns which should be easy to spot in trace output. A single sysfs entry resets the event data and triggers collecting the events for testing. 
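As a sketch of how the mock event store can be extended, the snippet below adds one more made-up record to the INFO log. mes_add_event(), struct cxl_event_record_raw, struct mock_event_store, and CXLDEV_EVENT_STATUS_INFO all come from the patch below; the record and helper function here are purely hypothetical.

/* Hypothetical extra record for the INFO log; the handle is filled in
 * dynamically by mock_get_event(), so only static fields are set here.
 */
static struct cxl_event_record_raw another_record = {
        .hdr = {
                .id = UUID_INIT(0xdeadbeef, 0x1234, 0x5678,
                                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef),
                .length = sizeof(struct cxl_event_record_raw),
                .related_handle = cpu_to_le16(0),
        },
        .data = { 0xBA, 0xDC, 0x0F, 0xFE },
};

static void add_more_mock_events(struct mock_event_store *mes)
{
        mes_add_event(mes, CXL_EVENT_TYPE_INFO, &another_record);
        mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
}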
Test traces are easy to obtain with a small script such as this: #!/bin/bash -x devices=`find /sys/devices/platform -name cxl_mem*` # Turn on tracing echo "" > /sys/kernel/tracing/trace echo 1 > /sys/kernel/tracing/events/cxl/enable echo 1 > /sys/kernel/tracing/tracing_on # Generate fake interrupt for device in $devices; do echo 1 > $device/event_trigger done # Turn off tracing and report events echo 0 > /sys/kernel/tracing/tracing_on cat /sys/kernel/tracing/trace Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20221216-cxl-ev-log-v7-6-2316a5c8f7d8@intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/Kbuild | 2 +- tools/testing/cxl/test/mem.c | 231 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 232 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild index 4e59e2c911f6..61d5f7bcddf9 100644 --- a/tools/testing/cxl/test/Kbuild +++ b/tools/testing/cxl/test/Kbuild @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I$(srctree)/drivers/cxl/ +ccflags-y := -I$(srctree)/drivers/cxl/ -I$(srctree)/drivers/cxl/core obj-m += cxl_test.o obj-m += cxl_mock.o diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 5e4ecd93f1d2..90a463f83ae4 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -9,6 +9,8 @@ #include #include +#include "trace.h" + #define LSA_SIZE SZ_128K #define DEV_SIZE SZ_2G #define EFFECT(x) (1U << x) @@ -67,6 +69,24 @@ static struct { #define PASS_TRY_LIMIT 3 +#define CXL_TEST_EVENT_CNT_MAX 15 + +/* Set a number of events to return at a time for simulation. */ +#define CXL_TEST_EVENT_CNT 3 + +struct mock_event_log { + u16 clear_idx; + u16 cur_idx; + u16 nr_events; + struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX]; +}; + +struct mock_event_store { + struct cxl_dev_state *cxlds; + struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX]; + u32 ev_status; +}; + struct cxl_mockmem_data { void *lsa; u32 security_state; @@ -74,9 +94,198 @@ struct cxl_mockmem_data { u8 master_pass[NVDIMM_PASSPHRASE_LEN]; int user_limit; int master_limit; + struct mock_event_store mes; + u8 event_buf[SZ_4K]; +}; + +static struct mock_event_log *event_find_log(struct device *dev, int log_type) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); + + if (log_type >= CXL_EVENT_TYPE_MAX) + return NULL; + return &mdata->mes.mock_logs[log_type]; +} + +static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log) +{ + return log->events[log->cur_idx]; +} + +static void event_reset_log(struct mock_event_log *log) +{ + log->cur_idx = 0; + log->clear_idx = 0; +} + +/* Handle can never be 0 use 1 based indexing for handle */ +static u16 event_get_clear_handle(struct mock_event_log *log) +{ + return log->clear_idx + 1; +} + +/* Handle can never be 0 use 1 based indexing for handle */ +static __le16 event_get_cur_event_handle(struct mock_event_log *log) +{ + u16 cur_handle = log->cur_idx + 1; + + return cpu_to_le16(cur_handle); +} + +static bool event_log_empty(struct mock_event_log *log) +{ + return log->cur_idx == log->nr_events; +} + +static void mes_add_event(struct mock_event_store *mes, + enum cxl_event_log_type log_type, + struct cxl_event_record_raw *event) +{ + struct mock_event_log *log; + + if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX)) + return; + + log = &mes->mock_logs[log_type]; + if (WARN_ON(log->nr_events >= CXL_TEST_EVENT_CNT_MAX)) + return; + + 
log->events[log->nr_events] = event; + log->nr_events++; +} + +static int mock_get_event(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd) +{ + struct cxl_get_event_payload *pl; + struct mock_event_log *log; + u8 log_type; + int i; + + if (cmd->size_in != sizeof(log_type)) + return -EINVAL; + + if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT)) + return -EINVAL; + + log_type = *((u8 *)cmd->payload_in); + if (log_type >= CXL_EVENT_TYPE_MAX) + return -EINVAL; + + memset(cmd->payload_out, 0, cmd->size_out); + + log = event_find_log(cxlds->dev, log_type); + if (!log || event_log_empty(log)) + return 0; + + pl = cmd->payload_out; + + for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) { + memcpy(&pl->records[i], event_get_current(log), + sizeof(pl->records[i])); + pl->records[i].hdr.handle = event_get_cur_event_handle(log); + log->cur_idx++; + } + + pl->record_count = cpu_to_le16(i); + if (!event_log_empty(log)) + pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS; + + return 0; +} + +static int mock_clear_event(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_clear_event_payload *pl = cmd->payload_in; + struct mock_event_log *log; + u8 log_type = pl->event_log; + u16 handle; + int nr; + + if (log_type >= CXL_EVENT_TYPE_MAX) + return -EINVAL; + + log = event_find_log(cxlds->dev, log_type); + if (!log) + return 0; /* No mock data in this log */ + + /* + * This check is technically not invalid per the specification AFAICS. + * (The host could 'guess' handles and clear them in order). + * However, this is not good behavior for the host so test it. + */ + if (log->clear_idx + pl->nr_recs > log->cur_idx) { + dev_err(cxlds->dev, + "Attempting to clear more events than returned!\n"); + return -EINVAL; + } + + /* Check handle order prior to clearing events */ + for (nr = 0, handle = event_get_clear_handle(log); + nr < pl->nr_recs; + nr++, handle++) { + if (handle != le16_to_cpu(pl->handles[nr])) { + dev_err(cxlds->dev, "Clearing events out of order\n"); + return -EINVAL; + } + } + + /* Clear events */ + log->clear_idx += pl->nr_recs; + return 0; +} + +static void cxl_mock_event_trigger(struct device *dev) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); + struct mock_event_store *mes = &mdata->mes; + int i; + + for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) { + struct mock_event_log *log; + + log = event_find_log(dev, i); + if (log) + event_reset_log(log); + } + + cxl_mem_get_event_records(mes->cxlds, mes->ev_status); +} + +struct cxl_event_record_raw maint_needed = { + .hdr = { + .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB, + 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), + .length = sizeof(struct cxl_event_record_raw), + .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, + /* .handle = Set dynamically */ + .related_handle = cpu_to_le16(0xa5b6), + }, + .data = { 0xDE, 0xAD, 0xBE, 0xEF }, +}; +struct cxl_event_record_raw hardware_replace = { + .hdr = { + .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E, + 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), + .length = sizeof(struct cxl_event_record_raw), + .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE, + /* .handle = Set dynamically */ + .related_handle = cpu_to_le16(0xb6a5), + }, + .data = { 0xDE, 0xAD, 0xBE, 0xEF }, }; +static void cxl_mock_add_event_logs(struct mock_event_store *mes) +{ + mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed); + mes->ev_status |= CXLDEV_EVENT_STATUS_INFO; + + mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); + mes->ev_status |= 
CXLDEV_EVENT_STATUS_FATAL; +} + static int mock_gsl(struct cxl_mbox_cmd *cmd) { if (cmd->size_out < sizeof(mock_gsl_payload)) @@ -582,6 +791,12 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_GET_PARTITION_INFO: rc = mock_partition_info(cxlds, cmd); break; + case CXL_MBOX_OP_GET_EVENT_RECORD: + rc = mock_get_event(cxlds, cmd); + break; + case CXL_MBOX_OP_CLEAR_EVENT_RECORD: + rc = mock_clear_event(cxlds, cmd); + break; case CXL_MBOX_OP_SET_LSA: rc = mock_set_lsa(cxlds, cmd); break; @@ -628,6 +843,15 @@ static bool is_rcd(struct platform_device *pdev) return !!id->driver_data; } +static ssize_t event_trigger_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + cxl_mock_event_trigger(dev); + return count; +} +static DEVICE_ATTR_WO(event_trigger); + static int cxl_mock_mem_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -655,6 +879,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) cxlds->serial = pdev->id; cxlds->mbox_send = cxl_mock_mbox_send; cxlds->payload_size = SZ_4K; + cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf; if (is_rcd(pdev)) { cxlds->rcd = true; cxlds->component_reg_phys = CXL_RESOURCE_NONE; @@ -672,10 +897,15 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) if (rc) return rc; + mdata->mes.cxlds = cxlds; + cxl_mock_add_event_logs(&mdata->mes); + cxlmd = devm_cxl_add_memdev(cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); + cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL); + return 0; } @@ -714,6 +944,7 @@ static DEVICE_ATTR_RW(security_lock); static struct attribute *cxl_mock_mem_attrs[] = { &dev_attr_security_lock.attr, + &dev_attr_event_trigger.attr, NULL }; ATTRIBUTE_GROUPS(cxl_mock_mem); -- cgit v1.2.3-70-g09d2 From 0092f62acc31ada89af09fe84b65999b8f434dd9 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 17 Jan 2023 21:53:42 -0800 Subject: cxl/test: Add specific events Each type of event has different trace point outputs. Add mock General Media Event, DRAM event, and Memory Module Event records to the mock list of events returned. 
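The typed records are queued through the same raw-record helper; that works because each of these layouts begins with the common record header, so the cast to struct cxl_event_record_raw is safe for the mock's purposes. A minimal sketch of the pattern used by the cxl_mock_add_event_logs() changes below (the helper name here is hypothetical):

static void add_typed_info_events(struct mock_event_store *mes)
{
        /* validity_flags is filled in at runtime rather than in the static
         * initializer, matching the patch below.
         */
        put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
                           &gen_media.validity_flags);
        mes_add_event(mes, CXL_EVENT_TYPE_INFO,
                      (struct cxl_event_record_raw *)&gen_media);
        mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
}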
Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20221216-cxl-ev-log-v7-7-2316a5c8f7d8@intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 73 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'tools') diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 90a463f83ae4..00bf19a68604 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -277,12 +277,85 @@ struct cxl_event_record_raw hardware_replace = { .data = { 0xDE, 0xAD, 0xBE, 0xEF }, }; +struct cxl_event_gen_media gen_media = { + .hdr = { + .id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, + 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6), + .length = sizeof(struct cxl_event_gen_media), + .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT, + /* .handle = Set dynamically */ + .related_handle = cpu_to_le16(0), + }, + .phys_addr = cpu_to_le64(0x2000), + .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT, + .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, + .transaction_type = CXL_GMER_TRANS_HOST_WRITE, + /* .validity_flags = */ + .channel = 1, + .rank = 30 +}; + +struct cxl_event_dram dram = { + .hdr = { + .id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, + 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24), + .length = sizeof(struct cxl_event_dram), + .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, + /* .handle = Set dynamically */ + .related_handle = cpu_to_le16(0), + }, + .phys_addr = cpu_to_le64(0x8000), + .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT, + .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR, + .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, + /* .validity_flags = */ + .channel = 1, + .bank_group = 5, + .bank = 2, + .column = {0xDE, 0xAD}, +}; + +struct cxl_event_mem_module mem_module = { + .hdr = { + .id = UUID_INIT(0xfe927475, 0xdd59, 0x4339, + 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74), + .length = sizeof(struct cxl_event_mem_module), + /* .handle = Set dynamically */ + .related_handle = cpu_to_le16(0), + }, + .event_type = CXL_MMER_TEMP_CHANGE, + .info = { + .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED, + .media_status = CXL_DHI_MS_ALL_DATA_LOST, + .add_status = (CXL_DHI_AS_CRITICAL << 2) | + (CXL_DHI_AS_WARNING << 4) | + (CXL_DHI_AS_WARNING << 5), + .device_temp = { 0xDE, 0xAD}, + .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef }, + .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, + .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, + } +}; + static void cxl_mock_add_event_logs(struct mock_event_store *mes) { + put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK, + &gen_media.validity_flags); + + put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP | + CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN, + &dram.validity_flags); + mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed); + mes_add_event(mes, CXL_EVENT_TYPE_INFO, + (struct cxl_event_record_raw *)&gen_media); + mes_add_event(mes, CXL_EVENT_TYPE_INFO, + (struct cxl_event_record_raw *)&mem_module); mes->ev_status |= CXLDEV_EVENT_STATUS_INFO; mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FATAL, + (struct cxl_event_record_raw *)&dram); mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL; } -- cgit v1.2.3-70-g09d2 From bab2a5e6fe7fddc00be0356bd538e38161bab085 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 17 Jan 2023 21:53:43 -0800 Subject: cxl/test: Simulate event log overflow Log overflow is marked by a separate trace message. 
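On the wire, overflow means the Get Event Records payload carries an overflow flag, a dropped-record count, and first/last overflow timestamps. A condensed sketch of how the mock fills those fields (the real hunk lives in mock_get_event() below; NSEC_PER_SEC is assumed from <linux/time64.h>):

static void mock_fill_overflow(struct cxl_get_event_payload *pl,
                               struct mock_event_log *log)
{
        u64 now = ktime_get_real_ns();

        if (!log->nr_overflow)
                return;

        pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
        pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
        /* pretend the overflow window spans the last few seconds */
        pl->first_overflow_timestamp = cpu_to_le64(now - 5 * NSEC_PER_SEC);
        pl->last_overflow_timestamp = cpu_to_le64(now - NSEC_PER_SEC);
}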
Simulate a log with lots of messages and flag overflow until space is cleared. Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20221216-cxl-ev-log-v7-8-2316a5c8f7d8@intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 50 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 00bf19a68604..9263b04d35f7 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -78,6 +78,8 @@ struct mock_event_log { u16 clear_idx; u16 cur_idx; u16 nr_events; + u16 nr_overflow; + u16 overflow_reset; struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX]; }; @@ -116,6 +118,7 @@ static void event_reset_log(struct mock_event_log *log) { log->cur_idx = 0; log->clear_idx = 0; + log->nr_overflow = log->overflow_reset; } /* Handle can never be 0 use 1 based indexing for handle */ @@ -147,8 +150,12 @@ static void mes_add_event(struct mock_event_store *mes, return; log = &mes->mock_logs[log_type]; - if (WARN_ON(log->nr_events >= CXL_TEST_EVENT_CNT_MAX)) + + if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) { + log->nr_overflow++; + log->overflow_reset = log->nr_overflow; return; + } log->events[log->nr_events] = event; log->nr_events++; @@ -159,6 +166,7 @@ static int mock_get_event(struct cxl_dev_state *cxlds, { struct cxl_get_event_payload *pl; struct mock_event_log *log; + u16 nr_overflow; u8 log_type; int i; @@ -191,6 +199,19 @@ static int mock_get_event(struct cxl_dev_state *cxlds, if (!event_log_empty(log)) pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS; + if (log->nr_overflow) { + u64 ns; + + pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW; + pl->overflow_err_count = cpu_to_le16(nr_overflow); + ns = ktime_get_real_ns(); + ns -= 5000000000; /* 5s ago */ + pl->first_overflow_timestamp = cpu_to_le64(ns); + ns = ktime_get_real_ns(); + ns -= 1000000000; /* 1s ago */ + pl->last_overflow_timestamp = cpu_to_le64(ns); + } + return 0; } @@ -231,6 +252,9 @@ static int mock_clear_event(struct cxl_dev_state *cxlds, } } + if (log->nr_overflow) + log->nr_overflow = 0; + /* Clear events */ log->clear_idx += pl->nr_recs; return 0; @@ -353,6 +377,30 @@ static void cxl_mock_add_event_logs(struct mock_event_store *mes) (struct cxl_event_record_raw *)&mem_module); mes->ev_status |= CXLDEV_EVENT_STATUS_INFO; + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, + (struct cxl_event_record_raw *)&dram); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, + (struct cxl_event_record_raw *)&gen_media); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, + (struct cxl_event_record_raw *)&mem_module); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, + (struct cxl_event_record_raw *)&dram); + /* Overflow this log */ + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + 
mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); + mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL; + mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FATAL, (struct cxl_event_record_raw *)&dram); -- cgit v1.2.3-70-g09d2 From 3d8f7ccaa611a743ae3a1e6f605346993d37c513 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 10 Feb 2023 01:06:45 -0800 Subject: tools/testing/cxl: Define a fixed volatile configuration to parse Take two endpoints attached to the first switch on the first host-bridge in the cxl_test topology and define a pre-initialized region. This is a x2 interleave underneath a x1 CXL Window. $ modprobe cxl_test $ # cxl list -Ru { "region":"region3", "resource":"0xf010000000", "size":"512.00 MiB (536.87 MB)", "interleave_ways":2, "interleave_granularity":4096, "decode_state":"commit" } Tested-by: Fan Ni Reviewed-by: Vishal Verma Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/167602000547.1924368.11613151863880268868.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/core.h | 3 - drivers/cxl/core/hdm.c | 3 +- drivers/cxl/core/port.c | 2 + drivers/cxl/cxl.h | 2 + drivers/cxl/cxlmem.h | 3 + tools/testing/cxl/test/cxl.c | 147 ++++++++++++++++++++++++++++++++++++++++--- 6 files changed, 146 insertions(+), 14 deletions(-) (limited to 'tools') diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 5eb873da5a30..479f01da6d35 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -57,9 +57,6 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled); extern struct rw_semaphore cxl_dpa_rwsem; -bool is_switch_decoder(struct device *dev); -struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); - int cxl_memdev_init(void); void cxl_memdev_exit(void); void cxl_mbox_init(void); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 8c29026a4b9d..80eccae6ba9e 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -279,7 +279,7 @@ success: return 0; } -static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, +int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, resource_size_t base, resource_size_t len, resource_size_t skipped) { @@ -295,6 +295,7 @@ static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); } +EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL); resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled) { diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 59620528571a..b45d2796ef35 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -458,6 +458,7 @@ bool is_switch_decoder(struct device *dev) { return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; } +EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); struct cxl_decoder *to_cxl_decoder(struct device *dev) { @@ -485,6 +486,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) return NULL; return container_of(dev, struct cxl_switch_decoder, cxld.dev); } +EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL); static void cxl_ep_release(struct cxl_ep *ep) { diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index c8ee4bb8cce6..2ac344235235 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -653,8 +653,10 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, struct cxl_decoder 
*to_cxl_decoder(struct device *dev); struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); +struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); bool is_root_decoder(struct device *dev); +bool is_switch_decoder(struct device *dev); bool is_endpoint_decoder(struct device *dev); struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, unsigned int nr_targets, diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index c9da3c699a21..bf7d4c5c8612 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -81,6 +81,9 @@ static inline bool is_cxl_endpoint(struct cxl_port *port) } struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds); +int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, + resource_size_t base, resource_size_t len, + resource_size_t skipped); static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, struct cxl_memdev *cxlmd) diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 920bd969c554..5342f69d70d2 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -703,6 +703,142 @@ static int mock_decoder_reset(struct cxl_decoder *cxld) return 0; } +static void default_mock_decoder(struct cxl_decoder *cxld) +{ + cxld->hpa_range = (struct range){ + .start = 0, + .end = -1, + }; + + cxld->interleave_ways = 1; + cxld->interleave_granularity = 256; + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->commit = mock_decoder_commit; + cxld->reset = mock_decoder_reset; +} + +static int first_decoder(struct device *dev, void *data) +{ + struct cxl_decoder *cxld; + + if (!is_switch_decoder(dev)) + return 0; + cxld = to_cxl_decoder(dev); + if (cxld->id == 0) + return 1; + return 0; +} + +static void mock_init_hdm_decoder(struct cxl_decoder *cxld) +{ + struct acpi_cedt_cfmws *window = mock_cfmws[0]; + struct platform_device *pdev = NULL; + struct cxl_endpoint_decoder *cxled; + struct cxl_switch_decoder *cxlsd; + struct cxl_port *port, *iter; + const int size = SZ_512M; + struct cxl_memdev *cxlmd; + struct cxl_dport *dport; + struct device *dev; + bool hb0 = false; + u64 base; + int i; + + if (is_endpoint_decoder(&cxld->dev)) { + cxled = to_cxl_endpoint_decoder(&cxld->dev); + cxlmd = cxled_to_memdev(cxled); + WARN_ON(!dev_is_platform(cxlmd->dev.parent)); + pdev = to_platform_device(cxlmd->dev.parent); + + /* check is endpoint is attach to host-bridge0 */ + port = cxled_to_port(cxled); + do { + if (port->uport == &cxl_host_bridge[0]->dev) { + hb0 = true; + break; + } + if (is_cxl_port(port->dev.parent)) + port = to_cxl_port(port->dev.parent); + else + port = NULL; + } while (port); + port = cxled_to_port(cxled); + } + + /* + * The first decoder on the first 2 devices on the first switch + * attached to host-bridge0 mock a fake / static RAM region. All + * other decoders are default disabled. Given the round robin + * assignment those devices are named cxl_mem.0, and cxl_mem.4. 
+ * + * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4' + */ + if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) { + default_mock_decoder(cxld); + return; + } + + base = window->base_hpa; + cxld->hpa_range = (struct range) { + .start = base, + .end = base + size - 1, + }; + + cxld->interleave_ways = 2; + eig_to_granularity(window->granularity, &cxld->interleave_granularity); + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->flags = CXL_DECODER_F_ENABLE; + cxled->state = CXL_DECODER_STATE_AUTO; + port->commit_end = cxld->id; + devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0); + cxld->commit = mock_decoder_commit; + cxld->reset = mock_decoder_reset; + + /* + * Now that endpoint decoder is set up, walk up the hierarchy + * and setup the switch and root port decoders targeting @cxlmd. + */ + iter = port; + for (i = 0; i < 2; i++) { + dport = iter->parent_dport; + iter = dport->port; + dev = device_find_child(&iter->dev, NULL, first_decoder); + /* + * Ancestor ports are guaranteed to be enumerated before + * @port, and all ports have at least one decoder. + */ + if (WARN_ON(!dev)) + continue; + cxlsd = to_cxl_switch_decoder(dev); + if (i == 0) { + /* put cxl_mem.4 second in the decode order */ + if (pdev->id == 4) + cxlsd->target[1] = dport; + else + cxlsd->target[0] = dport; + } else + cxlsd->target[0] = dport; + cxld = &cxlsd->cxld; + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->flags = CXL_DECODER_F_ENABLE; + iter->commit_end = 0; + /* + * Switch targets 2 endpoints, while host bridge targets + * one root port + */ + if (i == 0) + cxld->interleave_ways = 2; + else + cxld->interleave_ways = 1; + cxld->interleave_granularity = 256; + cxld->hpa_range = (struct range) { + .start = base, + .end = base + size - 1, + }; + put_device(dev); + } +} + static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) { struct cxl_port *port = cxlhdm->port; @@ -748,16 +884,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) cxld = &cxled->cxld; } - cxld->hpa_range = (struct range) { - .start = 0, - .end = -1, - }; - - cxld->interleave_ways = min_not_zero(target_count, 1); - cxld->interleave_granularity = SZ_4K; - cxld->target_type = CXL_DECODER_EXPANDER; - cxld->commit = mock_decoder_commit; - cxld->reset = mock_decoder_reset; + mock_init_hdm_decoder(cxld); if (target_count) { rc = device_for_each_child(port->uport, &ctx, -- cgit v1.2.3-70-g09d2 From 59c3368b2e69eb7da7f271286a0bd80930dfc070 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 14 Feb 2023 11:41:13 -0800 Subject: cxl/port: Export cxl_dvsec_rr_decode() to cxl_port Call cxl_dvsec_rr_decode() in the beginning of cxl_port_probe() and preserve the decoded information in a local 'struct cxl_endpoint_dvsec_info'. This info can be passed to various functions later on in order to support the HDM decoder emulation. The invocation of cxl_dvsec_rr_decode() in cxl_hdm_decode_init() is removed and a pointer to the 'struct cxl_endpoint_dvsec_info' is passed in. 
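The resulting endpoint flow, condensed from the cxl_port_probe() changes below (error handling trimmed; a sketch, not the literal implementation):

static int endpoint_decode_sketch(struct cxl_port *port, struct cxl_hdm *cxlhdm)
{
        struct cxl_endpoint_dvsec_info info = { 0 };
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        int rc;

        /* decode the DVSEC range registers once, early in probe */
        rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
        if (rc < 0)
                return rc;

        /* ...then hand the cached result to the HDM decode setup */
        return cxl_hdm_decode_init(cxlds, cxlhdm, &info);
}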
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/167640367377.935665.2848747799651019676.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 18 +++++++----------- drivers/cxl/cxl.h | 14 ++++++++++++++ drivers/cxl/cxlmem.h | 12 ------------ drivers/cxl/cxlpci.h | 3 ++- drivers/cxl/port.c | 20 +++++++++++++------- tools/testing/cxl/Kbuild | 1 + tools/testing/cxl/test/mock.c | 21 +++++++++++++++++++-- 7 files changed, 56 insertions(+), 33 deletions(-) (limited to 'tools') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 52bf6b4d093e..948fa3724a0f 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -333,8 +333,8 @@ static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds, return true; } -static int cxl_dvsec_rr_decode(struct device *dev, int d, - struct cxl_endpoint_dvsec_info *info) +int cxl_dvsec_rr_decode(struct device *dev, int d, + struct cxl_endpoint_dvsec_info *info) { struct pci_dev *pdev = to_pci_dev(dev); int hdm_count, rc, i, ranges = 0; @@ -434,30 +434,26 @@ static int cxl_dvsec_rr_decode(struct device *dev, int d, return 0; } +EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL); /** * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint * @cxlds: Device state * @cxlhdm: Mapped HDM decoder Capability + * @info: Cached DVSEC range registers info * * Try to enable the endpoint's HDM Decoder Capability */ -int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm) +int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) { - struct cxl_endpoint_dvsec_info info = { 0 }; struct device *dev = cxlds->dev; - int d = cxlds->cxl_dvsec; - int rc; - - rc = cxl_dvsec_rr_decode(dev, d, &info); - if (rc < 0) - return rc; /* * If DVSEC ranges are being used instead of HDM decoder registers there * is no use in trying to manage those. */ - if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) { + if (!__cxl_hdm_decode_init(cxlds, cxlhdm, info)) { dev_err(dev, "Legacy range registers configuration prevents HDM operation.\n"); return -EBUSY; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 1b1cf459ac77..fc01ce96d326 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -630,10 +630,24 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); +/** + * struct cxl_endpoint_dvsec_info - Cached DVSEC info + * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE + * @ranges: Number of active HDM ranges this device uses. 
+ * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE + */ +struct cxl_endpoint_dvsec_info { + bool mem_enabled; + int ranges; + struct range dvsec_range[2]; +}; + struct cxl_hdm; struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port); int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); +int cxl_dvsec_rr_decode(struct device *dev, int dvsec, + struct cxl_endpoint_dvsec_info *info); bool is_cxl_region(struct device *dev); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index ab138004f644..187a310780a9 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -181,18 +181,6 @@ static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd) */ #define CXL_CAPACITY_MULTIPLIER SZ_256M -/** - * struct cxl_endpoint_dvsec_info - Cached DVSEC info - * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE - * @ranges: Number of active HDM ranges this device uses. - * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE - */ -struct cxl_endpoint_dvsec_info { - bool mem_enabled; - int ranges; - struct range dvsec_range[2]; -}; - /** * struct cxl_dev_state - The driver device state * diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index 920909791bb9..430e23345a16 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -64,6 +64,7 @@ enum cxl_regloc_type { int devm_cxl_port_enumerate_dports(struct cxl_port *port); struct cxl_dev_state; -int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm); +int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info); void read_cdat_data(struct cxl_port *port); #endif /* __CXL_PCI_H__ */ diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 5453771bf330..9e09728b20d9 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -32,12 +32,21 @@ static void schedule_detach(void *cxlmd) static int cxl_port_probe(struct device *dev) { + struct cxl_endpoint_dvsec_info info = { 0 }; struct cxl_port *port = to_cxl_port(dev); + bool is_ep = is_cxl_endpoint(port); + struct cxl_dev_state *cxlds; + struct cxl_memdev *cxlmd; struct cxl_hdm *cxlhdm; int rc; - - if (!is_cxl_endpoint(port)) { + if (is_ep) { + cxlmd = to_cxl_memdev(port->uport); + cxlds = cxlmd->cxlds; + rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info); + if (rc < 0) + return rc; + } else { rc = devm_cxl_port_enumerate_dports(port); if (rc < 0) return rc; @@ -49,10 +58,7 @@ static int cxl_port_probe(struct device *dev) if (IS_ERR(cxlhdm)) return PTR_ERR(cxlhdm); - if (is_cxl_endpoint(port)) { - struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - + if (is_ep) { /* Cache the data early to ensure is_visible() works */ read_cdat_data(port); @@ -61,7 +67,7 @@ static int cxl_port_probe(struct device *dev) if (rc) return rc; - rc = cxl_hdm_decode_init(cxlds, cxlhdm); + rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info); if (rc) return rc; diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 0805f08af8b3..012149ad5c1c 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -10,6 +10,7 @@ ldflags-y += --wrap=devm_cxl_add_passthrough_decoder ldflags-y += --wrap=devm_cxl_enumerate_decoders ldflags-y += --wrap=cxl_await_media_ready ldflags-y += --wrap=cxl_hdm_decode_init +ldflags-y += --wrap=cxl_dvsec_rr_decode ldflags-y += --wrap=cxl_rcrb_to_component DRIVERS := ../../../drivers diff --git 
a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c index 5dface08e0de..2a13f4722891 100644 --- a/tools/testing/cxl/test/mock.c +++ b/tools/testing/cxl/test/mock.c @@ -209,7 +209,8 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds) EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL); int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds, - struct cxl_hdm *cxlhdm) + struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) { int rc = 0, index; struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); @@ -217,13 +218,29 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds, if (ops && ops->is_mock_dev(cxlds->dev)) rc = 0; else - rc = cxl_hdm_decode_init(cxlds, cxlhdm); + rc = cxl_hdm_decode_init(cxlds, cxlhdm, info); put_cxl_mock_ops(index); return rc; } EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL); +int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec, + struct cxl_endpoint_dvsec_info *info) +{ + int rc = 0, index; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_dev(dev)) + rc = 0; + else + rc = cxl_dvsec_rr_decode(dev, dvsec, info); + put_cxl_mock_ops(index); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, CXL); + resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev, resource_size_t rcrb, enum cxl_rcrb which) -- cgit v1.2.3-70-g09d2 From b777e9bec960a29374dc486d47784c73b7ac4cef Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 14 Feb 2023 11:41:24 -0800 Subject: cxl/hdm: Emulate HDM decoder from DVSEC range registers In the case where HDM decoder register block exists but is not programmed and at the same time the DVSEC range register range is active, populate the CXL decoder object 'cxl_decoder' with info from DVSEC range registers. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/167640368454.935665.13806415120298330717.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/hdm.c | 36 +++++++++++++++++++++++++++++++++--- drivers/cxl/core/pci.c | 2 +- drivers/cxl/cxl.h | 3 ++- drivers/cxl/port.c | 2 +- tools/testing/cxl/test/cxl.c | 3 ++- tools/testing/cxl/test/mock.c | 7 ++++--- tools/testing/cxl/test/mock.h | 3 ++- 7 files changed, 45 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index dcc16d7cb8f3..c0f224454447 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -679,9 +679,34 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) return 0; } +static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port, + struct cxl_decoder *cxld, int which, + struct cxl_endpoint_dvsec_info *info) +{ + if (!is_cxl_endpoint(port)) + return -EOPNOTSUPP; + + if (!range_len(&info->dvsec_range[which])) + return -ENOENT; + + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->commit = NULL; + cxld->reset = NULL; + cxld->hpa_range = info->dvsec_range[which]; + + /* + * Set the emulated decoder as locked pending additional support to + * change the range registers at run time. 
+ */ + cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; + port->commit_end = cxld->id; + + return 0; +} + static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, int *target_map, void __iomem *hdm, int which, - u64 *dpa_base) + u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) { struct cxl_endpoint_decoder *cxled = NULL; u64 size, base, skip, dpa_size; @@ -717,6 +742,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, .end = base + size - 1, }; + if (cxled && !committed && range_len(&info->dvsec_range[which])) + return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info); + /* decoders are enabled if committed */ if (committed) { cxld->flags |= CXL_DECODER_F_ENABLE; @@ -790,7 +818,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, * devm_cxl_enumerate_decoders - add decoder objects per HDM register set * @cxlhdm: Structure to populate with HDM capabilities */ -int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) +int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) { void __iomem *hdm = cxlhdm->regs.hdm_decoder; struct cxl_port *port = cxlhdm->port; @@ -842,7 +871,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) cxld = &cxlsd->cxld; } - rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base); + rc = init_hdm_decoder(port, cxld, target_map, hdm, i, + &dpa_base, info); if (rc) { put_device(&cxld->dev); return rc; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index d0b25481bdce..4df0b35c9b1a 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -426,7 +426,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, * Decoder Capability Enable. 
*/ if (info->mem_enabled) - return -EBUSY; + return 0; rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); if (rc) diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index fc01ce96d326..fe9d75989c8a 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -644,7 +644,8 @@ struct cxl_endpoint_dvsec_info { struct cxl_hdm; struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port); -int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm); +int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); int cxl_dvsec_rr_decode(struct device *dev, int dvsec, struct cxl_endpoint_dvsec_info *info); diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 9e09728b20d9..d3a708e32565 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -78,7 +78,7 @@ static int cxl_port_probe(struct device *dev) } } - rc = devm_cxl_enumerate_decoders(cxlhdm); + rc = devm_cxl_enumerate_decoders(cxlhdm, &info); if (rc) { dev_err(dev, "Couldn't enumerate decoders (%d)\n", rc); return rc; diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 30ee680d38ff..3b4916adf29c 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -701,7 +701,8 @@ static int mock_decoder_reset(struct cxl_decoder *cxld) return 0; } -static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) +static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) { struct cxl_port *port = cxlhdm->port; struct cxl_port *parent_port = to_cxl_port(port->dev.parent); diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c index 2a13f4722891..3116c9f07c5d 100644 --- a/tools/testing/cxl/test/mock.c +++ b/tools/testing/cxl/test/mock.c @@ -162,16 +162,17 @@ int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port) } EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_passthrough_decoder, CXL); -int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) +int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) { int rc, index; struct cxl_port *port = cxlhdm->port; struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); if (ops && ops->is_mock_port(port->uport)) - rc = ops->devm_cxl_enumerate_decoders(cxlhdm); + rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info); else - rc = devm_cxl_enumerate_decoders(cxlhdm); + rc = devm_cxl_enumerate_decoders(cxlhdm, info); put_cxl_mock_ops(index); return rc; diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h index ef33f159375e..e377ced5f1b3 100644 --- a/tools/testing/cxl/test/mock.h +++ b/tools/testing/cxl/test/mock.h @@ -25,7 +25,8 @@ struct cxl_mock_ops { int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port); struct cxl_hdm *(*devm_cxl_setup_hdm)(struct cxl_port *port); int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port); - int (*devm_cxl_enumerate_decoders)(struct cxl_hdm *hdm); + int (*devm_cxl_enumerate_decoders)( + struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info); }; void register_cxl_mock_ops(struct cxl_mock_ops *ops); -- cgit v1.2.3-70-g09d2 From 4474ce565ee4490fb4e6d8443b617a9d98ae10ff Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 14 Feb 2023 11:41:30 -0800 Subject: cxl/hdm: Create emulated cxl_hdm for devices that do not have HDM decoders CXL rev3 spec 8.1.3 RCDs may not have HDM register blocks. Create a fake HDM with information from the CXL PCIe DVSEC registers. 
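Condensed, the fallback added to devm_cxl_setup_hdm() looks like the sketch below (see the devm_cxl_setup_emulated_hdm() hunk for the real version): when the component register block cannot be mapped but DVSEC decode is active, a cxl_hdm is fabricated from the cached DVSEC info.

static struct cxl_hdm *emulated_hdm_sketch(struct cxl_port *port,
                                           struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_hdm *cxlhdm;

        /* only emulate when the DVSEC range registers are actually in use */
        if (!info->mem_enabled)
                return ERR_PTR(-ENODEV);

        cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);

        cxlhdm->port = port;
        cxlhdm->decoder_count = info->ranges;
        cxlhdm->target_count = info->ranges;
        dev_set_drvdata(&port->dev, cxlhdm);
        return cxlhdm;
}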
The decoder count will be set to the HDM count retrieved from the DVSEC cap register. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/167640368994.935665.15831225724059704620.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/hdm.c | 58 +++++++++++++++++++++++++++++++++++-------- drivers/cxl/core/pci.c | 9 ++++--- drivers/cxl/cxl.h | 3 ++- drivers/cxl/port.c | 2 +- tools/testing/cxl/test/cxl.c | 3 ++- tools/testing/cxl/test/mock.c | 8 +++--- tools/testing/cxl/test/mock.h | 3 ++- 7 files changed, 66 insertions(+), 20 deletions(-) (limited to 'tools') diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index c0f224454447..a49543f22dca 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -101,11 +101,34 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb, BIT(CXL_CM_CAP_CAP_ID_HDM)); } +static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port, + struct cxl_endpoint_dvsec_info *info) +{ + struct device *dev = &port->dev; + struct cxl_hdm *cxlhdm; + + if (!info->mem_enabled) + return ERR_PTR(-ENODEV); + + cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); + if (!cxlhdm) + return ERR_PTR(-ENOMEM); + + cxlhdm->port = port; + cxlhdm->decoder_count = info->ranges; + cxlhdm->target_count = info->ranges; + dev_set_drvdata(&port->dev, cxlhdm); + + return cxlhdm; +} + /** * devm_cxl_setup_hdm - map HDM decoder component registers * @port: cxl_port to map + * @info: cached DVSEC range register info */ -struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port) +struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, + struct cxl_endpoint_dvsec_info *info) { struct device *dev = &port->dev; struct cxl_hdm *cxlhdm; @@ -119,6 +142,9 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port) cxlhdm->port = port; crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); if (!crb) { + if (info->mem_enabled) + return devm_cxl_setup_emulated_hdm(port, info); + dev_err(dev, "No component registers mapped\n"); return ERR_PTR(-ENXIO); } @@ -814,19 +840,15 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, return 0; } -/** - * devm_cxl_enumerate_decoders - add decoder objects per HDM register set - * @cxlhdm: Structure to populate with HDM capabilities - */ -int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, - struct cxl_endpoint_dvsec_info *info) +static void cxl_settle_decoders(struct cxl_hdm *cxlhdm) { void __iomem *hdm = cxlhdm->regs.hdm_decoder; - struct cxl_port *port = cxlhdm->port; - int i, committed; - u64 dpa_base = 0; + int committed, i; u32 ctrl; + if (!hdm) + return; + /* * Since the register resource was recently claimed via request_region() * be careful about trusting the "not-committed" status until the commit @@ -843,6 +865,22 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, /* ensure that future checks of committed can be trusted */ if (committed != cxlhdm->decoder_count) msleep(20); +} + +/** + * devm_cxl_enumerate_decoders - add decoder objects per HDM register set + * @cxlhdm: Structure to populate with HDM capabilities + * @info: cached DVSEC range register info + */ +int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) +{ + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + struct cxl_port *port = cxlhdm->port; + int i; + u64 dpa_base = 0; + + cxl_settle_decoders(cxlhdm); for (i = 0; i < cxlhdm->decoder_count; i++) { int 
target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 4df0b35c9b1a..4eb34dee7c95 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -378,16 +378,19 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, struct device *dev = cxlds->dev; struct cxl_port *root; int i, rc, allowed; - u32 global_ctrl; + u32 global_ctrl = 0; - global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + if (hdm) + global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); /* * If the HDM Decoder Capability is already enabled then assume * that some other agent like platform firmware set it up. */ - if (global_ctrl & CXL_HDM_DECODER_ENABLE) + if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled)) return devm_cxl_enable_mem(&port->dev, cxlds); + else if (!hdm) + return -ENODEV; root = to_cxl_port(port->dev.parent); while (!is_cxl_root(root) && is_cxl_port(root->dev.parent)) diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index fe9d75989c8a..f8cbc5275451 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -643,7 +643,8 @@ struct cxl_endpoint_dvsec_info { }; struct cxl_hdm; -struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port); +struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, + struct cxl_endpoint_dvsec_info *info); int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, struct cxl_endpoint_dvsec_info *info); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index d3a708e32565..9f9cc268b597 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -54,7 +54,7 @@ static int cxl_port_probe(struct device *dev) return devm_cxl_add_passthrough_decoder(port); } - cxlhdm = devm_cxl_setup_hdm(port); + cxlhdm = devm_cxl_setup_hdm(port, &info); if (IS_ERR(cxlhdm)) return PTR_ERR(cxlhdm); diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 3b4916adf29c..94197abd44aa 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -618,7 +618,8 @@ static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle) return &mock_pci_root[host_bridge_index(adev)]; } -static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port) +static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port, + struct cxl_endpoint_dvsec_info *info) { struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL); diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c index 3116c9f07c5d..c4e53f22e421 100644 --- a/tools/testing/cxl/test/mock.c +++ b/tools/testing/cxl/test/mock.c @@ -131,16 +131,18 @@ __wrap_nvdimm_bus_register(struct device *dev, } EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register); -struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port) +struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port, + struct cxl_endpoint_dvsec_info *info) + { int index; struct cxl_hdm *cxlhdm; struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); if (ops && ops->is_mock_port(port->uport)) - cxlhdm = ops->devm_cxl_setup_hdm(port); + cxlhdm = ops->devm_cxl_setup_hdm(port, info); else - cxlhdm = devm_cxl_setup_hdm(port); + cxlhdm = devm_cxl_setup_hdm(port, info); put_cxl_mock_ops(index); return cxlhdm; diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h index e377ced5f1b3..bef8817b01f2 100644 --- a/tools/testing/cxl/test/mock.h +++ b/tools/testing/cxl/test/mock.h @@ -23,7 +23,8 @@ struct cxl_mock_ops { bool (*is_mock_port)(struct device 
*dev); bool (*is_mock_dev)(struct device *dev); int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port); - struct cxl_hdm *(*devm_cxl_setup_hdm)(struct cxl_port *port); + struct cxl_hdm *(*devm_cxl_setup_hdm)( + struct cxl_port *port, struct cxl_endpoint_dvsec_info *info); int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port); int (*devm_cxl_enumerate_decoders)( struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info); -- cgit v1.2.3-70-g09d2