Diffstat (limited to 'drivers/platform/x86/intel')
37 files changed, 2147 insertions, 190 deletions
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig index bbbd9e54e9ee..e9dc0c021029 100644 --- a/drivers/platform/x86/intel/Kconfig +++ b/drivers/platform/x86/intel/Kconfig @@ -80,6 +80,16 @@ config INTEL_BXTWC_PMIC_TMU This driver enables the alarm wakeup functionality in the TMU unit of Whiskey Cove PMIC. +config INTEL_BYTCRC_PWRSRC + tristate "Intel Bay Trail Crystal Cove power source driver" + depends on INTEL_SOC_PMIC + help + This option adds a power source driver for Crystal Cove PMICs + on Intel Bay Trail devices. + + To compile this driver as a module, choose M here: the module + will be called intel_bytcrc_pwrsrc. + config INTEL_CHTDC_TI_PWRBTN tristate "Intel Cherry Trail Dollar Cove TI power button driver" depends on INTEL_SOC_PMIC_CHTDC_TI diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile index 411df4040427..c1d5fe05e3f3 100644 --- a/drivers/platform/x86/intel/Makefile +++ b/drivers/platform/x86/intel/Makefile @@ -38,6 +38,8 @@ intel_bxtwc_tmu-y := bxtwc_tmu.o obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o intel_crystal_cove_charger-y := crystal_cove_charger.o obj-$(CONFIG_X86_ANDROID_TABLETS) += intel_crystal_cove_charger.o +intel_bytcrc_pwrsrc-y := bytcrc_pwrsrc.o +obj-$(CONFIG_INTEL_BYTCRC_PWRSRC) += intel_bytcrc_pwrsrc.o intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o intel_chtwc_int33fe-y := chtwc_int33fe.o diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c index 7ccf583649e6..d0e2a3c293b0 100644 --- a/drivers/platform/x86/intel/bxtwc_tmu.c +++ b/drivers/platform/x86/intel/bxtwc_tmu.c @@ -89,7 +89,7 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev) return 0; } -static int bxt_wcove_tmu_remove(struct platform_device *pdev) +static void bxt_wcove_tmu_remove(struct platform_device *pdev) { struct wcove_tmu *wctmu = platform_get_drvdata(pdev); unsigned int val; @@ -101,7 +101,6 @@ static int bxt_wcove_tmu_remove(struct platform_device *pdev) regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val); regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG, val | BXTWC_TMU_ALRM_MASK); - return 0; } #ifdef CONFIG_PM_SLEEP @@ -132,7 +131,7 @@ MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table); static struct platform_driver bxt_wcove_tmu_driver = { .probe = bxt_wcove_tmu_probe, - .remove = bxt_wcove_tmu_remove, + .remove_new = bxt_wcove_tmu_remove, .driver = { .name = "bxt_wcove_tmu", .pm = &bxtwc_tmu_pm_ops, diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c new file mode 100644 index 000000000000..8a022b90d12d --- /dev/null +++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Power-source driver for Bay Trail Crystal Cove PMIC + * + * Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com> + * + * Based on intel_crystalcove_pwrsrc.c from Android kernel sources, which is: + * Copyright (C) 2013 Intel Corporation + */ + +#include <linux/debugfs.h> +#include <linux/mfd/intel_soc_pmic.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define CRYSTALCOVE_SPWRSRC_REG 0x1E +#define CRYSTALCOVE_RESETSRC0_REG 0x20 +#define CRYSTALCOVE_RESETSRC1_REG 0x21 +#define CRYSTALCOVE_WAKESRC_REG 0x22 + +struct crc_pwrsrc_data { + struct regmap *regmap; + struct dentry *debug_dentry; + unsigned int resetsrc0; + unsigned int resetsrc1; + unsigned int wakesrc; +}; + 
+static const char * const pwrsrc_pwrsrc_info[] = { + /* bit 0 */ "USB", + /* bit 1 */ "DC in", + /* bit 2 */ "Battery", + NULL, +}; + +static const char * const pwrsrc_resetsrc0_info[] = { + /* bit 0 */ "SOC reporting a thermal event", + /* bit 1 */ "critical PMIC temperature", + /* bit 2 */ "critical system temperature", + /* bit 3 */ "critical battery temperature", + /* bit 4 */ "VSYS under voltage", + /* bit 5 */ "VSYS over voltage", + /* bit 6 */ "battery removal", + NULL, +}; + +static const char * const pwrsrc_resetsrc1_info[] = { + /* bit 0 */ "VCRIT threshold", + /* bit 1 */ "BATID reporting battery removal", + /* bit 2 */ "user pressing the power button", + NULL, +}; + +static const char * const pwrsrc_wakesrc_info[] = { + /* bit 0 */ "user pressing the power button", + /* bit 1 */ "a battery insertion", + /* bit 2 */ "a USB charger insertion", + /* bit 3 */ "an adapter insertion", + NULL, +}; + +static void crc_pwrsrc_log(struct seq_file *seq, const char *prefix, + const char * const *info, unsigned int reg_val) +{ + int i; + + for (i = 0; info[i]; i++) { + if (reg_val & BIT(i)) + seq_printf(seq, "%s by %s\n", prefix, info[i]); + } +} + +static int pwrsrc_show(struct seq_file *seq, void *unused) +{ + struct crc_pwrsrc_data *data = seq->private; + unsigned int reg_val; + int ret; + + ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, ®_val); + if (ret) + return ret; + + crc_pwrsrc_log(seq, "System powered", pwrsrc_pwrsrc_info, reg_val); + return 0; +} + +static int resetsrc_show(struct seq_file *seq, void *unused) +{ + struct crc_pwrsrc_data *data = seq->private; + + crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc0_info, data->resetsrc0); + crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc1_info, data->resetsrc1); + return 0; +} + +static int wakesrc_show(struct seq_file *seq, void *unused) +{ + struct crc_pwrsrc_data *data = seq->private; + + crc_pwrsrc_log(seq, "Last wake caused", pwrsrc_wakesrc_info, data->wakesrc); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(pwrsrc); +DEFINE_SHOW_ATTRIBUTE(resetsrc); +DEFINE_SHOW_ATTRIBUTE(wakesrc); + +static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data, + unsigned int reg, unsigned int *val) +{ + int ret; + + ret = regmap_read(data->regmap, reg, val); + if (ret) + return ret; + + return regmap_write(data->regmap, reg, *val); +} + +static int crc_pwrsrc_probe(struct platform_device *pdev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); + struct crc_pwrsrc_data *data; + int ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regmap = pmic->regmap; + + /* + * Read + clear resetsrc0/1 and wakesrc now, so that they get + * cleared even if the debugfs interface is never used. + * + * Properly clearing the wakesrc is important, leaving bit 0 of it + * set turns reboot into poweroff on some tablets. 
+ */ + ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC0_REG, &data->resetsrc0); + if (ret) + return ret; + + ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC1_REG, &data->resetsrc1); + if (ret) + return ret; + + ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_WAKESRC_REG, &data->wakesrc); + if (ret) + return ret; + + data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL); + debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops); + debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops); + debugfs_create_file("wakesrc", 0444, data->debug_dentry, data, &wakesrc_fops); + + platform_set_drvdata(pdev, data); + return 0; +} + +static int crc_pwrsrc_remove(struct platform_device *pdev) +{ + struct crc_pwrsrc_data *data = platform_get_drvdata(pdev); + + debugfs_remove_recursive(data->debug_dentry); + return 0; +} + +static struct platform_driver crc_pwrsrc_driver = { + .probe = crc_pwrsrc_probe, + .remove = crc_pwrsrc_remove, + .driver = { + .name = "crystal_cove_pwrsrc", + }, +}; +module_platform_driver(crc_pwrsrc_driver); + +MODULE_ALIAS("platform:crystal_cove_pwrsrc"); +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); +MODULE_DESCRIPTION("Power-source driver for Bay Trail Crystal Cove PMIC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c index 9606a994af22..615f8d1a0c68 100644 --- a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c +++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c @@ -67,11 +67,10 @@ static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev) return 0; } -static int chtdc_ti_pwrbtn_remove(struct platform_device *pdev) +static void chtdc_ti_pwrbtn_remove(struct platform_device *pdev) { dev_pm_clear_wake_irq(&pdev->dev); device_init_wakeup(&pdev->dev, false); - return 0; } static const struct platform_device_id chtdc_ti_pwrbtn_id_table[] = { @@ -85,7 +84,7 @@ static struct platform_driver chtdc_ti_pwrbtn_driver = { .name = KBUILD_MODNAME, }, .probe = chtdc_ti_pwrbtn_probe, - .remove = chtdc_ti_pwrbtn_remove, + .remove_new = chtdc_ti_pwrbtn_remove, .id_table = chtdc_ti_pwrbtn_id_table, }; module_platform_driver(chtdc_ti_pwrbtn_driver); diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c index 2c9a7d52be07..848baecc1bb0 100644 --- a/drivers/platform/x86/intel/chtwc_int33fe.c +++ b/drivers/platform/x86/intel/chtwc_int33fe.c @@ -405,7 +405,7 @@ out_remove_nodes: return ret; } -static int cht_int33fe_typec_remove(struct platform_device *pdev) +static void cht_int33fe_typec_remove(struct platform_device *pdev) { struct cht_int33fe_data *data = platform_get_drvdata(pdev); @@ -414,8 +414,6 @@ static int cht_int33fe_typec_remove(struct platform_device *pdev) i2c_unregister_device(data->battery_fg); cht_int33fe_remove_nodes(data); - - return 0; } static const struct acpi_device_id cht_int33fe_acpi_ids[] = { @@ -429,7 +427,7 @@ static struct platform_driver cht_int33fe_typec_driver = { .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids), }, .probe = cht_int33fe_typec_probe, - .remove = cht_int33fe_typec_remove, + .remove_new = cht_int33fe_typec_remove, }; module_platform_driver(cht_int33fe_typec_driver); diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index b6c06b37862e..5632bd3c534a 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -720,7 +720,7 @@ err_remove_notify: return err; } -static int 
intel_hid_remove(struct platform_device *device) +static void intel_hid_remove(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); @@ -728,12 +728,6 @@ static int intel_hid_remove(struct platform_device *device) acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); intel_hid_set_enable(&device->dev, false); intel_button_array_enable(&device->dev, false); - - /* - * Even if we failed to shut off the event stream, we can still - * safely detach from the device. - */ - return 0; } static struct platform_driver intel_hid_pl_driver = { @@ -743,7 +737,7 @@ static struct platform_driver intel_hid_pl_driver = { .pm = &intel_hid_pl_pm_ops, }, .probe = intel_hid_probe, - .remove = intel_hid_remove, + .remove_new = intel_hid_remove, }; /* diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 206a617c2e02..306f886b52d2 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -16,27 +16,63 @@ static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X), + X86_MATCH(EMERALDRAPIDS_X), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); -static struct ifs_device ifs_device = { - .data = { - .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, - .test_num = 0, +ATTRIBUTE_GROUPS(plat_ifs); +ATTRIBUTE_GROUPS(plat_ifs_array); + +bool *ifs_pkg_auth; + +static const struct ifs_test_caps scan_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, + .test_num = IFS_TYPE_SAF, +}; + +static const struct ifs_test_caps array_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT, + .test_num = IFS_TYPE_ARRAY_BIST, +}; + +static struct ifs_device ifs_devices[] = { + [IFS_TYPE_SAF] = { + .test_caps = &scan_test, + .misc = { + .name = "intel_ifs_0", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_groups, + }, }, - .misc = { - .name = "intel_ifs_0", - .nodename = "intel_ifs/0", - .minor = MISC_DYNAMIC_MINOR, + [IFS_TYPE_ARRAY_BIST] = { + .test_caps = &array_test, + .misc = { + .name = "intel_ifs_1", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_array_groups, + }, }, }; +#define IFS_NUMTESTS ARRAY_SIZE(ifs_devices) + +static void ifs_cleanup(void) +{ + int i; + + for (i = 0; i < IFS_NUMTESTS; i++) { + if (ifs_devices[i].misc.this_device) + misc_deregister(&ifs_devices[i].misc); + } + kfree(ifs_pkg_auth); +} + static int __init ifs_init(void) { const struct x86_cpu_id *m; u64 msrval; - int ret; + int i, ret; m = x86_match_cpu(ifs_cpu_ids); if (!m) @@ -51,28 +87,27 @@ static int __init ifs_init(void) if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval)) return -ENODEV; - ifs_device.misc.groups = ifs_get_groups(); - - if (!(msrval & BIT(ifs_device.data.integrity_cap_bit))) - return -ENODEV; - - ifs_device.data.pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL); - if (!ifs_device.data.pkg_auth) + ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL); + if (!ifs_pkg_auth) return -ENOMEM; - ret = misc_register(&ifs_device.misc); - if (ret) { - kfree(ifs_device.data.pkg_auth); - return ret; + for (i = 0; i < IFS_NUMTESTS; i++) { + if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit))) + continue; + ret = misc_register(&ifs_devices[i].misc); + if (ret) + goto err_exit; } - return 0; + +err_exit: + ifs_cleanup(); + return ret; } static void __exit ifs_exit(void) { - misc_deregister(&ifs_device.misc); - kfree(ifs_device.data.pkg_auth); + ifs_cleanup(); } module_init(ifs_init); diff 
--git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 046e39304fd5..93191855890f 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -17,7 +17,7 @@ * In Field Scan (IFS) is a hardware feature to run circuit level tests on * a CPU core to detect problems that are not caught by parity or ECC checks. * Future CPUs will support more than one type of test which will show up - * with a new platform-device instance-id, for now only .0 is exposed. + * with a new platform-device instance-id. * * * IFS Image @@ -25,7 +25,10 @@ * * Intel provides a firmware file containing the scan tests via * github [#f1]_. Similar to microcode there is a separate file for each - * family-model-stepping. + * family-model-stepping. IFS Images are not applicable for some test types. + * Wherever applicable the sysfs directory would provide a "current_batch" file + * (see below) for loading the image. + * * * IFS Image Loading * ----------------- @@ -35,7 +38,7 @@ * SHA hashes for the test. Then the tests themselves. Status MSRs provide * feedback on the success/failure of these steps. * - * The test files are kept in a fixed location: /lib/firmware/intel/ifs_0/ + * The test files are kept in a fixed location: /lib/firmware/intel/ifs_<n>/ * For e.g if there are 3 test files, they would be named in the following * fashion: * ff-mm-ss-01.scan @@ -47,7 +50,7 @@ * (e.g 1, 2 or 3 in the above scenario) into the curent_batch file. * To load ff-mm-ss-02.scan, the following command can be used:: * - * # echo 2 > /sys/devices/virtual/misc/intel_ifs_0/current_batch + * # echo 2 > /sys/devices/virtual/misc/intel_ifs_<n>/current_batch * * The above file can also be read to know the currently loaded image. * @@ -69,16 +72,16 @@ * to migrate those applications to other cores before running a core test. * It may also be necessary to redirect interrupts to other CPUs. * - * In all cases reading the SCAN_STATUS MSR provides details on what + * In all cases reading the corresponding test's STATUS MSR provides details on what * happened. The driver makes the value of this MSR visible to applications * via the "details" file (see below). Interrupted tests may be restarted. * - * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_0/ + * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_<n>/ * to control execution: * * Test a specific core:: * - * # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_0/run_test + * # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_<n>/run_test * * when HT is enabled any of the sibling cpu# can be specified to test * its corresponding physical core. Since the tests are per physical core, @@ -87,21 +90,21 @@ * * For e.g. to test core corresponding to cpu5 * - * # echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test + * # echo 5 > /sys/devices/virtual/misc/intel_ifs_<n>/run_test * * Results of the last test is provided in /sys:: * - * $ cat /sys/devices/virtual/misc/intel_ifs_0/status + * $ cat /sys/devices/virtual/misc/intel_ifs_<n>/status * pass * * Status can be one of pass, fail, untested * * Additional details of the last test is provided by the details file:: * - * $ cat /sys/devices/virtual/misc/intel_ifs_0/details + * $ cat /sys/devices/virtual/misc/intel_ifs_<n>/details * 0x8081 * - * The details file reports the hex value of the SCAN_STATUS MSR. + * The details file reports the hex value of the test specific status MSR. 
* Hardware defined error codes are documented in volume 4 of the Intel * Software Developer's Manual but the error_code field may contain one of * the following driver defined software codes: @@ -127,6 +130,7 @@ #include <linux/device.h> #include <linux/miscdevice.h> +#define MSR_ARRAY_BIST 0x00000105 #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 #define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 @@ -137,6 +141,9 @@ #define SCAN_TEST_PASS 1 #define SCAN_TEST_FAIL 2 +#define IFS_TYPE_SAF 0 +#define IFS_TYPE_ARRAY_BIST 1 + /* MSR_SCAN_HASHES_STATUS bit fields */ union ifs_scan_hashes_status { u64 data; @@ -189,6 +196,17 @@ union ifs_status { }; }; +/* MSR_ARRAY_BIST bit fields */ +union ifs_array { + u64 data; + struct { + u32 array_bitmask; + u16 array_bank; + u16 rsvd :15; + u16 ctrl_result :1; + }; +}; + /* * Driver populated error-codes * 0xFD: Test timed out before completing all the chunks. @@ -197,22 +215,22 @@ union ifs_status { #define IFS_SW_TIMEOUT 0xFD #define IFS_SW_PARTIAL_COMPLETION 0xFE +struct ifs_test_caps { + int integrity_cap_bit; + int test_num; +}; + /** * struct ifs_data - attributes related to intel IFS driver - * @integrity_cap_bit: MSR_INTEGRITY_CAPS bit enumerating this test * @loaded_version: stores the currently loaded ifs image version. - * @pkg_auth: array of bool storing per package auth status * @loaded: If a valid test binary has been loaded into the memory * @loading_error: Error occurred on another CPU while loading image * @valid_chunks: number of chunks which could be validated. * @status: it holds simple status pass/fail/untested * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file - * @test_num: number indicating the test type */ struct ifs_data { - int integrity_cap_bit; - bool *pkg_auth; int loaded_version; bool loaded; bool loading_error; @@ -220,7 +238,6 @@ struct ifs_data { int status; u64 scan_details; u32 cur_batch; - int test_num; }; struct ifs_work { @@ -229,7 +246,8 @@ struct ifs_work { }; struct ifs_device { - struct ifs_data data; + const struct ifs_test_caps *test_caps; + struct ifs_data rw_data; struct miscdevice misc; }; @@ -238,11 +256,21 @@ static inline struct ifs_data *ifs_get_data(struct device *dev) struct miscdevice *m = dev_get_drvdata(dev); struct ifs_device *d = container_of(m, struct ifs_device, misc); - return &d->data; + return &d->rw_data; +} + +static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return d->test_caps; } +extern bool *ifs_pkg_auth; int ifs_load_firmware(struct device *dev); int do_core_test(int cpu, struct device *dev); -const struct attribute_group **ifs_get_groups(void); +extern struct attribute *plat_ifs_attrs[]; +extern struct attribute *plat_ifs_array_attrs[]; #endif diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index c5c24e6fdc43..61dffb4c8a1d 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -192,7 +192,7 @@ static int scan_chunks_sanity_check(struct device *dev) struct ifs_work local_work; int curr_pkg, cpu, ret; - memset(ifsd->pkg_auth, 0, (topology_max_packages() * sizeof(bool))); + memset(ifs_pkg_auth, 0, (topology_max_packages() * sizeof(bool))); ret = validate_ifs_metadata(dev); if (ret) return ret; @@ -204,7 +204,7 @@ static int scan_chunks_sanity_check(struct 
device *dev) cpus_read_lock(); for_each_online_cpu(cpu) { curr_pkg = topology_physical_package_id(cpu); - if (ifsd->pkg_auth[curr_pkg]) + if (ifs_pkg_auth[curr_pkg]) continue; reinit_completion(&ifs_done); local_work.dev = dev; @@ -215,7 +215,7 @@ static int scan_chunks_sanity_check(struct device *dev) ret = -EIO; goto out; } - ifsd->pkg_auth[curr_pkg] = 1; + ifs_pkg_auth[curr_pkg] = 1; } ret = 0; out: @@ -257,13 +257,14 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ */ int ifs_load_firmware(struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); const struct firmware *fw; char scan_path[64]; int ret = -EINVAL; snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", - ifsd->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, + test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, boot_cpu_data.x86_stepping, ifsd->cur_batch); ret = request_firmware_direct(&fw, scan_path, dev); diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 0bfd8fcdd7e8..1061eb7ec399 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -229,6 +229,85 @@ static void ifs_test_core(int cpu, struct device *dev) } } +#define SPINUNIT 100 /* 100 nsec */ +static atomic_t array_cpus_out; + +/* + * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() + */ +static void wait_for_sibling_cpu(atomic_t *t, long long timeout) +{ + int cpu = smp_processor_id(); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + int all_cpus = cpumask_weight(smt_mask); + + atomic_inc(t); + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) + return; + ndelay(SPINUNIT); + timeout -= SPINUNIT; + touch_nmi_watchdog(); + } +} + +static int do_array_test(void *data) +{ + union ifs_array *command = data; + int cpu = smp_processor_id(); + int first; + + /* + * Only one logical CPU on a core needs to trigger the Array test via MSR write. + */ + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrl(MSR_ARRAY_BIST, command->data); + /* Pass back the result of the test */ + rdmsrl(MSR_ARRAY_BIST, command->data); + } + + /* Tests complete faster if the sibling is spinning here */ + wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC); + + return 0; +} + +static void ifs_array_test_core(int cpu, struct device *dev) +{ + union ifs_array command = {}; + bool timed_out = false; + struct ifs_data *ifsd; + unsigned long timeout; + + ifsd = ifs_get_data(dev); + + command.array_bitmask = ~0U; + timeout = jiffies + HZ / 2; + + do { + if (time_after(jiffies, timeout)) { + timed_out = true; + break; + } + atomic_set(&array_cpus_out, 0); + stop_core_cpuslocked(cpu, do_array_test, &command); + + if (command.ctrl_result) + break; + } while (command.array_bitmask); + + ifsd->scan_details = command.data; + + if (command.ctrl_result) + ifsd->status = SCAN_TEST_FAIL; + else if (timed_out || command.array_bitmask) + ifsd->status = SCAN_NOT_TESTED; + else + ifsd->status = SCAN_TEST_PASS; +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. 
Once all sibling threads wake up, the scan test gets executed and @@ -236,6 +315,8 @@ static void ifs_test_core(int cpu, struct device *dev) */ int do_core_test(int cpu, struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); + struct ifs_data *ifsd = ifs_get_data(dev); int ret = 0; /* Prevent CPUs from being taken offline during the scan test */ @@ -247,7 +328,18 @@ int do_core_test(int cpu, struct device *dev) goto out; } - ifs_test_core(cpu, dev); + switch (test->test_num) { + case IFS_TYPE_SAF: + if (!ifsd->loaded) + return -EPERM; + ifs_test_core(cpu, dev); + break; + case IFS_TYPE_ARRAY_BIST: + ifs_array_test_core(cpu, dev); + break; + default: + return -EINVAL; + } out: cpus_read_unlock(); return ret; diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c index ee636a76b083..01b7502f46b0 100644 --- a/drivers/platform/x86/intel/ifs/sysfs.c +++ b/drivers/platform/x86/intel/ifs/sysfs.c @@ -13,7 +13,7 @@ * Protects against simultaneous tests on multiple cores, or * reloading can file while a test is in progress */ -static DEFINE_SEMAPHORE(ifs_sem); +static DEFINE_SEMAPHORE(ifs_sem, 1); /* * The sysfs interface to check additional details of last test @@ -64,7 +64,6 @@ static ssize_t run_test_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct ifs_data *ifsd = ifs_get_data(dev); unsigned int cpu; int rc; @@ -75,10 +74,7 @@ static ssize_t run_test_store(struct device *dev, if (down_interruptible(&ifs_sem)) return -EINTR; - if (!ifsd->loaded) - rc = -EPERM; - else - rc = do_core_test(cpu, dev); + rc = do_core_test(cpu, dev); up(&ifs_sem); @@ -141,7 +137,7 @@ static ssize_t image_version_show(struct device *dev, static DEVICE_ATTR_RO(image_version); /* global scan sysfs attributes */ -static struct attribute *plat_ifs_attrs[] = { +struct attribute *plat_ifs_attrs[] = { &dev_attr_details.attr, &dev_attr_status.attr, &dev_attr_run_test.attr, @@ -150,9 +146,10 @@ static struct attribute *plat_ifs_attrs[] = { NULL }; -ATTRIBUTE_GROUPS(plat_ifs); - -const struct attribute_group **ifs_get_groups(void) -{ - return plat_ifs_groups; -} +/* global array sysfs attributes */ +struct attribute *plat_ifs_array_attrs[] = { + &dev_attr_details.attr, + &dev_attr_status.attr, + &dev_attr_run_test.attr, + NULL +}; diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c index 97cfbc520a02..b6708bab7c53 100644 --- a/drivers/platform/x86/intel/int0002_vgpio.c +++ b/drivers/platform/x86/intel/int0002_vgpio.c @@ -223,11 +223,10 @@ static int int0002_probe(struct platform_device *pdev) return 0; } -static int int0002_remove(struct platform_device *pdev) +static void int0002_remove(struct platform_device *pdev) { device_init_wakeup(&pdev->dev, false); acpi_unregister_wakeup_handler(int0002_check_wake, NULL); - return 0; } static int int0002_suspend(struct device *dev) @@ -273,7 +272,7 @@ static struct platform_driver int0002_driver = { .pm = &int0002_pm_ops, }, .probe = int0002_probe, - .remove = int0002_remove, + .remove_new = int0002_remove, }; module_platform_driver(int0002_driver); diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c index 352fc4596494..6246c066ade2 100644 --- a/drivers/platform/x86/intel/int1092/intel_sar.c +++ b/drivers/platform/x86/intel/int1092/intel_sar.c @@ -292,7 +292,7 @@ r_free: return result; } -static int sar_remove(struct platform_device *device) +static void 
sar_remove(struct platform_device *device) { struct wwan_sar_context *context = dev_get_drvdata(&device->dev); int reg; @@ -304,12 +304,11 @@ static int sar_remove(struct platform_device *device) kfree(context->config_data[reg].device_mode_info); kfree(context); - return 0; } static struct platform_driver sar_driver = { .probe = sar_probe, - .remove = sar_remove, + .remove_new = sar_remove, .driver = { .name = DRVNAME, .acpi_match_table = ACPI_PTR(sar_device_ids) diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index f064da74f50a..ef020e23e596 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -317,7 +317,7 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472) return 0; } -static int skl_int3472_discrete_remove(struct platform_device *pdev) +static void skl_int3472_discrete_remove(struct platform_device *pdev) { struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev); @@ -326,8 +326,6 @@ static int skl_int3472_discrete_remove(struct platform_device *pdev) skl_int3472_unregister_clock(int3472); skl_int3472_unregister_pled(int3472); skl_int3472_unregister_regulator(int3472); - - return 0; } static int skl_int3472_discrete_probe(struct platform_device *pdev) @@ -392,7 +390,7 @@ static struct platform_driver int3472_discrete = { .acpi_match_table = int3472_device_id, }, .probe = skl_int3472_discrete_probe, - .remove = skl_int3472_discrete_remove, + .remove_new = skl_int3472_discrete_remove, }; module_platform_driver(int3472_discrete); diff --git a/drivers/platform/x86/intel/mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c index d58fea51747e..549a3f586f3b 100644 --- a/drivers/platform/x86/intel/mrfld_pwrbtn.c +++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c @@ -78,13 +78,12 @@ static int mrfld_pwrbtn_probe(struct platform_device *pdev) return 0; } -static int mrfld_pwrbtn_remove(struct platform_device *pdev) +static void mrfld_pwrbtn_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; dev_pm_clear_wake_irq(dev); device_init_wakeup(dev, false); - return 0; } static const struct platform_device_id mrfld_pwrbtn_id_table[] = { @@ -98,7 +97,7 @@ static struct platform_driver mrfld_pwrbtn_driver = { .name = "mrfld_bcove_pwrbtn", }, .probe = mrfld_pwrbtn_probe, - .remove = mrfld_pwrbtn_remove, + .remove_new = mrfld_pwrbtn_remove, .id_table = mrfld_pwrbtn_id_table, }; module_platform_driver(mrfld_pwrbtn_driver); diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 3a15d32d7644..da6e7206d38b 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -66,7 +66,18 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value) { - return (u64)value * pmcdev->map->slp_s0_res_counter_step; + /* + * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are + * used as a workaround which uses 30.5 usec tick. All other client + * programs have the legacy SLP_S0 residency counter that is using the 122 + * usec tick. 
+ */ + const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2; + + if (pmcdev->map == &adl_reg_map) + return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2); + else + return (u64)value * pmcdev->map->slp_s0_res_counter_step; } static int set_etr3(struct pmc_dev *pmcdev) @@ -1142,6 +1153,8 @@ static int pmc_core_probe(struct platform_device *pdev) pmc_core_do_dmi_quirks(pmcdev); pmc_core_dbgfs_register(pmcdev); + pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) * + pmc_core_adjust_slp_s0_step(pmcdev, 1)); device_initialized = true; dev_info(&pdev->dev, " initialized\n"); @@ -1149,7 +1162,7 @@ static int pmc_core_probe(struct platform_device *pdev) return 0; } -static int pmc_core_remove(struct platform_device *pdev) +static void pmc_core_remove(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); @@ -1157,7 +1170,6 @@ static int pmc_core_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); mutex_destroy(&pmcdev->lock); iounmap(pmcdev->regbase); - return 0; } static bool warn_on_s0ix_failures; @@ -1168,12 +1180,6 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); - pmcdev->check_counters = false; - - /* No warnings on S0ix failures */ - if (!warn_on_s0ix_failures) - return 0; - /* Check if the syspend will actually use S0ix */ if (pm_suspend_via_firmware()) return 0; @@ -1186,7 +1192,6 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter)) return -EIO; - pmcdev->check_counters = true; return 0; } @@ -1210,6 +1215,8 @@ static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev) if (pmc_core_dev_state_get(pmcdev, &s0ix_counter)) return false; + pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter)); + if (s0ix_counter == pmcdev->s0ix_counter) return true; @@ -1222,12 +1229,16 @@ static __maybe_unused int pmc_core_resume(struct device *dev) const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; int offset = pmcdev->map->lpm_status_offset; - if (!pmcdev->check_counters) + /* Check if the syspend used S0ix */ + if (pm_suspend_via_firmware()) return 0; if (!pmc_core_is_s0ix_failed(pmcdev)) return 0; + if (!warn_on_s0ix_failures) + return 0; + if (pmc_core_is_pc10_failed(pmcdev)) { /* S0ix failed because of PC10 entry failure */ dev_info(dev, "CPU did not enter PC10!!! 
(PC10 cnt=0x%llx)\n", @@ -1264,7 +1275,7 @@ static struct platform_driver pmc_core_driver = { .dev_groups = pmc_dev_groups, }, .probe = pmc_core_probe, - .remove = pmc_core_remove, + .remove_new = pmc_core_remove, }; module_platform_driver(pmc_core_driver); diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index 810204d758ab..9ca9b9746719 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -16,6 +16,8 @@ #include <linux/bits.h> #include <linux/platform_device.h> +#define SLP_S0_RES_COUNTER_MASK GENMASK(31, 0) + #define PMC_BASE_ADDR_DEFAULT 0xFE000000 /* Sunrise Point Power Management Controller PCI Device ID */ @@ -319,7 +321,6 @@ struct pmc_reg_map { * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers * used to read MPHY PG and PLL status are available * @mutex_lock: mutex to complete one transcation - * @check_counters: On resume, check if counters are getting incremented * @pc10_counter: PC10 residency counter * @s0ix_counter: S0ix residency (step adjusted) * @num_lpm_modes: Count of enabled modes @@ -338,7 +339,6 @@ struct pmc_dev { int pmc_xram_read_bit; struct mutex lock; /* generic mutex lock for PMC Core */ - bool check_counters; /* Check for counter increments on resume */ u64 pc10_counter; u64 s0ix_counter; int num_lpm_modes; diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index eeb3bd8c2502..e8cc156412ce 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -8,6 +8,7 @@ * */ +#include <linux/pci.h> #include "core.h" const struct pmc_reg_map mtl_reg_map = { @@ -45,8 +46,38 @@ void mtl_core_configure(struct pmc_dev *pmcdev) pmc_core_send_ltr_ignore(pmcdev, 3); } +#define MTL_GNA_PCI_DEV 0x7e4c +#define MTL_IPU_PCI_DEV 0x7d19 +#define MTL_VPU_PCI_DEV 0x7d1d +static void mtl_set_device_d3(unsigned int device) +{ + struct pci_dev *pcidev; + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (pcidev) { + if (!device_trylock(&pcidev->dev)) { + pci_dev_put(pcidev); + return; + } + if (!pcidev->dev.driver) { + dev_info(&pcidev->dev, "Setting to D3hot\n"); + pci_set_power_state(pcidev, PCI_D3hot); + } + device_unlock(&pcidev->dev); + pci_dev_put(pcidev); + } +} + void mtl_core_init(struct pmc_dev *pmcdev) { pmcdev->map = &mtl_reg_map; pmcdev->core_configure = mtl_core_configure; + + /* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. 
+ */ + mtl_set_device_d3(MTL_GNA_PCI_DEV); + mtl_set_device_d3(MTL_IPU_PCI_DEV); + mtl_set_device_d3(MTL_VPU_PCI_DEV); } diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 46598dcb634a..f32a233470de 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -33,7 +33,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev) */ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW); } -EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw); +EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT); static inline int pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) @@ -155,7 +155,6 @@ ATTRIBUTE_GROUPS(intel_pmt); static struct class intel_pmt_class = { .name = "intel_pmt", - .owner = THIS_MODULE, .dev_groups = intel_pmt_groups, }; @@ -327,7 +326,7 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa return intel_pmt_dev_register(entry, ns, dev); } -EXPORT_SYMBOL_GPL(intel_pmt_dev_create); +EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT); void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns) @@ -343,7 +342,7 @@ void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, device_unregister(dev); xa_erase(ns->xa, entry->devid); } -EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy); +EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, INTEL_PMT); static int __init pmt_class_init(void) { diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index ace1239bc0a0..bbb3d61d09f4 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -328,3 +328,4 @@ module_exit(pmt_crashlog_exit); MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>"); MODULE_DESCRIPTION("Intel PMT Crashlog driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(INTEL_PMT); diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 5e4009c05ecf..39cbc87cc28a 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -78,7 +78,7 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, * reserved for future use. They have zero size. Do not fail * probe for these. Just ignore them. */ - if (header->size == 0) + if (header->size == 0 || header->access_type == 0xF) return 1; return 0; @@ -160,3 +160,4 @@ module_exit(pmt_telem_exit); MODULE_AUTHOR("David E. 
Box <david.e.box@linux.intel.com>"); MODULE_DESCRIPTION("Intel PMT Telemetry driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(INTEL_PMT); diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c index 9e0ea2cdd704..556e7c6dbb05 100644 --- a/drivers/platform/x86/intel/sdsi.c +++ b/drivers/platform/x86/intel/sdsi.c @@ -49,7 +49,7 @@ #define SDSI_MBOX_CMD_SUCCESS 0x40 #define SDSI_MBOX_CMD_TIMEOUT 0x80 -#define MBOX_TIMEOUT_US 2000 +#define MBOX_TIMEOUT_US 500000 #define MBOX_TIMEOUT_ACQUIRE_US 1000 #define MBOX_POLLING_PERIOD_US 100 #define MBOX_ACQUIRE_NUM_RETRIES 5 diff --git a/drivers/platform/x86/intel/speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig index ce3e3dc076d2..4eb3ad299db0 100644 --- a/drivers/platform/x86/intel/speed_select_if/Kconfig +++ b/drivers/platform/x86/intel/speed_select_if/Kconfig @@ -2,8 +2,12 @@ menu "Intel Speed Select Technology interface support" depends on PCI depends on X86_64 || COMPILE_TEST +config INTEL_SPEED_SELECT_TPMI + tristate + config INTEL_SPEED_SELECT_INTERFACE tristate "Intel(R) Speed Select Technology interface drivers" + select INTEL_SPEED_SELECT_TPMI if INTEL_TPMI help This config enables the Intel(R) Speed Select Technology interface drivers. The Intel(R) speed select technology features are non diff --git a/drivers/platform/x86/intel/speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile index 856076206f35..1d878a36d0ab 100644 --- a/drivers/platform/x86/intel/speed_select_if/Makefile +++ b/drivers/platform/x86/intel/speed_select_if/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o +obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi_core.o +obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi.o diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 0954a04623ed..e0572a29212e 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -19,9 +19,13 @@ #include <linux/uaccess.h> #include <uapi/linux/isst_if.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + #include "isst_if_common.h" #define MSR_THREAD_ID_INFO 0x53 +#define MSR_PM_LOGICAL_ID 0x54 #define MSR_CPU_BUS_NUMBER 0x128 static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX]; @@ -31,6 +35,7 @@ static int punit_msr_white_list[] = { MSR_CONFIG_TDP_CONTROL, MSR_TURBO_RATIO_LIMIT1, MSR_TURBO_RATIO_LIMIT2, + MSR_PM_LOGICAL_ID, }; struct isst_valid_cmd_ranges { @@ -73,6 +78,8 @@ struct isst_cmd { u32 param; }; +static bool isst_hpm_support; + static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock); @@ -262,11 +269,13 @@ bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd) } EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req); +static int isst_if_api_version; + static int isst_if_get_platform_info(void __user *argp) { struct isst_if_platform_info info; - info.api_version = ISST_IF_API_VERSION; + info.api_version = isst_if_api_version; info.driver_version = ISST_IF_DRIVER_VERSION; info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT; info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered; @@ -409,11 +418,20 @@ static int isst_if_cpu_online(unsigned int cpu) 
isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1); } + if (isst_hpm_support) { + + ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + if (!ret) + goto set_punit_id; + } + ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data); if (ret) { isst_cpu_info[cpu].punit_cpu_id = -1; return ret; } + +set_punit_id: isst_cpu_info[cpu].punit_cpu_id = data; isst_restore_msr_local(cpu); @@ -588,6 +606,7 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, struct isst_if_cmd_cb cmd_cb; struct isst_if_cmd_cb *cb; long ret = -ENOTTY; + int i; switch (cmd) { case ISST_IF_GET_PLATFORM_INFO: @@ -616,6 +635,16 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, ret = isst_if_exec_multi_cmd(argp, &cmd_cb); break; default: + for (i = 0; i < ISST_IF_DEV_MAX; ++i) { + struct isst_if_cmd_cb *cb = &punit_callbacks[i]; + int ret; + + if (cb->def_ioctl) { + ret = cb->def_ioctl(file, cmd, arg); + if (!ret) + return ret; + } + } break; } @@ -691,6 +720,12 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static const struct x86_cpu_id hpm_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(SIERRAFOREST_X, NULL), + {} +}; + static int isst_misc_reg(void) { mutex_lock(&punit_misc_dev_reg_lock); @@ -698,6 +733,12 @@ static int isst_misc_reg(void) goto unlock_exit; if (!misc_usage_count) { + const struct x86_cpu_id *id; + + id = x86_match_cpu(hpm_cpu_ids); + if (id) + isst_hpm_support = true; + misc_device_ret = isst_if_cpu_info_init(); if (misc_device_ret) goto unlock_exit; @@ -756,6 +797,10 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } + if (!cb->api_version) + cb->api_version = ISST_IF_API_VERSION; + if (cb->api_version > isst_if_api_version) + isst_if_api_version = cb->api_version; memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; mutex_unlock(&punit_misc_dev_open_lock); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h index 35ff506b402e..1004f2c9cca8 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h @@ -30,7 +30,8 @@ #define ISST_IF_DEV_MBOX 0 #define ISST_IF_DEV_MMIO 1 -#define ISST_IF_DEV_MAX 2 +#define ISST_IF_DEV_TPMI 2 +#define ISST_IF_DEV_MAX 3 /** * struct isst_if_cmd_cb - Used to register a IOCTL handler @@ -40,6 +41,7 @@ * @offset: Offset to the first valid member in command structure. * This will be the offset of the start of the command * after command count field + * @api_version: API version supported for this target. 0, if none. * @owner: Registered module owner * @cmd_callback: Callback function to handle IOCTL. The callback has the * command pointer with data for command. There is a pointer @@ -47,6 +49,8 @@ * response to user ioctl buffer. The "resume" argument * can be used to avoid storing the command for replay * during system resume + * @def_ioctl: Default IOCTL handler callback, if there is no match in + * the existing list of IOCTL handled by the common handler. * * This structure is used to register an handler for IOCTL. 
To avoid * code duplication common code handles all the IOCTL command read/write @@ -57,8 +61,10 @@ struct isst_if_cmd_cb { int registered; int cmd_size; int offset; + int api_version; struct module *owner; long (*cmd_callback)(u8 *ptr, int *write_only, int resume); + long (*def_ioctl)(struct file *file, unsigned int cmd, unsigned long arg); }; /* Internal interface functions */ diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c new file mode 100644 index 000000000000..17972191538a --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * isst_tpmi.c: SST TPMI interface + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. + * + */ + +#include <linux/auxiliary_bus.h> +#include <linux/module.h> +#include <linux/intel_tpmi.h> + +#include "isst_tpmi_core.h" + +static int intel_sst_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + int ret; + + ret = tpmi_sst_init(); + if (ret) + return ret; + + ret = tpmi_sst_dev_add(auxdev); + if (ret) + tpmi_sst_exit(); + + return ret; +} + +static void intel_sst_remove(struct auxiliary_device *auxdev) +{ + tpmi_sst_dev_remove(auxdev); + tpmi_sst_exit(); +} + +static int intel_sst_suspend(struct device *dev) +{ + tpmi_sst_dev_suspend(to_auxiliary_dev(dev)); + + return 0; +} + +static int intel_sst_resume(struct device *dev) +{ + tpmi_sst_dev_resume(to_auxiliary_dev(dev)); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(intel_sst_pm, intel_sst_suspend, intel_sst_resume); + +static const struct auxiliary_device_id intel_sst_id_table[] = { + { .name = "intel_vsec.tpmi-sst" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, intel_sst_id_table); + +static struct auxiliary_driver intel_sst_aux_driver = { + .id_table = intel_sst_id_table, + .remove = intel_sst_remove, + .probe = intel_sst_probe, + .driver = { + .pm = pm_sleep_ptr(&intel_sst_pm), + }, +}; + +module_auxiliary_driver(intel_sst_aux_driver); + +MODULE_IMPORT_NS(INTEL_TPMI_SST); +MODULE_DESCRIPTION("Intel TPMI SST Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c new file mode 100644 index 000000000000..664d2ee60385 --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -0,0 +1,1440 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * isst_tpmi.c: SST TPMI interface core + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. + * + * This information will be useful to understand flows: + * In the current generation of platforms, TPMI is supported via OOB + * PCI device. This PCI device has one instance per CPU package. + * There is a unique TPMI ID for SST. Each TPMI ID also has multiple + * entries, representing per power domain information. + * + * There is one dev file for complete SST information and control same as the + * prior generation of hardware. User spaces don't need to know how the + * information is presented by the hardware. The TPMI core module implements + * the hardware mapping. 
+ */ + +#include <linux/auxiliary_bus.h> +#include <linux/delay.h> +#include <linux/intel_tpmi.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <uapi/linux/isst_if.h> + +#include "isst_tpmi_core.h" +#include "isst_if_common.h" + +/* Supported SST hardware version by this driver */ +#define ISST_HEADER_VERSION 1 + +/* + * Used to indicate if value read from MMIO needs to get multiplied + * to get to a standard unit or not. + */ +#define SST_MUL_FACTOR_NONE 1 + +/* Define 100 as a scaling factor frequency ratio to frequency conversion */ +#define SST_MUL_FACTOR_FREQ 100 + +/* All SST regs are 64 bit size */ +#define SST_REG_SIZE 8 + +/** + * struct sst_header - SST main header + * @interface_version: Version number for this interface + * @cap_mask: Bitmask of the supported sub features. 1=the sub feature is enabled. + * 0=disabled. + * Bit[8]= SST_CP enable (1), disable (0) + * bit[9]= SST_PP enable (1), disable (0) + * other bits are reserved for future use + * @cp_offset: Qword (8 bytes) offset to the SST_CP register bank + * @pp_offset: Qword (8 bytes) offset to the SST_PP register bank + * @reserved: Reserved for future use + * + * This register allows SW to discover SST capability and the offsets to SST-CP + * and SST-PP register banks. + */ +struct sst_header { + u8 interface_version; + u8 cap_mask; + u8 cp_offset; + u8 pp_offset; + u32 reserved; +} __packed; + +/** + * struct cp_header - SST-CP (core-power) header + * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF + * @feature_rev: Interface Version number for this SST feature + * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved + * @reserved: Reserved for future use + * + * This structure is used store SST-CP header. This is packed to the same + * format as defined in the specifications. + */ +struct cp_header { + u64 feature_id :4; + u64 feature_rev :8; + u64 ratio_unit :2; + u64 reserved :50; +} __packed; + +/** + * struct pp_header - SST-PP (Perf profile) header + * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF + * @feature_rev: Interface Version number for this SST feature + * @level_en_mask: SST-PP level enable/disable fuse mask + * @allowed_level_mask: Allowed level mask used for dynamic config level switching + * @reserved0: Reserved for future use + * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved + * @block_size: Size of PP block in Qword unit (8 bytes) + * @dynamic_switch: If set (1), dynamic switching of SST PP is supported + * @memory_ratio_unit: Memory Controller frequency ratio unit. 00: 100MHz, others reserved + * @reserved1: Reserved for future use + * + * This structure is used store SST-PP header. This is packed to the same + * format as defined in the specifications. + */ +struct pp_header { + u64 feature_id :4; + u64 feature_rev :8; + u64 level_en_mask :8; + u64 allowed_level_mask :8; + u64 reserved0 :4; + u64 ratio_unit :2; + u64 block_size :8; + u64 dynamic_switch :1; + u64 memory_ratio_unit :2; + u64 reserved1 :19; +} __packed; + +/** + * struct feature_offset - Offsets to SST-PP features + * @pp_offset: Qword offset within PP level for the SST_PP register bank + * @bf_offset: Qword offset within PP level for the SST_BF register bank + * @tf_offset: Qword offset within PP level for the SST_TF register bank + * @reserved: Reserved for future use + * + * This structure is used store offsets for SST features in the register bank. 
+ * This is packed to the same format as defined in the specifications. + */ +struct feature_offset { + u64 pp_offset :8; + u64 bf_offset :8; + u64 tf_offset :8; + u64 reserved :40; +} __packed; + +/** + * struct levels_offset - Offsets to each SST PP level + * @sst_pp_level0_offset: Qword offset to the register block of PP level 0 + * @sst_pp_level1_offset: Qword offset to the register block of PP level 1 + * @sst_pp_level2_offset: Qword offset to the register block of PP level 2 + * @sst_pp_level3_offset: Qword offset to the register block of PP level 3 + * @sst_pp_level4_offset: Qword offset to the register block of PP level 4 + * @reserved: Reserved for future use + * + * This structure is used store offsets of SST PP levels in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct levels_offset { + u64 sst_pp_level0_offset :8; + u64 sst_pp_level1_offset :8; + u64 sst_pp_level2_offset :8; + u64 sst_pp_level3_offset :8; + u64 sst_pp_level4_offset :8; + u64 reserved :24; +} __packed; + +/** + * struct pp_control_offset - Offsets for SST PP controls + * @perf_level: A SST-PP level that SW intends to switch to + * @perf_level_lock: SST-PP level select lock. 0 - unlocked. 1 - locked till next reset + * @resvd0: Reserved for future use + * @current_state: Bit mask to control the enable(1)/disable(0) state of each feature + * of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved + * @reserved: Reserved for future use + * + * This structure is used store offsets of SST PP controls in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct pp_control_offset { + u64 perf_level :3; + u64 perf_level_lock :1; + u64 resvd0 :4; + u64 current_state :8; + u64 reserved :48; +} __packed; + +/** + * struct pp_status_offset - Offsets for SST PP status fields + * @sst_pp_level: Returns the current SST-PP level + * @sst_pp_lock: Returns the lock bit setting of perf_level_lock in pp_control_offset + * @error_type: Returns last error of SST-PP level change request. 0: no error, + * 1: level change not allowed, others: reserved + * @feature_state: Bit mask to indicate the enable(1)/disable(0) state of each feature of the + * current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved + * @reserved0: Reserved for future use + * @feature_error_type: Returns last error of the specific feature. Three error_type bits per + * feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc. + * 0x0: no error, 0x1: The specific feature is not supported by the hardware. + * 0x2-0x6: Reserved. 0x7: feature state change is not allowed. + * @reserved1: Reserved for future use + * + * This structure is used store offsets of SST PP status in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct pp_status_offset { + u64 sst_pp_level :3; + u64 sst_pp_lock :1; + u64 error_type :4; + u64 feature_state :8; + u64 reserved0 :16; + u64 feature_error_type : 24; + u64 reserved1 :8; +} __packed; + +/** + * struct perf_level - Used to store perf level and mmio offset + * @mmio_offset: mmio offset for a perf level + * @level: perf level for this offset + * + * This structure is used store final mmio offset of each perf level from the + * SST base mmio offset. 
+ */ +struct perf_level { + int mmio_offset; + int level; +}; + +/** + * struct tpmi_per_power_domain_info - Store per power_domain SST info + * @package_id: Package id for this power_domain + * @power_domain_id: Power domain id, Each entry from the SST-TPMI instance is a power_domain. + * @max_level: Max possible PP level possible for this power_domain + * @ratio_unit: Ratio unit for converting to MHz + * @avx_levels: Number of AVX levels + * @pp_block_size: Block size from PP header + * @sst_header: Store SST header for this power_domain + * @cp_header: Store SST-CP header for this power_domain + * @pp_header: Store SST-PP header for this power_domain + * @perf_levels: Pointer to each perf level to map level to mmio offset + * @feature_offsets: Store feature offsets for each PP-level + * @control_offset: Store the control offset for each PP-level + * @status_offset: Store the status offset for each PP-level + * @sst_base: Mapped SST base IO memory + * @auxdev: Auxiliary device instance enumerated this instance + * @saved_sst_cp_control: Save SST-CP control configuration to store restore for suspend/resume + * @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume + * @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume + * @saved_pp_control: Save SST-PP control information to store restore for suspend/resume + * + * This structure is used store complete SST information for a power_domain. This information + * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple + * power_domains. Each power domain describes its own SST information and has its own controls. + */ +struct tpmi_per_power_domain_info { + int package_id; + int power_domain_id; + int max_level; + int ratio_unit; + int avx_levels; + int pp_block_size; + struct sst_header sst_header; + struct cp_header cp_header; + struct pp_header pp_header; + struct perf_level *perf_levels; + struct feature_offset feature_offsets; + struct pp_control_offset control_offset; + struct pp_status_offset status_offset; + void __iomem *sst_base; + struct auxiliary_device *auxdev; + u64 saved_sst_cp_control; + u64 saved_clos_configs[4]; + u64 saved_clos_assocs[4]; + u64 saved_pp_control; +}; + +/** + * struct tpmi_sst_struct - Store sst info for a package + * @package_id: Package id for this aux device instance + * @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer + * @power_domain_info: Pointer to power domains information + * + * This structure is used store full SST information for a package. + * Each package has a unique OOB PCI device, which enumerates TPMI. + * Each Package will have multiple power_domains. + */ +struct tpmi_sst_struct { + int package_id; + int number_of_power_domains; + struct tpmi_per_power_domain_info *power_domain_info; +}; + +/** + * struct tpmi_sst_common_struct - Store all SST instances + * @max_index: Maximum instances currently present + * @sst_inst: Pointer to per package instance + * + * Stores every SST Package instance. + */ +struct tpmi_sst_common_struct { + int max_index; + struct tpmi_sst_struct **sst_inst; +}; + +/* + * Each IOCTL request is processed under this lock. Also used to protect + * registration functions and common data structures. + */ +static DEFINE_MUTEX(isst_tpmi_dev_lock); + +/* Usage count to track, number of TPMI SST instances registered to this core. 
*/ +static int isst_core_usage_count; + +/* Stores complete SST information for every package and power_domain */ +static struct tpmi_sst_common_struct isst_common; + +#define SST_MAX_AVX_LEVELS 3 + +#define SST_PP_OFFSET_0 8 +#define SST_PP_OFFSET_1 16 +#define SST_PP_OFFSET_SIZE 8 + +static int sst_add_perf_profiles(struct auxiliary_device *auxdev, + struct tpmi_per_power_domain_info *pd_info, + int levels) +{ + u64 perf_level_offsets; + int i; + + pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels, + sizeof(struct perf_level), + GFP_KERNEL); + if (!pd_info->perf_levels) + return 0; + + pd_info->ratio_unit = pd_info->pp_header.ratio_unit; + pd_info->avx_levels = SST_MAX_AVX_LEVELS; + pd_info->pp_block_size = pd_info->pp_header.block_size; + + /* Read PP Offset 0: Get feature offset with PP level */ + *((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base + + pd_info->sst_header.pp_offset + + SST_PP_OFFSET_0); + + perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset + + SST_PP_OFFSET_1); + + for (i = 0; i < levels; ++i) { + u64 offset; + + offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE)); + offset >>= (i * 8); + offset &= 0xff; + offset *= 8; /* Convert to byte from QWORD offset */ + pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset; + } + + return 0; +} + +static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info) +{ + int i, mask, levels; + + *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base); + pd_info->sst_header.cp_offset *= 8; + pd_info->sst_header.pp_offset *= 8; + + if (pd_info->sst_header.interface_version != ISST_HEADER_VERSION) { + dev_err(&auxdev->dev, "SST: Unsupported version:%x\n", + pd_info->sst_header.interface_version); + return -ENODEV; + } + + /* Read SST CP Header */ + *((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset); + + /* Read PP header */ + *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset); + + /* Force level_en_mask level 0 */ + pd_info->pp_header.level_en_mask |= 0x01; + + mask = 0x01; + levels = 0; + for (i = 0; i < 8; ++i) { + if (pd_info->pp_header.level_en_mask & mask) + levels = i; + mask <<= 1; + } + pd_info->max_level = levels; + sst_add_perf_profiles(auxdev, pd_info, levels + 1); + + return 0; +} + +/* + * Map a package and power_domain id to SST information structure unique for a power_domain. + * The caller should call under isst_tpmi_dev_lock. 
+ */ +static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct tpmi_sst_struct *sst_inst; + + if (pkg_id < 0 || pkg_id > isst_common.max_index || + pkg_id >= topology_max_packages()) + return NULL; + + sst_inst = isst_common.sst_inst[pkg_id]; + if (!sst_inst) + return NULL; + + if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains) + return NULL; + + power_domain_info = &sst_inst->power_domain_info[power_domain_id]; + + if (power_domain_info && !power_domain_info->sst_base) + return NULL; + + return power_domain_info; +} + +static bool disable_dynamic_sst_features(void) +{ + u64 value; + + rdmsrl(MSR_PM_ENABLE, value); + return !(value & 0x1); +} + +#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\ +{\ + u64 val, mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\ + (offset));\ + mask = GENMASK_ULL((start + width - 1), start);\ + val &= mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _write_cp_info(name_str, name, offset, start, width, div_factor)\ +{\ + u64 val, mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->sst_header.cp_offset + (offset));\ + mask = GENMASK_ULL((start + width - 1), start);\ + val &= ~mask;\ + val |= (name / div_factor) << start;\ + writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\ + (offset));\ +} + +#define SST_CP_CONTROL_OFFSET 8 +#define SST_CP_STATUS_OFFSET 16 + +#define SST_CP_ENABLE_START 0 +#define SST_CP_ENABLE_WIDTH 1 + +#define SST_CP_PRIORITY_TYPE_START 1 +#define SST_CP_PRIORITY_TYPE_WIDTH 1 + +static long isst_if_core_power_state(void __user *argp) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_core_power core_power; + + if (disable_dynamic_sst_features()) + return -EFAULT; + + if (copy_from_user(&core_power, argp, sizeof(core_power))) + return -EFAULT; + + power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (core_power.get_set) { + _write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET, + SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE) + _write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET, + SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH, + SST_MUL_FACTOR_NONE) + } else { + /* get */ + _read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET, + SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE) + _read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET, + SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH, + SST_MUL_FACTOR_NONE) + core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0)); + if (copy_to_user(argp, &core_power, sizeof(core_power))) + return -EFAULT; + } + + return 0; +} + +#define SST_CLOS_CONFIG_0_OFFSET 24 + +#define SST_CLOS_CONFIG_PRIO_START 4 +#define SST_CLOS_CONFIG_PRIO_WIDTH 4 + +#define SST_CLOS_CONFIG_MIN_START 8 +#define SST_CLOS_CONFIG_MIN_WIDTH 8 + +#define SST_CLOS_CONFIG_MAX_START 16 +#define SST_CLOS_CONFIG_MAX_WIDTH 8 + +static long isst_if_clos_param(void __user *argp) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_clos_param clos_param; + + if (copy_from_user(&clos_param, argp, sizeof(clos_param))) + return -EFAULT; + + power_domain_info = get_instance(clos_param.socket_id, 
clos_param.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (clos_param.get_set) { + _write_cp_info("clos.min_freq", clos_param.min_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, + SST_MUL_FACTOR_FREQ); + _write_cp_info("clos.max_freq", clos_param.max_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH, + SST_MUL_FACTOR_FREQ); + _write_cp_info("clos.prio", clos_param.prop_prio, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH, + SST_MUL_FACTOR_NONE); + } else { + /* get */ + _read_cp_info("clos.min_freq", clos_param.min_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_cp_info("clos.max_freq", clos_param.max_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_cp_info("clos.prio", clos_param.prop_prio, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH, + SST_MUL_FACTOR_NONE) + + if (copy_to_user(argp, &clos_param, sizeof(clos_param))) + return -EFAULT; + } + + return 0; +} + +#define SST_CLOS_ASSOC_0_OFFSET 56 +#define SST_CLOS_ASSOC_CPUS_PER_REG 16 +#define SST_CLOS_ASSOC_BITS_PER_CPU 4 + +static long isst_if_clos_assoc(void __user *argp) +{ + struct isst_if_clos_assoc_cmds assoc_cmds; + unsigned char __user *ptr; + int i; + + /* Each multi command has u16 command count as the first field */ + if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds))) + return -EFAULT; + + if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT) + return -EINVAL; + + ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info); + for (i = 0; i < assoc_cmds.cmd_count; ++i) { + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_if_clos_assoc clos_assoc; + int punit_id, punit_cpu_no, pkg_id; + struct tpmi_sst_struct *sst_inst; + int offset, shift, cpu; + u64 val, mask, clos; + + if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc))) + return -EFAULT; + + if (clos_assoc.socket_id > topology_max_packages()) + return -EINVAL; + + cpu = clos_assoc.logical_cpu; + clos = clos_assoc.clos; + + if (assoc_cmds.punit_cpu_map) + punit_cpu_no = cpu; + else + return -EOPNOTSUPP; + + if (punit_cpu_no < 0) + return -EINVAL; + + punit_id = clos_assoc.power_domain_id; + pkg_id = clos_assoc.socket_id; + + sst_inst = isst_common.sst_inst[pkg_id]; + + if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains) + return -EINVAL; + + power_domain_info = &sst_inst->power_domain_info[punit_id]; + + offset = SST_CLOS_ASSOC_0_OFFSET + + (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE; + shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG; + shift *= SST_CLOS_ASSOC_BITS_PER_CPU; + + val = readq(power_domain_info->sst_base + + power_domain_info->sst_header.cp_offset + offset); + if (assoc_cmds.get_set) { + mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift); + val &= ~mask; + val |= (clos << shift); + writeq(val, power_domain_info->sst_base + + power_domain_info->sst_header.cp_offset + offset); + } else { + val >>= shift; + clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0); + if (copy_to_user(ptr, &clos_assoc, 
sizeof(clos_assoc))) + return -EFAULT; + } + + ptr += sizeof(clos_assoc); + } + + return 0; +} + +#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask;\ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _write_pp_info(name_str, name, offset, start, width, div_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ + _mask = GENMASK((start + width - 1), start);\ + val &= ~_mask;\ + val |= (name / div_factor) << start;\ + writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ +} + +#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.bf_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.tf_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define SST_PP_STATUS_OFFSET 32 + +#define SST_PP_LEVEL_START 0 +#define SST_PP_LEVEL_WIDTH 3 + +#define SST_PP_LOCK_START 3 +#define SST_PP_LOCK_WIDTH 1 + +#define SST_PP_FEATURE_STATE_START 8 +#define SST_PP_FEATURE_STATE_WIDTH 8 + +#define SST_BF_FEATURE_SUPPORTED_START 12 +#define SST_BF_FEATURE_SUPPORTED_WIDTH 1 + +#define SST_TF_FEATURE_SUPPORTED_START 12 +#define SST_TF_FEATURE_SUPPORTED_WIDTH 1 + +static int isst_if_get_perf_level(void __user *argp) +{ + struct isst_perf_level_info perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + perf_level.max_level = power_domain_info->max_level; + perf_level.level_mask = power_domain_info->pp_header.allowed_level_mask; + perf_level.feature_rev = power_domain_info->pp_header.feature_rev; + _read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + _read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET, + SST_PP_LOCK_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + _read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) + perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1)); + + _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0, + SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH, + SST_MUL_FACTOR_NONE); + _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0, + SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH, + SST_MUL_FACTOR_NONE); + + if (copy_to_user(argp, &perf_level, sizeof(perf_level))) + return -EFAULT; + + return 0; +} + 
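For context, isst_if_get_perf_level() above services the ISST_IF_PERF_LEVELS ioctl on the common ISST character device. The following is a minimal user-space sketch of how that path could be exercised; it is illustrative only and not part of the patch, and it assumes the isst_perf_level_info layout and ioctl command from include/uapi/linux/isst_if.h plus the /dev/isst_interface node registered by the ISST common core.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/isst_if.h>

int main(void)
{
	/* Query PP-level information for package 0, power domain 0 */
	struct isst_perf_level_info info = { .socket_id = 0, .power_domain_id = 0 };
	int fd = open("/dev/isst_interface", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, ISST_IF_PERF_LEVELS, &info) == 0)
		printf("max level %u, allowed mask 0x%x, current level %u\n",
		       info.max_level, info.level_mask, info.current_level);

	close(fd);
	return 0;
}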
+#define SST_PP_CONTROL_OFFSET 24 +#define SST_PP_LEVEL_CHANGE_TIME_MS 5 +#define SST_PP_LEVEL_CHANGE_RETRY_COUNT 3 + +static int isst_if_set_perf_level(void __user *argp) +{ + struct isst_perf_level_control perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + int level, retry = 0; + + if (disable_dynamic_sst_features()) + return -EFAULT; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level))) + return -EINVAL; + + _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* If the requested new level is same as the current level, reject */ + if (perf_level.level == level) + return -EINVAL; + + _write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* It is possible that firmware is busy (although unlikely), so retry */ + do { + /* Give time to FW to process */ + msleep(SST_PP_LEVEL_CHANGE_TIME_MS); + + _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* Check if the new level is active */ + if (perf_level.level == level) + break; + + } while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT); + + /* If the level change didn't happen, return fault */ + if (perf_level.level != level) + return -EFAULT; + + /* Reset the feature state on level change */ + _write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, + SST_MUL_FACTOR_NONE) + + /* Give time to FW to process */ + msleep(SST_PP_LEVEL_CHANGE_TIME_MS); + + return 0; +} + +static int isst_if_set_perf_feature(void __user *argp) +{ + struct isst_perf_feature_control perf_feature; + struct tpmi_per_power_domain_info *power_domain_info; + + if (disable_dynamic_sst_features()) + return -EFAULT; + + if (copy_from_user(&perf_feature, argp, sizeof(perf_feature))) + return -EFAULT; + + power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, + SST_MUL_FACTOR_NONE) + + return 0; +} + +#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.pp_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define SST_PP_INFO_0_OFFSET 0 +#define SST_PP_INFO_1_OFFSET 8 +#define SST_PP_INFO_2_OFFSET 16 +#define SST_PP_INFO_3_OFFSET 24 + +/* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */ +#define SST_PP_INFO_4_OFFSET 32 + +#define SST_PP_INFO_10_OFFSET 80 +#define SST_PP_INFO_11_OFFSET 88 + +#define SST_PP_P1_SSE_START 0 +#define SST_PP_P1_SSE_WIDTH 8 + +#define SST_PP_P1_AVX2_START 8 +#define SST_PP_P1_AVX2_WIDTH 8 + +#define SST_PP_P1_AVX512_START 16 +#define SST_PP_P1_AVX512_WIDTH 8 + +#define SST_PP_P1_AMX_START 24 +#define SST_PP_P1_AMX_WIDTH 8 + +#define SST_PP_TDP_START 32 +#define SST_PP_TDP_WIDTH 15 + +#define 
SST_PP_T_PROCHOT_START 47 +#define SST_PP_T_PROCHOT_WIDTH 8 + +#define SST_PP_MAX_MEMORY_FREQ_START 55 +#define SST_PP_MAX_MEMORY_FREQ_WIDTH 7 + +#define SST_PP_COOLING_TYPE_START 62 +#define SST_PP_COOLING_TYPE_WIDTH 2 + +#define SST_PP_TRL_0_RATIO_0_START 0 +#define SST_PP_TRL_0_RATIO_0_WIDTH 8 + +#define SST_PP_TRL_CORES_BUCKET_0_START 0 +#define SST_PP_TRL_CORES_BUCKET_0_WIDTH 8 + +#define SST_PP_CORE_RATIO_P0_START 0 +#define SST_PP_CORE_RATIO_P0_WIDTH 8 + +#define SST_PP_CORE_RATIO_P1_START 8 +#define SST_PP_CORE_RATIO_P1_WIDTH 8 + +#define SST_PP_CORE_RATIO_PN_START 16 +#define SST_PP_CORE_RATIO_PN_WIDTH 8 + +#define SST_PP_CORE_RATIO_PM_START 24 +#define SST_PP_CORE_RATIO_PM_WIDTH 8 + +#define SST_PP_CORE_RATIO_P0_FABRIC_START 32 +#define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH 8 + +#define SST_PP_CORE_RATIO_P1_FABRIC_START 40 +#define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH 8 + +#define SST_PP_CORE_RATIO_PM_FABRIC_START 48 +#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH 8 + +static int isst_if_get_perf_level_info(void __user *argp) +{ + struct isst_perf_level_data_info perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + int i, j; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (perf_level.level > power_domain_info->max_level) + return -EINVAL; + + if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level))) + return -EINVAL; + + _read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH, + SST_MUL_FACTOR_NONE) + _read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz, + perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START, + SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH, + SST_MUL_FACTOR_FREQ) + + _read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w, + perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START, + SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE) + perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */ + _read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level, + SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH, + SST_MUL_FACTOR_NONE) + _read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz, + perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START, + SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level, + SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START, + SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE) + + for (i = 0; i < TRL_MAX_LEVELS; ++i) { + for (j = 0; j < TRL_MAX_BUCKETS; ++j) + _read_pp_level_info("trl*_bucket*_freq_mhz", + perf_level.trl_freq_mhz[i][j], perf_level.level, + SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH), + j * SST_PP_TRL_0_RATIO_0_WIDTH, + 
SST_PP_TRL_0_RATIO_0_WIDTH, + SST_MUL_FACTOR_FREQ); + } + + for (i = 0; i < TRL_MAX_BUCKETS; ++i) + _read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i], + perf_level.level, SST_PP_INFO_10_OFFSET, + SST_PP_TRL_CORES_BUCKET_0_WIDTH * i, + SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE) + + perf_level.max_buckets = TRL_MAX_BUCKETS; + perf_level.max_trl_levels = TRL_MAX_LEVELS; + + _read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START, + SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START, + SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START, + SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START, + SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_P0_FABRIC_START, + SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_P1_FABRIC_START, + SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_PM_FABRIC_START, + SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + + if (copy_to_user(argp, &perf_level, sizeof(perf_level))) + return -EFAULT; + + return 0; +} + +#define SST_PP_FUSED_CORE_COUNT_START 0 +#define SST_PP_FUSED_CORE_COUNT_WIDTH 8 + +#define SST_PP_RSLVD_CORE_COUNT_START 8 +#define SST_PP_RSLVD_CORE_COUNT_WIDTH 8 + +#define SST_PP_RSLVD_CORE_MASK_START 0 +#define SST_PP_RSLVD_CORE_MASK_WIDTH 64 + +static int isst_if_get_perf_level_mask(void __user *argp) +{ + static struct isst_perf_level_cpu_mask cpumask; + struct tpmi_per_power_domain_info *power_domain_info; + u64 mask; + + if (copy_from_user(&cpumask, argp, sizeof(cpumask))) + return -EFAULT; + + power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + _read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET, + SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH, + SST_MUL_FACTOR_NONE) + + cpumask.mask = mask; + + if (!cpumask.punit_cpu_map) + return -EOPNOTSUPP; + + if (copy_to_user(argp, &cpumask, sizeof(cpumask))) + return -EFAULT; + + return 0; +} + +#define SST_BF_INFO_0_OFFSET 0 +#define SST_BF_INFO_1_OFFSET 8 + +#define SST_BF_P1_HIGH_START 13 +#define SST_BF_P1_HIGH_WIDTH 8 + +#define SST_BF_P1_LOW_START 21 +#define SST_BF_P1_LOW_WIDTH 8 + +#define SST_BF_T_PROHOT_START 38 +#define SST_BF_T_PROHOT_WIDTH 8 + +#define SST_BF_TDP_START 46 +#define SST_BF_TDP_WIDTH 15 + +static int isst_if_get_base_freq_info(void __user *argp) +{ + static struct isst_base_freq_info base_freq; + struct tpmi_per_power_domain_info *power_domain_info; + + if (copy_from_user(&base_freq, argp, sizeof(base_freq))) + return -EFAULT; + + power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if 
(base_freq.level > power_domain_info->max_level) + return -EINVAL; + + _read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH, + SST_MUL_FACTOR_NONE) + _read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH, + SST_MUL_FACTOR_NONE) + base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/ + + if (copy_to_user(argp, &base_freq, sizeof(base_freq))) + return -EFAULT; + + return 0; +} + +#define P1_HI_CORE_MASK_START 0 +#define P1_HI_CORE_MASK_WIDTH 64 + +static int isst_if_get_base_freq_mask(void __user *argp) +{ + static struct isst_perf_level_cpu_mask cpumask; + struct tpmi_per_power_domain_info *power_domain_info; + u64 mask; + + if (copy_from_user(&cpumask, argp, sizeof(cpumask))) + return -EFAULT; + + power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + _read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET, + P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH, + SST_MUL_FACTOR_NONE) + + cpumask.mask = mask; + + if (!cpumask.punit_cpu_map) + return -EOPNOTSUPP; + + if (copy_to_user(argp, &cpumask, sizeof(cpumask))) + return -EFAULT; + + return 0; +} + +static int isst_if_get_tpmi_instance_count(void __user *argp) +{ + struct isst_tpmi_instance_count tpmi_inst; + struct tpmi_sst_struct *sst_inst; + int i; + + if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst))) + return -EFAULT; + + if (tpmi_inst.socket_id >= topology_max_packages()) + return -EINVAL; + + tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains; + + sst_inst = isst_common.sst_inst[tpmi_inst.socket_id]; + tpmi_inst.valid_mask = 0; + for (i = 0; i < sst_inst->number_of_power_domains; ++i) { + struct tpmi_per_power_domain_info *pd_info; + + pd_info = &sst_inst->power_domain_info[i]; + if (pd_info->sst_base) + tpmi_inst.valid_mask |= BIT(i); + } + + if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst))) + return -EFAULT; + + return 0; +} + +#define SST_TF_INFO_0_OFFSET 0 +#define SST_TF_INFO_1_OFFSET 8 +#define SST_TF_INFO_2_OFFSET 16 + +#define SST_TF_MAX_LP_CLIP_RATIOS TRL_MAX_LEVELS + +#define SST_TF_LP_CLIP_RATIO_0_START 16 +#define SST_TF_LP_CLIP_RATIO_0_WIDTH 8 + +#define SST_TF_RATIO_0_START 0 +#define SST_TF_RATIO_0_WIDTH 8 + +#define SST_TF_NUM_CORE_0_START 0 +#define SST_TF_NUM_CORE_0_WIDTH 8 + +static int isst_if_get_turbo_freq_info(void __user *argp) +{ + static struct isst_turbo_freq_info turbo_freq; + struct tpmi_per_power_domain_info *power_domain_info; + int i, j; + + if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq))) + return -EFAULT; + + power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (turbo_freq.level > power_domain_info->max_level) + return -EINVAL; + + turbo_freq.max_buckets = TRL_MAX_BUCKETS; + turbo_freq.max_trl_levels = TRL_MAX_LEVELS; + turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS; + + for (i = 0; i < turbo_freq.max_clip_freqs; ++i) + _read_tf_level_info("lp_clip*", 
turbo_freq.lp_clip_freq_mhz[i], + turbo_freq.level, SST_TF_INFO_0_OFFSET, + SST_TF_LP_CLIP_RATIO_0_START + + (i * SST_TF_LP_CLIP_RATIO_0_WIDTH), + SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ) + + for (i = 0; i < TRL_MAX_LEVELS; ++i) { + for (j = 0; j < TRL_MAX_BUCKETS; ++j) + _read_tf_level_info("cydn*_bucket_*_trl", + turbo_freq.trl_freq_mhz[i][j], turbo_freq.level, + SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH), + j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH, + SST_MUL_FACTOR_FREQ) + } + + for (i = 0; i < TRL_MAX_BUCKETS; ++i) + _read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i], + turbo_freq.level, SST_TF_INFO_1_OFFSET, + SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH, + SST_MUL_FACTOR_NONE) + + if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq))) + return -EFAULT; + + return 0; +} + +static long isst_if_def_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + long ret = -ENOTTY; + + mutex_lock(&isst_tpmi_dev_lock); + switch (cmd) { + case ISST_IF_COUNT_TPMI_INSTANCES: + ret = isst_if_get_tpmi_instance_count(argp); + break; + case ISST_IF_CORE_POWER_STATE: + ret = isst_if_core_power_state(argp); + break; + case ISST_IF_CLOS_PARAM: + ret = isst_if_clos_param(argp); + break; + case ISST_IF_CLOS_ASSOC: + ret = isst_if_clos_assoc(argp); + break; + case ISST_IF_PERF_LEVELS: + ret = isst_if_get_perf_level(argp); + break; + case ISST_IF_PERF_SET_LEVEL: + ret = isst_if_set_perf_level(argp); + break; + case ISST_IF_PERF_SET_FEATURE: + ret = isst_if_set_perf_feature(argp); + break; + case ISST_IF_GET_PERF_LEVEL_INFO: + ret = isst_if_get_perf_level_info(argp); + break; + case ISST_IF_GET_PERF_LEVEL_CPU_MASK: + ret = isst_if_get_perf_level_mask(argp); + break; + case ISST_IF_GET_BASE_FREQ_INFO: + ret = isst_if_get_base_freq_info(argp); + break; + case ISST_IF_GET_BASE_FREQ_CPU_MASK: + ret = isst_if_get_base_freq_mask(argp); + break; + case ISST_IF_GET_TURBO_FREQ_INFO: + ret = isst_if_get_turbo_freq_info(argp); + break; + default: + break; + } + mutex_unlock(&isst_tpmi_dev_lock); + + return ret; +} + +#define TPMI_SST_AUTO_SUSPEND_DELAY_MS 2000 + +int tpmi_sst_dev_add(struct auxiliary_device *auxdev) +{ + struct intel_tpmi_plat_info *plat_info; + struct tpmi_sst_struct *tpmi_sst; + int i, ret, pkg = 0, inst = 0; + int num_resources; + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) { + dev_err(&auxdev->dev, "No platform info\n"); + return -EINVAL; + } + + pkg = plat_info->package_id; + if (pkg >= topology_max_packages()) { + dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg); + return -EINVAL; + } + + if (isst_common.sst_inst[pkg]) + return -EEXIST; + + num_resources = tpmi_get_resource_count(auxdev); + + if (!num_resources) + return -EINVAL; + + tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL); + if (!tpmi_sst) + return -ENOMEM; + + tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources, + sizeof(*tpmi_sst->power_domain_info), + GFP_KERNEL); + if (!tpmi_sst->power_domain_info) + return -ENOMEM; + + tpmi_sst->number_of_power_domains = num_resources; + + for (i = 0; i < num_resources; ++i) { + struct resource *res; + + res = tpmi_get_resource_at_index(auxdev, i); + if (!res) { + tpmi_sst->power_domain_info[i].sst_base = NULL; + continue; + } + + tpmi_sst->power_domain_info[i].package_id = pkg; + tpmi_sst->power_domain_info[i].power_domain_id = i; + tpmi_sst->power_domain_info[i].auxdev = auxdev; + tpmi_sst->power_domain_info[i].sst_base = 
devm_ioremap_resource(&auxdev->dev, res); + if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) + return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); + + ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]); + if (ret) { + devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base); + tpmi_sst->power_domain_info[i].sst_base = NULL; + continue; + } + + ++inst; + } + + if (!inst) + return -ENODEV; + + tpmi_sst->package_id = pkg; + auxiliary_set_drvdata(auxdev, tpmi_sst); + + mutex_lock(&isst_tpmi_dev_lock); + if (isst_common.max_index < pkg) + isst_common.max_index = pkg; + isst_common.sst_inst[pkg] = tpmi_sst; + mutex_unlock(&isst_tpmi_dev_lock); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST); + +void tpmi_sst_dev_remove(struct auxiliary_device *auxdev) +{ + struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); + + mutex_lock(&isst_tpmi_dev_lock); + isst_common.sst_inst[tpmi_sst->package_id] = NULL; + mutex_unlock(&isst_tpmi_dev_lock); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST); + +void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev) +{ + struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); + struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + void __iomem *cp_base; + + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; + power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET); + + memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET, + sizeof(power_domain_info->saved_clos_configs)); + + memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET, + sizeof(power_domain_info->saved_clos_assocs)); + + power_domain_info->saved_pp_control = readq(power_domain_info->sst_base + + power_domain_info->sst_header.pp_offset + + SST_PP_CONTROL_OFFSET); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST); + +void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) +{ + struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); + struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + void __iomem *cp_base; + + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; + writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET); + + memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs, + sizeof(power_domain_info->saved_clos_configs)); + + memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs, + sizeof(power_domain_info->saved_clos_assocs)); + + writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base + + power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST); + +#define ISST_TPMI_API_VERSION 0x02 + +int tpmi_sst_init(void) +{ + struct isst_if_cmd_cb cb; + int ret = 0; + + mutex_lock(&isst_tpmi_dev_lock); + + if (isst_core_usage_count) { + ++isst_core_usage_count; + goto init_done; + } + + isst_common.sst_inst = kcalloc(topology_max_packages(), + sizeof(*isst_common.sst_inst), + GFP_KERNEL); + if (!isst_common.sst_inst) { + ret = -ENOMEM; + goto init_done; + } + + memset(&cb, 0, sizeof(cb)); + cb.cmd_size = sizeof(struct isst_if_io_reg); + cb.offset = offsetof(struct isst_if_io_regs, io_reg); + cb.cmd_callback = NULL; + cb.api_version = ISST_TPMI_API_VERSION; + cb.def_ioctl = isst_if_def_ioctl; + cb.owner = THIS_MODULE; + ret 
= isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb); + if (ret) + kfree(isst_common.sst_inst); +init_done: + mutex_unlock(&isst_tpmi_dev_lock); + return ret; +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, INTEL_TPMI_SST); + +void tpmi_sst_exit(void) +{ + mutex_lock(&isst_tpmi_dev_lock); + if (isst_core_usage_count) + --isst_core_usage_count; + + if (!isst_core_usage_count) { + isst_if_cdev_unregister(ISST_IF_DEV_TPMI); + kfree(isst_common.sst_inst); + } + mutex_unlock(&isst_tpmi_dev_lock); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST); + +MODULE_IMPORT_NS(INTEL_TPMI); +MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN); + +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h new file mode 100644 index 000000000000..900b483703f9 --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel Speed Select Interface: Drivers Internal defines + * Copyright (c) 2023, Intel Corporation. + * All rights reserved. + * + */ + +#ifndef _ISST_TPMI_CORE_H +#define _ISST_TPMI_CORE_H + +int tpmi_sst_init(void); +void tpmi_sst_exit(void); +int tpmi_sst_dev_add(struct auxiliary_device *auxdev); +void tpmi_sst_dev_remove(struct auxiliary_device *auxdev); +void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev); +void tpmi_sst_dev_resume(struct auxiliary_device *auxdev); +#endif diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c index 405dea87de6b..06311d0e9451 100644 --- a/drivers/platform/x86/intel/telemetry/pltdrv.c +++ b/drivers/platform/x86/intel/telemetry/pltdrv.c @@ -1156,15 +1156,14 @@ out: return ret; } -static int telemetry_pltdrv_remove(struct platform_device *pdev) +static void telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - return 0; } static struct platform_driver telemetry_soc_driver = { .probe = telemetry_pltdrv_probe, - .remove = telemetry_pltdrv_remove, + .remove_new = telemetry_pltdrv_remove, .driver = { .name = DRIVER_NAME, }, diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index c999732b0f1e..a5227951decc 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -203,7 +203,7 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, struct intel_vsec_device *feature_vsec_dev; struct resource *res, *tmp; const char *name; - int ret, i; + int i; name = intel_tpmi_name(pfs->pfs_header.tpmi_id); if (!name) @@ -215,8 +215,8 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL); if (!feature_vsec_dev) { - ret = -ENOMEM; - goto free_res; + kfree(res); + return -ENOMEM; } snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name); @@ -239,20 +239,11 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, /* * intel_vsec_add_aux() is resource managed, no explicit * delete is required on error or on module unload. - * feature_vsec_dev memory is also freed as part of device - * delete. + * feature_vsec_dev and res memory are also freed as part of + * device deletion. 
*/ - ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev, - feature_vsec_dev, feature_id_name); - if (ret) - goto free_res; - - return 0; - -free_res: - kfree(res); - - return ret; + return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev, + feature_vsec_dev, feature_id_name); } static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index cb24de9e97dc..064f186ae81b 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data, int min_max) { unsigned int input; + int ret; if (kstrtouint(buf, 10, &input)) return -EINVAL; mutex_lock(&uncore_lock); - uncore_write(data, input, min_max); + ret = uncore_write(data, input, min_max); mutex_unlock(&uncore_lock); + if (ret) + return ret; + return count; } @@ -224,9 +228,15 @@ int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, u uncore_write = write_control_freq; uncore_read_freq = read_freq; - if (!uncore_root_kobj) - uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency", - &cpu_subsys.dev_root->kobj); + if (!uncore_root_kobj) { + struct device *dev_root = bus_get_dev_root(&cpu_subsys); + + if (dev_root) { + uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency", + &dev_root->kobj); + put_device(dev_root); + } + } if (uncore_root_kobj) ++uncore_instance_count; mutex_unlock(&uncore_lock); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 00ac7e381441..32e2515ee366 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -204,6 +204,13 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids); diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index c5e4e35c8d20..6fa1735ad7a4 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -325,18 +325,12 @@ static int intel_vbtn_probe(struct platform_device *device) return 0; } -static int intel_vbtn_remove(struct platform_device *device) +static void intel_vbtn_remove(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); device_init_wakeup(&device->dev, false); acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); - - /* - * Even if we failed to shut off the event stream, we can still - * safely detach from the device. 
- */ - return 0; } static int intel_vbtn_pm_prepare(struct device *dev) @@ -377,7 +371,7 @@ static struct platform_driver intel_vbtn_pl_driver = { .pm = &intel_vbtn_pm_ops, }, .probe = intel_vbtn_probe, - .remove = intel_vbtn_remove, + .remove_new = intel_vbtn_remove, }; static acpi_status __init diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 13decf36c6de..c1f9e4471b28 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -67,14 +67,6 @@ enum intel_vsec_id { VSEC_ID_TPMI = 66, }; -static enum intel_vsec_id intel_vsec_allow_list[] = { - VSEC_ID_TELEMETRY, - VSEC_ID_WATCHER, - VSEC_ID_CRASHLOG, - VSEC_ID_SDSI, - VSEC_ID_TPMI, -}; - static const char *intel_vsec_name(enum intel_vsec_id id) { switch (id) { @@ -98,26 +90,19 @@ static const char *intel_vsec_name(enum intel_vsec_id id) } } -static bool intel_vsec_allowed(u16 id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++) - if (intel_vsec_allow_list[i] == id) - return true; - - return false; -} - -static bool intel_vsec_disabled(u16 id, unsigned long quirks) +static bool intel_vsec_supported(u16 id, unsigned long caps) { switch (id) { + case VSEC_ID_TELEMETRY: + return !!(caps & VSEC_CAP_TELEMETRY); case VSEC_ID_WATCHER: - return !!(quirks & VSEC_QUIRK_NO_WATCHER); - + return !!(caps & VSEC_CAP_WATCHER); case VSEC_ID_CRASHLOG: - return !!(quirks & VSEC_QUIRK_NO_CRASHLOG); - + return !!(caps & VSEC_CAP_CRASHLOG); + case VSEC_ID_SDSI: + return !!(caps & VSEC_CAP_SDSI); + case VSEC_ID_TPMI: + return !!(caps & VSEC_CAP_TPMI); default: return false; } @@ -154,6 +139,7 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL); mutex_unlock(&vsec_ida_lock); if (ret < 0) { + kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); return ret; } @@ -168,11 +154,7 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, ret = auxiliary_device_init(auxdev); if (ret < 0) { - mutex_lock(&vsec_ida_lock); - ida_free(intel_vsec_dev->ida, auxdev->id); - mutex_unlock(&vsec_ida_lock); - kfree(intel_vsec_dev->resource); - kfree(intel_vsec_dev); + intel_vsec_dev_release(&auxdev->dev); return ret; } @@ -205,7 +187,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he unsigned long quirks = info->quirks; int i; - if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks)) + if (!intel_vsec_supported(header->id, info->caps)) return -EINVAL; if (!header->num_entries) { @@ -260,14 +242,14 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he static bool intel_vsec_walk_header(struct pci_dev *pdev, struct intel_vsec_platform_info *info) { - struct intel_vsec_header **header = info->capabilities; + struct intel_vsec_header **header = info->headers; bool have_devices = false; int ret; for ( ; *header; header++) { ret = intel_vsec_add_dev(pdev, *header, info); if (ret) - dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n", + dev_info(&pdev->dev, "Could not add device for VSEC id %d\n", (*header)->id); else have_devices = true; @@ -402,14 +384,8 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id return 0; } -/* TGL info */ -static const struct intel_vsec_platform_info tgl_info = { - .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | - VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW, -}; - /* DG1 info */ -static struct intel_vsec_header dg1_telemetry = { +static struct 
intel_vsec_header dg1_header = { .length = 0x10, .id = 2, .num_entries = 1, @@ -418,19 +394,31 @@ static struct intel_vsec_header dg1_telemetry = { .offset = 0x466000, }; -static struct intel_vsec_header *dg1_capabilities[] = { - &dg1_telemetry, +static struct intel_vsec_header *dg1_headers[] = { + &dg1_header, NULL }; static const struct intel_vsec_platform_info dg1_info = { - .capabilities = dg1_capabilities, + .caps = VSEC_CAP_TELEMETRY, + .headers = dg1_headers, .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW, }; /* MTL info */ static const struct intel_vsec_platform_info mtl_info = { - .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG, + .caps = VSEC_CAP_TELEMETRY, +}; + +/* OOBMSM info */ +static const struct intel_vsec_platform_info oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI, +}; + +/* TGL info */ +static const struct intel_vsec_platform_info tgl_info = { + .caps = VSEC_CAP_TELEMETRY, + .quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW, }; #define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d @@ -445,7 +433,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) }, - { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, { } diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index ae8fe92c5595..0fd042c171ba 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -5,6 +5,12 @@ #include <linux/auxiliary_bus.h> #include <linux/bits.h> +#define VSEC_CAP_TELEMETRY BIT(0) +#define VSEC_CAP_WATCHER BIT(1) +#define VSEC_CAP_CRASHLOG BIT(2) +#define VSEC_CAP_SDSI BIT(3) +#define VSEC_CAP_TPMI BIT(4) + struct pci_dev; struct resource; @@ -27,7 +33,8 @@ enum intel_vsec_quirks { /* Platform specific data */ struct intel_vsec_platform_info { - struct intel_vsec_header **capabilities; + struct intel_vsec_header **headers; + unsigned long caps; unsigned long quirks; }; |
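The _read_cp_info()/_read_pp_info()/_read_*_level_info() macros in isst_tpmi_core.c above all follow the same pattern: read a 64-bit MMIO register, isolate a bit-field of 'width' bits starting at bit 'start', and scale the result (for example by a ratio-to-MHz factor). A standalone restatement of that pattern is sketched below for illustration only; it is not part of the patch and the helper name is hypothetical.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper restating the mask/shift/scale pattern of the macros */
static u64 sst_read_field(void __iomem *base, u32 offset, unsigned int start,
			  unsigned int width, unsigned int mult_factor)
{
	u64 val = readq(base + offset);

	val &= GENMASK_ULL(start + width - 1, start);	/* isolate the field */
	val >>= start;					/* right-align it */
	return val * mult_factor;			/* e.g. ratio -> MHz */
}

The driver expresses this as macros rather than a helper function, presumably so the same pattern can assign into destination fields of differing types and pick up power_domain_info from the caller's scope.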