Diffstat (limited to 'include/linux')
70 files changed, 610 insertions, 202 deletions
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h index 57bb54f6767a..ef4bd705eb65 100644 --- a/include/linux/armada-37xx-rwtm-mailbox.h +++ b/include/linux/armada-37xx-rwtm-mailbox.h @@ -2,7 +2,7 @@ /* * rWTM BIU Mailbox driver for Armada 37xx * - * Author: Marek Behun <marek.behun@nic.cz> + * Author: Marek BehĂșn <kabel@kernel.org> */ #ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h new file mode 100644 index 000000000000..08cd0c2ad34f --- /dev/null +++ b/include/linux/asn1_encoder.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _LINUX_ASN1_ENCODER_H +#define _LINUX_ASN1_ENCODER_H + +#include <linux/types.h> +#include <linux/asn1.h> +#include <linux/asn1_ber_bytecode.h> +#include <linux/bug.h> + +#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32)) +unsigned char * +asn1_encode_integer(unsigned char *data, const unsigned char *end_data, + s64 integer); +unsigned char * +asn1_encode_oid(unsigned char *data, const unsigned char *end_data, + u32 oid[], int oid_len); +unsigned char * +asn1_encode_tag(unsigned char *data, const unsigned char *end_data, + u32 tag, const unsigned char *string, int len); +unsigned char * +asn1_encode_octet_string(unsigned char *data, + const unsigned char *end_data, + const unsigned char *string, u32 len); +unsigned char * +asn1_encode_sequence(unsigned char *data, const unsigned char *end_data, + const unsigned char *seq, int len); +unsigned char * +asn1_encode_boolean(unsigned char *data, const unsigned char *end_data, + bool val); + +#endif diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 40bad71865ea..532bcbfc4716 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -476,7 +476,6 @@ struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ - u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); @@ -485,7 +484,6 @@ struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table */ - u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index a19519f4241d..eed86eb0a1de 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -4,7 +4,7 @@ #include <linux/preempt.h> -#ifdef CONFIG_TRACE_IRQFLAGS +#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) @@ -32,4 +32,10 @@ static inline void local_bh_enable(void) __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } +#ifdef CONFIG_PREEMPT_RT +extern bool local_bh_blocked(void); +#else +static inline bool local_bh_blocked(void) { return false; } +#endif + #endif /* _LINUX_BH_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3625f019767d..fdac0534ce79 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -40,6 +40,7 @@ struct bpf_local_storage; struct bpf_local_storage_map; struct kobject; struct mem_cgroup; +struct module; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; @@ -623,6 +624,7 @@ struct bpf_trampoline { /* Executable image of trampoline */ struct bpf_tramp_image *cur_image; u64 selector; + struct module *mod; }; struct bpf_attach_target_info { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 
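A quick illustrative sketch of the new <linux/asn1_encoder.h> helpers above (not part of the diff): it assumes, as their trusted-keys users do, that each encoder returns the advanced write pointer or an ERR_PTR() value on failure, and that <linux/err.h> is available. Buffer and function names are made up.

/* Sketch: wrap an INTEGER in a SEQUENCE using the new helpers. */
static int example_encode_seq(unsigned char *buf, const unsigned char *end)
{
        unsigned char scratch[16];
        unsigned char *w, *out;

        /* Encode the inner INTEGER into a scratch buffer first. */
        w = asn1_encode_integer(scratch, scratch + sizeof(scratch), 42);
        if (IS_ERR(w))
                return PTR_ERR(w);

        /* Then wrap it in a SEQUENCE written to the caller's buffer. */
        out = asn1_encode_sequence(buf, end, scratch, w - scratch);
        if (IS_ERR(out))
                return PTR_ERR(out);

        return out - buf;       /* bytes written */
}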
86d143db6523..a247b089ca78 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -70,7 +70,7 @@ struct module; * @mark_unstable: Optional function to inform the clocksource driver that * the watchdog marked the clocksource unstable * @tick_stable: Optional function called periodically from the watchdog - * code to provide stable syncrhonization points + * code to provide stable synchronization points * @wd_list: List head to enqueue into the watchdog list (internal) * @cs_last: Last clocksource value for clocksource watchdog * @wd_last: Last watchdog value corresponding to @cs_last diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f14adb882338..3d4442397bf9 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -135,6 +135,7 @@ enum cpuhp_state { CPUHP_AP_RISCV_TIMER_STARTING, CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, + CPUHP_AP_TI_GP_TIMER_STARTING, CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, @@ -175,6 +176,8 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, diff --git a/include/linux/device.h b/include/linux/device.h index ba660731bd25..38a2071cf776 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -49,7 +49,7 @@ struct dev_iommu; /** * struct subsys_interface - interfaces to device functions * @name: name of the device function - * @subsys: subsytem of the devices to attach to + * @subsys: subsystem of the devices to attach to * @node: the list of functions registered at the subsystem * @add_dev: device hookup to device function handler * @remove_dev: device hookup to device function handler @@ -439,6 +439,9 @@ struct dev_links_info { * @state_synced: The hardware state of this device has been synced to match * the software state of this device by calling the driver/bus * sync_state() callback. + * @can_match: The device has matched with a driver at least once or it is in + * a bus (like AMBA) which can't check for matching drivers until + * other devices probe successfully. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the @@ -545,6 +548,7 @@ struct device { bool offline:1; bool of_node_reused:1; bool state_synced:1; + bool can_match:1; #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h new file mode 100644 index 000000000000..f40f77717a24 --- /dev/null +++ b/include/linux/devm-helpers.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __LINUX_DEVM_HELPERS_H +#define __LINUX_DEVM_HELPERS_H + +/* + * Functions which do automatically cancel operations or release resources upon + * driver detach. + * + * These should be helpful to avoid mixing the manual and devm-based resource + * management which can be source of annoying, rarely occurring, + * hard-to-reproduce bugs. + * + * Please take into account that devm based cancellation may be performed some + * time after the remove() is ran. 
+ * + * Thus mixing devm and manual resource management can easily cause problems + * when unwinding operations with dependencies. IRQ scheduling a work in a queue + * is typical example where IRQs are often devm-managed and WQs are manually + * cleaned at remove(). If IRQs are not manually freed at remove() (and this is + * often the case when we use devm for IRQs) we have a period of time after + * remove() - and before devm managed IRQs are freed - where new IRQ may fire + * and schedule a work item which won't be cancelled because remove() was + * already ran. + */ + +#include <linux/device.h> +#include <linux/workqueue.h> + +static inline void devm_delayed_work_drop(void *res) +{ + cancel_delayed_work_sync(res); +} + +/** + * devm_delayed_work_autocancel - Resource-managed delayed work allocation + * @dev: Device which lifetime work is bound to + * @w: Work item to be queued + * @worker: Worker function + * + * Initialize delayed work which is automatically cancelled when driver is + * detached. A few drivers need delayed work which must be cancelled before + * driver is detached to avoid accessing removed resources. + * devm_delayed_work_autocancel() can be used to omit the explicit + * cancelleation when driver is detached. + */ +static inline int devm_delayed_work_autocancel(struct device *dev, + struct delayed_work *w, + work_func_t worker) +{ + INIT_DELAYED_WORK(w, worker); + return devm_add_action(dev, devm_delayed_work_drop, w); +} + +#endif diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 883acef895bc..2e2b8d6140ed 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -360,7 +360,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs); * * This is a combination of syscall_exit_to_user_mode_work() (1,2) and * exit_to_user_mode(). This function is preferred unless there is a - * compelling architectural reason to use the seperate functions. + * compelling architectural reason to use the separate functions. */ void syscall_exit_to_user_mode(struct pt_regs *regs); @@ -381,7 +381,7 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs); * irqentry_exit_to_user_mode - Interrupt exit work * @regs: Pointer to current's pt_regs * - * Invoked with interrupts disbled and fully valid regs. Returns with all + * Invoked with interrupts disabled and fully valid regs. Returns with all * work handled, interrupts disabled such that the caller can immediately * switch to user mode. Called from architecture specific interrupt * handling code. diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index ec4cd3921c67..cdca84e6dd6b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -87,9 +87,7 @@ u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); -/** - * struct ethtool_link_ext_state_info - link extended state and substate. - */ +/* Link extended state and substate. */ struct ethtool_link_ext_state_info { enum ethtool_link_ext_state link_ext_state; union { @@ -129,7 +127,6 @@ struct ethtool_link_ksettings { __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; u32 lanes; - enum ethtool_link_mode_bit_indices link_mode; }; /** @@ -292,6 +289,9 @@ struct ethtool_pause_stats { * do not attach ext_substate attribute to netlink message). If link_ext_state * and link_ext_substate are unknown, return -ENODATA. If not implemented, * link_ext_state and link_ext_substate will not be sent to userspace. 
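Illustrative only, not part of the diff: a minimal sketch of a driver using the new devm_delayed_work_autocancel() helper from devm-helpers.h above; all driver names are hypothetical.

static void foo_poll(struct work_struct *work)
{
        /* Periodic polling work; rescheduling omitted for brevity. */
}

static int foo_probe(struct platform_device *pdev)
{
        struct delayed_work *dwork;
        int ret;

        dwork = devm_kzalloc(&pdev->dev, sizeof(*dwork), GFP_KERNEL);
        if (!dwork)
                return -ENOMEM;

        /* Cancelled (synchronously) when the driver detaches. */
        ret = devm_delayed_work_autocancel(&pdev->dev, dwork, foo_poll);
        if (ret)
                return ret;

        schedule_delayed_work(dwork, msecs_to_jiffies(500));
        return 0;
}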
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of + * @get_eeprom and @set_eeprom requests. + * Returns 0 if device does not support EEPROM access. * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. Fill in the data argument with the eeprom values @@ -384,6 +384,8 @@ struct ethtool_pause_stats { * @get_module_eeprom: Get the eeprom information from the plug-in module * @get_eee: Get Energy-Efficient (EEE) supported and status. * @set_eee: Set EEE status (enable/disable) as well as LPI timers. + * @get_tunable: Read the value of a driver / device tunable. + * @set_tunable: Set the value of a driver / device tunable. * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX @@ -547,8 +549,8 @@ struct phy_tdr_config; * @get_sset_count: Get number of strings that @get_strings will write. * @get_strings: Return a set of strings that describe the requested objects * @get_stats: Return extended statistics about the PHY device. - * @start_cable_test - Start a cable test - * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test + * @start_cable_test: Start a cable test + * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test * * All operations are optional (i.e. the function pointer may be set to %NULL) * and callers must take this into account. Callers must hold the RTNL lock. @@ -571,4 +573,12 @@ struct ethtool_phy_ops { */ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); +/* + * ethtool_params_from_link_mode - Derive link parameters from a given link mode + * @link_ksettings: Link parameters to be derived from the link mode + * @link_mode: Link mode + */ +void +ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, + enum ethtool_link_mode_bit_indices link_mode); #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 7c9d6a2d7e90..69bc86ea382c 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -6,6 +6,7 @@ #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/ftrace_irq.h> +#include <linux/sched.h> #include <linux/vtime.h> #include <asm/hardirq.h> diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 763802b2b8f9..c27329e2a5ad 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -231,6 +231,7 @@ struct hid_sensor_common { struct hid_sensor_hub_attribute_info report_state; struct hid_sensor_hub_attribute_info power_state; struct hid_sensor_hub_attribute_info sensitivity; + struct hid_sensor_hub_attribute_info sensitivity_rel; struct hid_sensor_hub_attribute_info report_latency; struct work_struct work; }; @@ -248,11 +249,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo) int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, u32 usage_id, - struct hid_sensor_common *st); + struct hid_sensor_common *st, + const u32 *sensitivity_addresses, + u32 sensitivity_addresses_len); int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, int val1, int val2); +int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1, + int val2); int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st, int *val1, int *val2); +int hid_sensor_read_raw_hyst_rel_value(struct 
hid_sensor_common *st, + int *val1, int *val2); int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, int val1, int val2); int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 3bbdbccc5805..ac631159403a 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -149,6 +149,7 @@ /* Per data field properties */ #define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00 #define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000 +#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000 /* Power state enumerations */ #define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850 diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f1d74dcf0353..9c2373a1cb2d 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -234,6 +234,7 @@ static inline u32 hv_get_avail_to_write_percent( * 5 . 0 (Newer Windows 10) * 5 . 1 (Windows 10 RS4) * 5 . 2 (Windows Server 2019, RS5) + * 5 . 3 (Windows Server 2022) */ #define VERSION_WS2008 ((0 << 16) | (13)) @@ -245,6 +246,7 @@ static inline u32 hv_get_avail_to_write_percent( #define VERSION_WIN10_V5 ((5 << 16) | (0)) #define VERSION_WIN10_V5_1 ((5 << 16) | (1)) #define VERSION_WIN10_V5_2 ((5 << 16) | (2)) +#define VERSION_WIN10_V5_3 ((5 << 16) | (3)) /* Make maximum size of pipe payload of 16K */ #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) @@ -284,7 +286,7 @@ struct vmbus_channel_offer { /* * Pipes: - * The following sructure is an integrated pipe protocol, which + * The following structure is an integrated pipe protocol, which * is implemented on top of standard user-defined data. Pipe * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own * use. @@ -475,6 +477,7 @@ enum vmbus_channel_message_type { CHANNELMSG_TL_CONNECT_REQUEST = 21, CHANNELMSG_MODIFYCHANNEL = 22, CHANNELMSG_TL_CONNECT_RESULT = 23, + CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24, CHANNELMSG_COUNT }; @@ -588,6 +591,13 @@ struct vmbus_channel_open_result { u32 status; } __packed; +/* Modify Channel Result parameters */ +struct vmbus_channel_modifychannel_response { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 status; +} __packed; + /* Close channel parameters; */ struct vmbus_channel_close_channel { struct vmbus_channel_message_header header; @@ -720,6 +730,7 @@ struct vmbus_channel_msginfo { struct vmbus_channel_gpadl_torndown gpadl_torndown; struct vmbus_channel_gpadl_created gpadl_created; struct vmbus_channel_version_response version_response; + struct vmbus_channel_modifychannel_response modify_response; } response; u32 msgsize; @@ -883,11 +894,11 @@ struct vmbus_channel { * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support * a scalable communication infrastructure with the host. - * The support for sub-channels is implemented as an extention + * The support for sub-channels is implemented as an extension * to the current infrastructure. * The initial offer is considered the primary channel and this * offer message will indicate if the host supports sub-channels. - * The guest is free to ask for sub-channels to be offerred and can + * The guest is free to ask for sub-channels to be offered and can * open these sub-channels as a normal "primary" channel. However, * all sub-channels will have the same type and instance guids as the * primary channel. 
Requests sent on a given channel will result in a @@ -951,7 +962,7 @@ struct vmbus_channel { * Clearly, these optimizations improve throughput at the expense of * latency. Furthermore, since the channel is shared for both * control and data messages, control messages currently suffer - * unnecessary latency adversley impacting performance and boot + * unnecessary latency adversely impacting performance and boot * time. To fix this issue, permit tagging the channel as being * in "low latency" mode. In this mode, we will bypass the monitor * mechanism. @@ -1594,7 +1605,7 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, const guid_t *shv_host_servie_id); -int vmbus_send_modifychannel(u32 child_relid, u32 target_vp); +int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. */ diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h index c5d48e1c2d36..52620e5b8052 100644 --- a/include/linux/iio/adc/adi-axi-adc.h +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -15,7 +15,7 @@ struct iio_chan_spec; * struct adi_axi_adc_chip_info - Chip specific information * @name Chip name * @id Chip ID (usually product ID) - * @channels Channel specifications of type @struct axi_adc_chan_spec + * @channels Channel specifications of type @struct iio_chan_spec * @num_channels Number of @channels * @scale_table Supported scales by the chip; tuples of 2 ints * @num_scales Number of scales in the table diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index 5b502291d6a4..5c355be89814 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -7,10 +7,11 @@ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ -struct iio_buffer; +struct iio_dev; struct device; -struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, - const char *channel); +int devm_iio_dmaengine_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + const char *channel); #endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 8febc23f5f26..b6928ac5c63d 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -41,7 +41,7 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask); -void iio_device_attach_buffer(struct iio_dev *indio_dev, - struct iio_buffer *buffer); +int iio_device_attach_buffer(struct iio_dev *indio_dev, + struct iio_buffer *buffer); #endif /* _IIO_BUFFER_GENERIC_H_ */ diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index a63dc07b7350..245b32918ae1 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -6,6 +6,8 @@ #ifdef CONFIG_IIO_BUFFER +#include <uapi/linux/iio/buffer.h> + struct iio_dev; struct iio_buffer; @@ -72,6 +74,9 @@ struct iio_buffer { /** @length: Number of datums in buffer. */ unsigned int length; + /** @flags: File ops flags including busy flag. */ + unsigned long flags; + /** @bytes_per_datum: Size of individual datum including timestamp. */ size_t bytes_per_datum; @@ -97,17 +102,14 @@ struct iio_buffer { /* @scan_timestamp: Does the scan mode include a timestamp. */ bool scan_timestamp; - /* @scan_el_dev_attr_list: List of scan element related attributes. 
*/ - struct list_head scan_el_dev_attr_list; - - /* @buffer_group: Attributes of the buffer group. */ - struct attribute_group buffer_group; + /* @buffer_attr_list: List of buffer attributes. */ + struct list_head buffer_attr_list; /* - * @scan_el_group: Attribute group for those attributes not - * created from the iio_chan_info array. + * @buffer_group: Attributes of the new buffer group. + * Includes scan elements attributes. */ - struct attribute_group scan_el_group; + struct attribute_group buffer_group; /* @attrs: Standard attributes of the buffer. */ const struct attribute **attrs; @@ -115,6 +117,9 @@ struct iio_buffer { /* @demux_bounce: Buffer for doing gather from incoming scan. */ void *demux_bounce; + /* @attached_entry: Entry in the devices list of buffers attached by the driver. */ + struct list_head attached_entry; + /* @buffer_list: Entry in the devices list of current buffers. */ struct list_head buffer_list; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index c9b80be82440..7ce8a8adad58 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -96,8 +96,7 @@ struct platform_device; int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device, cros_ec_sensors_capture_t trigger_capture, - cros_ec_sensorhub_push_data_cb_t push_data, - bool has_hw_fifo); + cros_ec_sensorhub_push_data_cb_t push_data); irqreturn_t cros_ec_sensors_capture(int irq, void *p); int cros_ec_sensors_push_data(struct iio_dev *indio_dev, diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 0a90ba8fa1bb..5fa5957586cf 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -242,6 +242,21 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val); int iio_read_channel_processed(struct iio_channel *chan, int *val); /** + * iio_read_channel_processed_scale() - read and scale a processed value + * @chan: The channel being queried. + * @val: Value read back. + * @scale: Scale factor to apply during the conversion + * + * Returns an error code or 0. + * + * This function will read a processed value from a channel. This will work + * like @iio_read_channel_processed() but also scale with an additional + * scale factor while attempting to minimize any precision loss. + */ +int iio_read_channel_processed_scale(struct iio_channel *chan, int *val, + unsigned int scale); + +/** * iio_write_channel_attribute() - Write values to the device attribute. * @chan: The channel being queried. * @val: Value being written. diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index e9801c8d49c0..1f7e53c506b6 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h @@ -15,7 +15,7 @@ * @vref_buffered: Controls buffering of the external reference voltage. * * Vref related settings are available only on MCP4756. See - * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. + * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information. 
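Not part of the diff: a hedged consumer-side sketch of the new iio_read_channel_processed_scale() documented above; the unit conversion and the function name are hypothetical.

/* Read a processed value and scale it, e.g. millivolts to microvolts. */
static int foo_read_microvolts(struct iio_channel *chan, int *uv)
{
        /*
         * Like iio_read_channel_processed() followed by *uv *= 1000,
         * but the core applies the factor with less precision loss.
         */
        return iio_read_channel_processed_scale(chan, uv, 1000);
}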
*/ struct mcp4725_platform_data { bool use_vref; diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 07c5a8e52ca8..32addd5e790e 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -7,11 +7,18 @@ * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information * @event_interface: event chrdevs associated with interrupt lines + * @attached_buffers: array of buffers statically attached by the driver + * @attached_buffers_cnt: number of buffers in the array of statically attached buffers + * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface * @buffer_list: list of all buffers currently attached * @channel_attr_list: keep track of automatically created channel * attributes * @chan_attr_group: group for all attrs in base directory * @ioctl_handlers: ioctl handlers registered with the core handler + * @groups: attribute groups + * @groupcounter: index of next attribute group + * @legacy_scan_el_group: attribute group for legacy scan elements attribute group + * @legacy_buffer_group: attribute group for legacy buffer attributes group * @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs reads * @read_buf: read buffer to be used for the initial reg read @@ -20,10 +27,17 @@ struct iio_dev_opaque { struct iio_dev indio_dev; struct iio_event_interface *event_interface; + struct iio_buffer **attached_buffers; + unsigned int attached_buffers_cnt; + struct iio_ioctl_handler *buffer_ioctl_handler; struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; struct list_head ioctl_handlers; + const struct attribute_group **groups; + int groupcounter; + struct attribute_group legacy_scan_el_group; + struct attribute_group legacy_buffer_group; #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e4a9822e6495..f2d65e2e88b6 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -518,8 +518,6 @@ struct iio_buffer_setup_ops { * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable * @chrdev: [INTERN] associated character device - * @groups: [INTERN] attribute groups - * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. 
* @priv: [DRIVER] reference to driver's private information * **MUST** be accessed **ONLY** via iio_priv() helper @@ -556,9 +554,6 @@ struct iio_dev { struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; -#define IIO_MAX_GROUPS 6 - const struct attribute_group *groups[IIO_MAX_GROUPS + 1]; - int groupcounter; unsigned long flags; void *priv; @@ -698,7 +693,7 @@ static inline void *iio_priv(const struct iio_dev *indio_dev) void iio_device_free(struct iio_dev *indio_dev); struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); __printf(2, 3) -struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, +struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fmt, ...); /** * iio_buffer_enabled() - helper function to test if the buffer is enabled diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 04e96d688ba9..f9b728d490b1 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -428,6 +428,16 @@ static inline int adis_initial_startup(struct adis *adis) return ret; } +static inline void adis_dev_lock(struct adis *adis) +{ + mutex_lock(&adis->state_lock); +} + +static inline void adis_dev_unlock(struct adis *adis) +{ + mutex_unlock(&adis->state_lock); +} + int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int error_mask, int *val); diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 1fc1efa7799d..ccd2ceae7b25 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -3,11 +3,20 @@ #define __LINUX_IIO_KFIFO_BUF_H__ struct iio_buffer; +struct iio_buffer_setup_ops; +struct iio_dev; struct device; struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); -struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); +int devm_iio_kfifo_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + int mode_flags, + const struct iio_buffer_setup_ops *setup_ops, + const struct attribute **buffer_attrs); + +#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \ + devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL) #endif diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index b532c875bc24..e51fba66de4b 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -9,6 +9,7 @@ #ifndef _INDUSTRIAL_IO_SYSFS_H_ #define _INDUSTRIAL_IO_SYSFS_H_ +struct iio_buffer; struct iio_chan_spec; /** @@ -17,12 +18,14 @@ struct iio_chan_spec; * @address: associated register address * @l: list head for maintaining list of dynamically created attrs * @c: specification for the underlying channel + * @buffer: the IIO buffer to which this attribute belongs to (if any) */ struct iio_dev_attr { struct device_attribute dev_attr; u64 address; struct list_head l; struct iio_chan_spec const *c; + struct iio_buffer *buffer; }; #define to_iio_dev_attr(_dev_attr) \ diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 055890b6ffcf..096f68dd2e0c 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -161,7 +161,8 @@ void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); -__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...); +__printf(2, 3) +struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...); void 
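Not part of the diff: the kfifo buffer API above replaces the old allocate-and-attach pair with a single devm call. A sketch under the assumption of a driver-provided setup_ops and the usual INDIO_BUFFER_SOFTWARE mode; the names are hypothetical.

static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
        /* driver pre/post enable/disable hooks would go here */
};

static int foo_alloc_buffer(struct device *dev, struct iio_dev *indio_dev)
{
        /*
         * Allocates the kfifo, attaches it and registers cleanup in one
         * go; replaces iio_kfifo_allocate() + iio_device_attach_buffer().
         */
        return devm_iio_kfifo_buffer_setup(dev, indio_dev,
                                           INDIO_BUFFER_SOFTWARE,
                                           &foo_buffer_setup_ops);
}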
iio_trigger_free(struct iio_trigger *trig); /** diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 1e3ed6f55bca..84b3f8175cc6 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -16,6 +16,7 @@ enum iio_event_info { IIO_EV_INFO_PERIOD, IIO_EV_INFO_HIGH_PASS_FILTER_3DB, IIO_EV_INFO_LOW_PASS_FILTER_3DB, + IIO_EV_INFO_TIMEOUT, }; #define IIO_VAL_INT 1 @@ -50,6 +51,7 @@ enum iio_chan_info_enum { IIO_CHAN_INFO_PHASE, IIO_CHAN_INFO_HARDWAREGAIN, IIO_CHAN_INFO_HYSTERESIS, + IIO_CHAN_INFO_HYSTERESIS_RELATIVE, IIO_CHAN_INFO_INT_TIME, IIO_CHAN_INFO_ENABLE, IIO_CHAN_INFO_CALIBHEIGHT, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 967e25767153..4777850a6dc7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -61,6 +61,9 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. + * Users will enable it explicitly by enable_irq() or enable_nmi() + * later. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +77,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_NO_AUTOEN 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -654,26 +658,21 @@ enum TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ }; -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); -} +void tasklet_unlock(struct tasklet_struct *t); +void tasklet_unlock_wait(struct tasklet_struct *t); +void tasklet_unlock_spin_wait(struct tasklet_struct *t); -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} #else -#define tasklet_trylock(t) 1 -#define tasklet_unlock_wait(t) do { } while (0) -#define tasklet_unlock(t) do { } while (0) +static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; } +static inline void tasklet_unlock(struct tasklet_struct *t) { } +static inline void tasklet_unlock_wait(struct tasklet_struct *t) { } +static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { } #endif extern void __tasklet_schedule(struct tasklet_struct *t); @@ -698,6 +697,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t) smp_mb__after_atomic(); } +/* + * Do not use in new code. Disabling tasklets from atomic contexts is + * error prone and should be avoided. 
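Not part of the diff: the usual pattern for the new IRQF_NO_AUTOEN flag described above is to request the interrupt without enabling it, then enable it explicitly once the device is ready, avoiding the racy request_irq() + disable_irq() dance. The names below are hypothetical.

static irqreturn_t foo_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_setup_irq(int irq, void *data)
{
        int ret;

        /* The IRQ stays disabled after a successful request. */
        ret = request_irq(irq, foo_irq_handler, IRQF_NO_AUTOEN, "foo", data);
        if (ret)
                return ret;

        /* ... finish initialising the hardware ... */

        enable_irq(irq);        /* enable explicitly once ready */
        return 0;
}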
+ */ +static inline void tasklet_disable_in_atomic(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_spin_wait(t); + smp_mb(); +} + static inline void tasklet_disable(struct tasklet_struct *t) { tasklet_disable_nosync(t); @@ -712,7 +722,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) } extern void tasklet_kill(struct tasklet_struct *t); -extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); extern void tasklet_setup(struct tasklet_struct *t, diff --git a/include/linux/irq.h b/include/linux/irq.h index 2efde6a79b7e..31b347c9f8dd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -116,7 +116,7 @@ enum { * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping - * all descendent irqchips. + * all descendant irqchips. */ enum { IRQ_SET_MASK_OK = 0, @@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d) /* * Must only be called of irqchip.irq_set_affinity() or low level - * hieararchy domain allocation functions. + * hierarchy domain allocation functions. */ static inline void irqd_set_single_target(struct irq_data *d) { @@ -1258,11 +1258,13 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); */ extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; #else +#ifndef set_handle_irq #define set_handle_irq(handle_irq) \ do { \ (void)handle_irq; \ WARN_ON(1); \ } while (0) #endif +#endif #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 943c3411ca10..2c63375bbd43 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -145,4 +145,6 @@ int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *vpe_ops, const struct irq_domain_ops *sgi_ops); +bool gic_cpuif_has_vsgi(void); + #endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 891b323266df..df4651250785 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -32,7 +32,7 @@ struct pt_regs; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers - * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 33cacc8af26d..7a1dd7b969b6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -415,15 +415,6 @@ static inline unsigned int irq_linear_revmap(struct irq_domain *domain, extern unsigned int irq_find_mapping(struct irq_domain *host, irq_hw_number_t hwirq); extern unsigned int irq_create_direct_mapping(struct irq_domain *host); -extern int irq_create_strict_mappings(struct irq_domain *domain, - unsigned int irq_base, - irq_hw_number_t hwirq_base, int count); - -static inline int irq_create_identity_mapping(struct irq_domain *host, - irq_hw_number_t hwirq) -{ - return irq_create_strict_mappings(host, hwirq, hwirq, 1); -} extern 
const struct irq_domain_ops irq_domain_simple_ops; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index d92691262f51..05f5554d860f 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -382,6 +382,21 @@ struct static_key_false { [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \ } +#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name) +#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name) +#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \ + __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name) + +#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name) +#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name) +#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \ + __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name) + +#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name) +#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name) +#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \ + __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name) + extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ @@ -482,6 +497,10 @@ extern bool ____wrong_branch_error(void); #endif /* CONFIG_JUMP_LABEL */ +#define static_branch_maybe(config, x) \ + (IS_ENABLED(config) ? static_branch_likely(x) \ + : static_branch_unlikely(x)) + /* * Advanced usage; refcount, branch is enabled when: count != 0 */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b91732bd05d7..d53ea3c047bc 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -330,7 +330,7 @@ static inline bool kasan_check_byte(const void *address) #endif /* CONFIG_KASAN */ -#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) void kasan_unpoison_task_stack(struct task_struct *task); #else static inline void kasan_unpoison_task_stack(struct task_struct *task) {} @@ -376,6 +376,12 @@ static inline void *kasan_reset_tag(const void *addr) #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ +#ifdef CONFIG_KASAN_HW_TAGS + +void kasan_report_async(void); + +#endif /* CONFIG_KASAN_HW_TAGS */ + #ifdef CONFIG_KASAN_SW_TAGS void __init kasan_init_sw_tags(void); #else diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 52b1610eae68..c544b70dfbd2 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -28,11 +28,12 @@ /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 -/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do +/* These Ethernet switch families contain embedded PHYs, but they do * not have a model ID. 
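Not part of the diff: the new *_MAYBE jump-label helpers above select the true/false key flavour from a config symbol; the mm.h hunk further down shows the in-tree use for init_on_alloc/init_on_free. A minimal standalone sketch with a hypothetical CONFIG_FOO_DEFAULT_ON:

DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO_DEFAULT_ON, foo_enabled);

static bool foo_is_enabled(void)
{
        /*
         * Expands to static_branch_likely() when CONFIG_FOO_DEFAULT_ON=y,
         * static_branch_unlikely() otherwise.
         */
        return static_branch_maybe(CONFIG_FOO_DEFAULT_ON, &foo_enabled);
}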
So the switch driver traps reads to the ID2 * register and returns the switch family ID */ -#define MARVELL_PHY_ID_88E6390 0x01410f90 +#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 +#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d26acc8b21cd..944aa3aa3035 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -117,6 +117,7 @@ struct mhi_link_info { * @MHI_EE_WFW: WLAN firmware mode * @MHI_EE_PTHRU: Passthrough * @MHI_EE_EDL: Embedded downloader + * @MHI_EE_FP: Flash Programmer Environment */ enum mhi_ee_type { MHI_EE_PBL, @@ -126,7 +127,8 @@ enum mhi_ee_type { MHI_EE_WFW, MHI_EE_PTHRU, MHI_EE_EDL, - MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + MHI_EE_FP, + MHI_EE_MAX_SUPPORTED = MHI_EE_FP, MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ MHI_EE_NOT_SUPPORTED, MHI_EE_MAX, @@ -203,7 +205,7 @@ enum mhi_db_brst_mode { * @num: The number assigned to this channel * @num_elements: The number of elements that can be queued to this channel * @local_elements: The local ring length of the channel - * @event_ring: The event rung index that services this channel + * @event_ring: The event ring index that services this channel * @dir: Direction that data may flow on this channel * @type: Channel type * @ee_mask: Execution Environment mask for this channel @@ -296,7 +298,7 @@ struct mhi_controller_config { * @wake_db: MHI WAKE doorbell register address * @iova_start: IOMMU starting address for data (required) * @iova_stop: IOMMU stop address for data (required) - * @fw_image: Firmware image name for normal booting (required) + * @fw_image: Firmware image name for normal booting (optional) * @edl_image: Firmware image name for emergency download mode (optional) * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) @@ -352,7 +354,6 @@ struct mhi_controller_config { * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) - * @pre_init: MHI host needs to do pre-initialization before power up * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) * @@ -445,7 +446,6 @@ struct mhi_controller { int index; bool bounce_buf; bool fbc_download; - bool pre_init; bool wake_set; unsigned long irq_flags; }; @@ -712,13 +712,27 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev); void mhi_device_put(struct mhi_device *mhi_dev); /** - * mhi_prepare_for_transfer - Setup channel for data transfer + * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer. + * Allocate and initialize the channel context and + * also issue the START channel command to both + * channels. Channels can be started only if both + * host and device execution environments match and + * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels */ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); /** - * mhi_unprepare_from_transfer - Unprepare the channels + * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. + * Issue the RESET channel command and let the + * device clean-up the context so no incoming + * transfers are seen on the host. Free memory + * associated with the context on host. If device + * is unresponsive, only perform a host side + * clean-up. 
Channels can be reset only if both + * host and device execution environments match + * and channels are in an ENABLED, STOPPED or + * SUSPENDED state. * @mhi_dev: Device associated with the channels */ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index df5d91c8b2d4..9c68b2da14c6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_at_80[0x18]; + u8 reserved_at_80[0x10]; + u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8]; - u8 log_max_flow_counter[0x8]; - u8 reserved_at_a8[0x10]; + u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; u8 reserved_at_c0[0x40]; @@ -8835,6 +8835,8 @@ struct mlx5_ifc_pplm_reg_bits { u8 fec_override_admin_100g_2x[0x10]; u8 fec_override_admin_50g_1x[0x10]; + + u8 reserved_at_140[0x140]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -10198,7 +10200,7 @@ struct mlx5_ifc_pbmc_reg_bits { struct mlx5_ifc_bufferx_reg_bits buffer[10]; - u8 reserved_at_2e0[0x40]; + u8 reserved_at_2e0[0x80]; }; struct mlx5_ifc_qtct_reg_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 8ba434287387..616dcaf08d99 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2904,18 +2904,20 @@ static inline void kernel_poison_pages(struct page *page, int numpages) { } static inline void kernel_unpoison_pages(struct page *page, int numpages) { } #endif -DECLARE_STATIC_KEY_FALSE(init_on_alloc); +DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); static inline bool want_init_on_alloc(gfp_t flags) { - if (static_branch_unlikely(&init_on_alloc)) + if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, + &init_on_alloc)) return true; return flags & __GFP_ZERO; } -DECLARE_STATIC_KEY_FALSE(init_on_free); +DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); static inline bool want_init_on_free(void) { - return static_branch_unlikely(&init_on_free); + return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, + &init_on_free); } extern bool _debug_pagealloc_enabled_early; diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h index 490db6886dcc..79184948fab4 100644 --- a/include/linux/moxtet.h +++ b/include/linux/moxtet.h @@ -2,7 +2,7 @@ /* * Turris Mox module configuration bus driver * - * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2019 Marek BehĂșn <kabel@kernel.org> */ #ifndef __LINUX_MOXTET_H diff --git a/include/linux/nd.h b/include/linux/nd.h index cec526c8043d..ee9ad76afbba 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -11,6 +11,7 @@ enum nvdimm_event { NVDIMM_REVALIDATE_POISON, + NVDIMM_REVALIDATE_REGION, }; enum nvdimm_claim_class { diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 7d3537c40ec9..26a13294318c 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *); int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); -void arpt_unregister_table(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); +void arpt_unregister_table(struct net *net, struct xt_table *table); +void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + 
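Not part of the diff: to pair with the expanded mhi_prepare_for_transfer()/mhi_unprepare_from_transfer() kernel-doc above, a hypothetical MHI client driver would typically call them from its probe and remove callbacks:

static int foo_mhi_probe(struct mhi_device *mhi_dev,
                         const struct mhi_device_id *id)
{
        /* Starts the UL and DL channels; both sides must be DISABLED. */
        return mhi_prepare_for_transfer(mhi_dev);
}

static void foo_mhi_remove(struct mhi_device *mhi_dev)
{
        /* Resets both channels and frees the host-side context. */
        mhi_unprepare_from_transfer(mhi_dev);
}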
const struct nf_hook_ops *ops); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 2f5c4e6ecd8a..3a956145a25c 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net, const struct ebt_table *table, const struct nf_hook_ops *ops, struct ebt_table **res); -extern void ebt_unregister_table(struct net *net, struct ebt_table *table, - const struct nf_hook_ops *); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table); +void ebt_unregister_table_pre_exit(struct net *net, const char *tablename, + const struct nf_hook_ops *ops); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 052293f4cbdb..923dada24eb4 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -65,6 +65,10 @@ int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); +int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id, + u32 *val); +int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id, + u64 *val); /* direct nvmem device read/write interface */ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 4462ed2c18cd..461b7aa587ba 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -19,8 +19,14 @@ enum OID { OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ + OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ + OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ + OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ + OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ + OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ + OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ + OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ @@ -58,6 +64,7 @@ enum OID { OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ + OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ @@ -113,10 +120,16 @@ enum OID { OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */ OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */ + /* TCG defined OIDS for TPM based keys */ + OID_TPMLoadableKey, /* 2.23.133.10.1.3 */ + OID_TPMImportableKey, /* 2.23.133.10.1.4 */ + OID_TPMSealedData, /* 2.23.133.10.1.5 */ + OID__NR }; extern enum OID look_up_OID(const void *data, size_t datasize); +extern int parse_OID(const void *data, size_t datasize, enum OID *oid); extern int sprint_oid(const void *, size_t, char *, size_t); extern int sprint_OID(enum OID, char *, size_t); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 
e435bdb0bab3..0ed434d02196 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -44,6 +44,12 @@ enum phy_mode { PHY_MODE_DP }; +enum phy_media { + PHY_MEDIA_DEFAULT, + PHY_MEDIA_SR, + PHY_MEDIA_DAC, +}; + /** * union phy_configure_opts - Opaque generic phy configuration * @@ -64,6 +70,8 @@ union phy_configure_opts { * @power_on: powering on the phy * @power_off: powering off the phy * @set_mode: set the mode of the phy + * @set_media: set the media type of the phy (optional) + * @set_speed: set the speed of the phy (optional) * @reset: resetting the phy * @calibrate: calibrate the phy * @release: ops to be performed while the consumer relinquishes the PHY @@ -75,6 +83,8 @@ struct phy_ops { int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode); + int (*set_media)(struct phy *phy, enum phy_media media); + int (*set_speed)(struct phy *phy, int speed); /** * @configure: @@ -215,6 +225,8 @@ int phy_power_off(struct phy *phy); int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode); #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) +int phy_set_media(struct phy *phy, enum phy_media media); +int phy_set_speed(struct phy *phy, int speed); int phy_configure(struct phy *phy, union phy_configure_opts *opts); int phy_validate(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts *opts); @@ -344,6 +356,20 @@ static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) +static inline int phy_set_media(struct phy *phy, enum phy_media media) +{ + if (!phy) + return 0; + return -ENODEV; +} + +static inline int phy_set_speed(struct phy *phy, int speed) +{ + if (!phy) + return 0; + return -ENODEV; +} + static inline enum phy_mode phy_get_mode(struct phy *phy) { return PHY_MODE_INVALID; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 5ff8597ceabd..6035d9a98fb8 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -3467,6 +3467,7 @@ struct ec_response_get_next_event_v1 { #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 #define EC_MKBP_BASE_ATTACHED 2 +#define EC_MKBP_FRONT_PROXIMITY 3 /* Run keyboard factory test scanning */ #define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068 diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 8b30b14b47d3..f377817ce75c 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -85,6 +85,7 @@ * omap2+ specific GPIO registers */ #define OMAP24XX_GPIO_REVISION 0x0000 +#define OMAP24XX_GPIO_SYSCONFIG 0x0010 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c @@ -108,6 +109,7 @@ #define OMAP24XX_GPIO_SETDATAOUT 0x0094 #define OMAP4_GPIO_REVISION 0x0000 +#define OMAP4_GPIO_SYSCONFIG 0x0010 #define OMAP4_GPIO_EOI 0x0020 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 @@ -148,6 +150,7 @@ #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; + u16 sysconfig; u16 direction; u16 datain; u16 dataout; diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index 93974f4cfba1..f05b37521f67 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ 
b/include/linux/platform_data/invensense_mpu6050.h @@ -12,7 +12,7 @@ * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to - * work on this platform. The orientation matricies are 3x3 rotation matricies + * work on this platform. The orientation matrices are 3x3 rotation matrices * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 3f23f6e430bf..cd81e060863c 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev) } #endif /* CONFIG_SUPERH */ +/* For now only SuperH uses it */ +void early_platform_cleanup(void); + #endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h deleted file mode 100644 index 7bf49908be06..000000000000 --- a/include/linux/pps-gpio.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * pps-gpio.h -- PPS client for GPIOs - * - * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca> - */ - -#ifndef _PPS_GPIO_H -#define _PPS_GPIO_H - -struct pps_gpio_platform_data { - struct gpio_desc *gpio_pin; - struct gpio_desc *echo_pin; - bool assert_falling_edge; - bool capture_clear; - unsigned int echo_active_ms; -}; - -#endif /* _PPS_GPIO_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 69cc8b64aa3a..9881eac0698f 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -79,7 +79,11 @@ #define nmi_count() (preempt_count() & NMI_MASK) #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#ifdef CONFIG_PREEMPT_RT +# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK) +#else +# define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#endif #define irq_count() (nmi_count() | hardirq_count() | softirq_count()) /* diff --git a/include/linux/property.h b/include/linux/property.h index dd4687b56239..0d876316e61d 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -254,6 +254,13 @@ struct software_node_ref_args { u64 args[NR_FWNODE_REFERENCE_ARGS]; }; +#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \ +(const struct software_node_ref_args) { \ + .node = _ref_, \ + .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ + .args = { __VA_ARGS__ }, \ +} + /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. 
@@ -362,11 +369,7 @@ struct property_entry { .name = _name_, \ .length = sizeof(struct software_node_ref_args), \ .type = DEV_PROP_REF, \ - { .pointer = &(const struct software_node_ref_args) { \ - .node = _ref_, \ - .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ - .args = { __VA_ARGS__ }, \ - } }, \ + { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \ } struct property_entry * diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h new file mode 100644 index 000000000000..fd80fab663a9 --- /dev/null +++ b/include/linux/randomize_kstack.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_RANDOMIZE_KSTACK_H +#define _LINUX_RANDOMIZE_KSTACK_H + +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <linux/percpu-defs.h> + +DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, + randomize_kstack_offset); +DECLARE_PER_CPU(u32, kstack_offset); + +/* + * Do not use this anywhere else in the kernel. This is used here because + * it provides an arch-agnostic way to grow the stack with correct + * alignment. Also, since this use is being explicitly masked to a max of + * 10 bits, stack-clash style attacks are unlikely. For more details see + * "VLAs" in Documentation/process/deprecated.rst + */ +void *__builtin_alloca(size_t size); +/* + * Use, at most, 10 bits of entropy. We explicitly cap this to keep the + * "VLA" from being unbounded (see above). 10 bits leaves enough room for + * per-arch offset masks to reduce entropy (by removing higher bits, since + * high entropy may overly constrain usable stack space), and for + * compiler/arch-specific stack alignment to remove the lower bits. + */ +#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF) + +/* + * These macros must be used during syscall entry when interrupts and + * preempt are disabled, and after user registers have been stored to + * the stack. + */ +#define add_random_kstack_offset() do { \ + if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ + &randomize_kstack_offset)) { \ + u32 offset = raw_cpu_read(kstack_offset); \ + u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \ + /* Keep allocation even after "ptr" loses scope. 
*/ \ + asm volatile("" : "=o"(*ptr) :: "memory"); \ + } \ +} while (0) + +#define choose_random_kstack_offset(rand) do { \ + if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ + &randomize_kstack_offset)) { \ + u32 offset = raw_cpu_read(kstack_offset); \ + offset ^= (rand); \ + raw_cpu_write(kstack_offset, offset); \ + } \ +} while (0) + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index bd04f722714f..6d855ef091ba 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -334,7 +334,8 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ diff --git a/include/linux/sched.h b/include/linux/sched.h index ef00bb22164c..743a613c9cf3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1044,6 +1044,9 @@ struct task_struct { int softirq_context; int irq_config; #endif +#ifdef CONFIG_PREEMPT_RT + int softirq_disable_cnt; +#endif #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 8edbbf5f2f93..822c048934e3 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_prot->unhash = psock->saved_unhash; if (inet_csk_has_ulp(sk)) { + /* TLS does not have an unhash proto in SW cases, but we need + * to ensure we stop using the sock_map unhash routine because + * the associated psock is being removed. So use the original + * unhash handler. + */ + WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); } else { sk->sk_write_space = psock->saved_write_space; diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index d08039d65825..ced07f8fde87 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -125,6 +125,12 @@ enum sdw_dpn_grouping { SDW_BLK_GRP_CNT_4 = 3, }; +/* block packing mode enum */ +enum sdw_dpn_pkg_mode { + SDW_BLK_PKG_PER_PORT = 0, + SDW_BLK_PKG_PER_CHANNEL = 1 +}; + /** * enum sdw_stream_type: data stream type * @@ -405,6 +411,7 @@ struct sdw_slave_prop { * command * @mclk_freq: clock reference passed to SoundWire Master, in Hz. * @hw_disabled: if true, the Master is not functional, typically due to pin-mux + * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification */ struct sdw_master_prop { u32 revision; @@ -421,8 +428,29 @@ struct sdw_master_prop { u32 err_threshold; u32 mclk_freq; bool hw_disabled; + u64 quirks; }; +/* Definitions for Master quirks */ + +/* + * In a number of platforms bus clashes are reported after a hardware + * reset but without any explanations or evidence of a real problem. + * The following quirk will discard all initial bus clash interrupts + * but will leave the detection on should real bus clashes happen + */ +#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH BIT(0) + +/* + * Some Slave devices have known issues with incorrect parity errors + * reported after a hardware reset. 
However during integration unexplained + * parity errors can be reported by Slave devices, possibly due to electrical + * issues at the Master level. + * The following quirk will discard all initial parity errors but will leave + * the detection on should real parity errors happen. + */ +#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY BIT(1) + int sdw_master_read_prop(struct sdw_bus *bus); int sdw_slave_read_prop(struct sdw_slave *slave); @@ -614,6 +642,7 @@ struct sdw_slave_ops { * @debugfs: Slave debugfs * @node: node for bus list * @port_ready: Port ready completion flag for each Slave port + * @m_port_map: static Master port map for each Slave port * @dev_num: Current Device Number, values can be 0 or dev_num_sticky * @dev_num_sticky: one-time static Device Number assigned by Bus * @probed: boolean tracking driver state @@ -645,6 +674,7 @@ struct sdw_slave { #endif struct list_head node; struct completion port_ready[SDW_MAX_PORTS]; + unsigned int m_port_map[SDW_MAX_PORTS]; enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; @@ -804,6 +834,7 @@ struct sdw_defer { /** * struct sdw_master_ops - Master driver ops * @read_prop: Read Master properties + * @override_adr: Override value read from firmware (quirk for buggy firmware) * @xfer_msg: Transfer message callback * @xfer_msg_defer: Defer version of transfer message callback * @reset_page_addr: Reset the SCP page address registers @@ -813,7 +844,8 @@ struct sdw_defer { */ struct sdw_master_ops { int (*read_prop)(struct sdw_bus *bus); - + u64 (*override_adr) + (struct sdw_bus *bus, u64 addr); enum sdw_command_response (*xfer_msg) (struct sdw_bus *bus, struct sdw_msg *msg); enum sdw_command_response (*xfer_msg_defer) @@ -1009,5 +1041,7 @@ int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value); int sdw_read_no_pm(struct sdw_slave *slave, u32 addr); int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id); +void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id); #endif /* __SOUNDWIRE_H */ diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 50e2df30b0aa..9edecb494e9e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -52,8 +52,27 @@ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr); */ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs); + +/** + * arch_stack_walk_reliable - Architecture specific function to walk the + * stack reliably + * + * @consume_entry: Callback which is invoked by the architecture code for + * each entry. + * @cookie: Caller supplied pointer which is handed back to + * @consume_entry + * @task: Pointer to a task struct, can be NULL + * + * This function returns an error if it detects any unreliable + * features of the stack. Otherwise it guarantees that the stack + * trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is + * inactive and its stack is pinned. 
+ */ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task); + void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, const struct pt_regs *regs); diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 85ecc789f4ff..e01b61ab86b1 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -20,6 +20,7 @@ * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); + * static_call_query(name); * * Usage example: * @@ -91,6 +92,10 @@ * * which will include the required value tests to avoid NULL-pointer * dereferences. + * + * To query which function is currently set to be called, use: + * + * func = static_call_query(name); */ #include <linux/types.h> @@ -118,6 +123,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool STATIC_CALL_TRAMP_ADDR(name), func); \ }) +#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func)) + #ifdef CONFIG_HAVE_STATIC_CALL_INLINE extern int __init static_call_init(void); @@ -128,16 +135,6 @@ struct static_call_mod { struct static_call_site *sites; }; -struct static_call_key { - void *func; - union { - /* bit 0: 0 = mods, 1 = sites */ - unsigned long type; - struct static_call_mod *mods; - struct static_call_site *sites; - }; -}; - /* For finding the key associated with a trampoline */ struct static_call_tramp_key { s32 tramp; @@ -187,10 +184,6 @@ extern long __static_call_return0(void); static inline int static_call_init(void) { return 0; } -struct static_call_key { - void *func; -}; - #define __DEFINE_STATIC_CALL(name, _func, _func_init) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ @@ -205,6 +198,7 @@ struct static_call_key { }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) + #define static_call_cond(name) (void)__static_call(name) static inline @@ -243,10 +237,6 @@ static inline long __static_call_return0(void) static inline int static_call_init(void) { return 0; } -struct static_call_key { - void *func; -}; - static inline long __static_call_return0(void) { return 0; diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h index ae5662d368b9..5a00b8b2cf9f 100644 --- a/include/linux/static_call_types.h +++ b/include/linux/static_call_types.h @@ -58,11 +58,25 @@ struct static_call_site { __raw_static_call(name); \ }) +struct static_call_key { + void *func; + union { + /* bit 0: 0 = mods, 1 = sites */ + unsigned long type; + struct static_call_mod *mods; + struct static_call_site *sites; + }; +}; + #else /* !CONFIG_HAVE_STATIC_CALL_INLINE */ #define __STATIC_CALL_ADDRESSABLE(name) #define __static_call(name) __raw_static_call(name) +struct static_call_key { + void *func; +}; + #endif /* CONFIG_HAVE_STATIC_CALL_INLINE */ #ifdef MODULE @@ -77,6 +91,10 @@ struct static_call_site { #else +struct static_call_key { + void *func; +}; + #define static_call(name) \ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)) diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index f4b1ba887384..0806796eabcb 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -344,16 +344,16 @@ struct ssam_request_spec_md { * request has been fully completed. The required transport buffer will be * allocated on the stack. 
* - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl)``, returning the status of the request, which is zero on success and - * negative on failure. The ``ctrl`` parameter is the controller via which the - * request is being sent. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl)``, returning the status of the request, which is + * zero on success and negative on failure. The ``ctrl`` parameter is the + * controller via which the request is being sent. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \ - int name(struct ssam_controller *ctrl) \ + static int name(struct ssam_controller *ctrl) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -383,17 +383,17 @@ struct ssam_request_spec_md { * returning once the request has been fully completed. The required transport * buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, const atype *arg)``, returning the status of the request, which is - * zero on success and negative on failure. The ``ctrl`` parameter is the - * controller via which the request is sent. The request argument is specified - * via the ``arg`` pointer. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, const atype *arg)``, returning the status of the + * request, which is zero on success and negative on failure. The ``ctrl`` + * parameter is the controller via which the request is sent. The request + * argument is specified via the ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \ - int name(struct ssam_controller *ctrl, const atype *arg) \ + static int name(struct ssam_controller *ctrl, const atype *arg) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -424,17 +424,17 @@ struct ssam_request_spec_md { * request itself, returning once the request has been fully completed. The * required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, rtype *ret)``, returning the status of the request, which is zero on - * success and negative on failure. The ``ctrl`` parameter is the controller - * via which the request is sent. The request's return value is written to the - * memory pointed to by the ``ret`` parameter. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, rtype *ret)``, returning the status of the request, + * which is zero on success and negative on failure. The ``ctrl`` parameter is + * the controller via which the request is sent. The request's return value is + * written to the memory pointed to by the ``ret`` parameter. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \ - int name(struct ssam_controller *ctrl, rtype *ret) \ + static int name(struct ssam_controller *ctrl, rtype *ret) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -483,17 +483,17 @@ struct ssam_request_spec_md { * returning once the request has been fully completed. 
The required transport * buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is - * zero on success and negative on failure. The ``ctrl`` parameter is the - * controller via which the request is sent, ``tid`` the target ID for the - * request, and ``iid`` the instance ID. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid)``, returning the status of the + * request, which is zero on success and negative on failure. The ``ctrl`` + * parameter is the controller via which the request is sent, ``tid`` the + * target ID for the request, and ``iid`` the instance ID. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ @@ -524,18 +524,18 @@ struct ssam_request_spec_md { * the request itself, returning once the request has been fully completed. * The required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the - * request, which is zero on success and negative on failure. The ``ctrl`` - * parameter is the controller via which the request is sent, ``tid`` the - * target ID for the request, and ``iid`` the instance ID. The request argument - * is specified via the ``arg`` pointer. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the + * status of the request, which is zero on success and negative on failure. + * The ``ctrl`` parameter is the controller via which the request is sent, + * ``tid`` the target ID for the request, and ``iid`` the instance ID. The + * request argument is specified via the ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ @@ -567,18 +567,18 @@ struct ssam_request_spec_md { * execution of the request itself, returning once the request has been fully * completed. The required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request, - * which is zero on success and negative on failure. The ``ctrl`` parameter is - * the controller via which the request is sent, ``tid`` the target ID for the - * request, and ``iid`` the instance ID. The request's return value is written - * to the memory pointed to by the ``ret`` parameter. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status + * of the request, which is zero on success and negative on failure. 
The + * ``ctrl`` parameter is the controller via which the request is sent, ``tid`` + * the target ID for the request, and ``iid`` the instance ID. The request's + * return value is written to the memory pointed to by the ``ret`` parameter. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 02f3e06c0a60..4441ad667c3f 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -336,17 +336,18 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * request has been fully completed. The required transport buffer will be * allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_device *sdev)``, - * returning the status of the request, which is zero on success and negative - * on failure. The ``sdev`` parameter specifies both the target device of the - * request and by association the controller via which the request is sent. + * The generated function is defined as ``static int name(struct ssam_device + * *sdev)``, returning the status of the request, which is zero on success and + * negative on failure. The ``sdev`` parameter specifies both the target + * device of the request and by association the controller via which the + * request is sent. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \ - int name(struct ssam_device *sdev) \ + static int name(struct ssam_device *sdev) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance); \ @@ -368,19 +369,19 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * itself, returning once the request has been fully completed. The required * transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_device *sdev, - * const atype *arg)``, returning the status of the request, which is zero on - * success and negative on failure. The ``sdev`` parameter specifies both the - * target device of the request and by association the controller via which - * the request is sent. The request's argument is specified via the ``arg`` - * pointer. + * The generated function is defined as ``static int name(struct ssam_device + * *sdev, const atype *arg)``, returning the status of the request, which is + * zero on success and negative on failure. The ``sdev`` parameter specifies + * both the target device of the request and by association the controller via + * which the request is sent. The request's argument is specified via the + * ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) 
\ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \ - int name(struct ssam_device *sdev, const atype *arg) \ + static int name(struct ssam_device *sdev, const atype *arg) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance, arg); \ @@ -402,8 +403,8 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * itself, returning once the request has been fully completed. The required * transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_device *sdev, - * rtype *ret)``, returning the status of the request, which is zero on + * The generated function is defined as ``static int name(struct ssam_device + * *sdev, rtype *ret)``, returning the status of the request, which is zero on * success and negative on failure. The ``sdev`` parameter specifies both the * target device of the request and by association the controller via which * the request is sent. The request's return value is written to the memory @@ -414,7 +415,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); */ #define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \ - int name(struct ssam_device *sdev, rtype *ret) \ + static int name(struct ssam_device *sdev, rtype *ret) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance, ret); \ diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 754b74a2167f..c6540ceea143 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -124,7 +124,7 @@ extern u64 timecounter_read(struct timecounter *tc); * This allows conversion of cycle counter values which were generated * in the past. */ -extern u64 timecounter_cyc2time(struct timecounter *tc, +extern u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp); #endif diff --git a/include/linux/timex.h b/include/linux/timex.h index 9c2e54faf9b7..059b18eb1f1f 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -133,7 +133,7 @@ /* * kernel variables - * Note: maximum error = NTP synch distance = dispersion + delay / 2; + * Note: maximum error = NTP sync distance = dispersion + delay / 2; * estimated error = NTP dispersion. */ extern unsigned long tick_usec; /* USER_HZ period (usec) */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 543aa3b1dedc..aa11fe323c56 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -305,6 +305,8 @@ struct tpm_buf { }; enum tpm2_object_attributes { + TPM2_OA_FIXED_TPM = BIT(1), + TPM2_OA_FIXED_PARENT = BIT(4), TPM2_OA_USER_WITH_AUTH = BIT(6), }; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 64cf8ebdc4ec..f6c5f784be5a 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -63,6 +63,9 @@ struct user_namespace { kgid_t group; struct ns_common ns; unsigned long flags; + /* parent_could_setfcap: true if the creator if this ns had CAP_SETFCAP + * in its effective capability set at the child ns creation time. */ + bool parent_could_setfcap; #ifdef CONFIG_KEYS /* List of joinable keyrings in this namespace. 
Modification access of diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 6b5fcfa1e555..b465f8f3e554 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -62,15 +62,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, return -EINVAL; } + skb_reset_mac_header(skb); + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); - u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); + u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start); + u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); + u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); + + if (!pskb_may_pull(skb, needed)) + return -EINVAL; if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; p_off = skb_transport_offset(skb) + thlen; - if (p_off > skb_headlen(skb)) + if (!pskb_may_pull(skb, p_off)) return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. @@ -100,14 +106,14 @@ retry: } p_off = keys.control.thoff + thlen; - if (p_off > skb_headlen(skb) || + if (!pskb_may_pull(skb, p_off) || keys.basic.ip_proto != ip_proto) return -EINVAL; skb_set_transport_header(skb, keys.control.thoff); } else if (gso_type) { p_off = thlen; - if (p_off > skb_headlen(skb)) + if (!pskb_may_pull(skb, p_off)) return -EINVAL; } } diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 8ef7e7faea1e..2cb3913c1f50 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -37,7 +37,7 @@ struct wmi_driver { const struct wmi_device_id *id_table; int (*probe)(struct wmi_device *wdev, const void *context); - int (*remove)(struct wmi_device *wdev); + void (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, struct wmi_ioctl_buffer *arg); |
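To make the final hunk concrete: wmi_driver's ->remove() callback now returns void, so teardown proceeds unconditionally and failures can only be logged. A minimal, hypothetical driver skeleton against the new prototype (the GUID, names and empty callbacks are invented for illustration only):

#include <linux/module.h>
#include <linux/wmi.h>

#define EXAMPLE_WMI_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"

static int example_wmi_probe(struct wmi_device *wdev, const void *context)
{
	/* Allocate per-device state, set drvdata, register interfaces. */
	return 0;
}

static void example_wmi_remove(struct wmi_device *wdev)
{
	/* Undo probe unconditionally; there is no status to return. */
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = EXAMPLE_WMI_GUID },
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.id_table = example_wmi_id_table,
	.probe = example_wmi_probe,
	.remove = example_wmi_remove,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");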
