297 files changed, 4848 insertions, 3398 deletions
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index a137a0e6d068..77e0ece2b1d6 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -315,11 +315,15 @@ indeed the normal API is implemented in terms of the advanced API. The advanced API is only available to modules with a GPL-compatible license. The advanced API is based around the xa_state. This is an opaque data -structure which you declare on the stack using the XA_STATE() -macro. This macro initialises the xa_state ready to start walking -around the XArray. It is used as a cursor to maintain the position -in the XArray and let you compose various operations together without -having to restart from the top every time. +structure which you declare on the stack using the XA_STATE() macro. +This macro initialises the xa_state ready to start walking around the +XArray. It is used as a cursor to maintain the position in the XArray +and let you compose various operations together without having to restart +from the top every time. The contents of the xa_state are protected by +the rcu_read_lock() or the xas_lock(). If you need to drop whichever of +those locks is protecting your state and tree, you must call xas_pause() +so that future calls do not rely on the parts of the state which were +left unprotected. The xa_state is also used to store errors. You can call xas_error() to retrieve the error. All operations check whether diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt index 6ce0b212ec6d..606b4b1b709d 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt @@ -81,4 +81,4 @@ Example: }; }; -[1]. Documentation/devicetree/bindings/arm/idle-states.yaml +[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml index 8b77cf83a095..dd83ef278af0 100644 --- a/Documentation/devicetree/bindings/arm/psci.yaml +++ b/Documentation/devicetree/bindings/arm/psci.yaml @@ -101,7 +101,7 @@ properties: bindings in [1]) must specify this property. [1] Kernel documentation - ARM idle states bindings - Documentation/devicetree/bindings/arm/idle-states.yaml + Documentation/devicetree/bindings/cpu/idle-states.yaml patternProperties: "^power-domain-": diff --git a/Documentation/devicetree/bindings/arm/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml index 4d381fa1ee57..fa4d4142ac93 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.yaml +++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml @@ -1,25 +1,30 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/arm/idle-states.yaml# +$id: http://devicetree.org/schemas/cpu/idle-states.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM idle states binding description +title: Idle states binding description maintainers: - Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + - Anup Patel <anup@brainfault.org> description: |+ ========================================== 1 - Introduction ========================================== - ARM systems contain HW capable of managing power consumption dynamically, - where cores can be put in different low-power states (ranging from simple wfi - to power gating) according to OS PM policies. 
The CPU states representing the - range of dynamic idle states that a processor can enter at run-time, can be - specified through device tree bindings representing the parameters required to - enter/exit specific idle states on a given processor. + ARM and RISC-V systems contain HW capable of managing power consumption + dynamically, where cores can be put in different low-power states (ranging + from simple wfi to power gating) according to OS PM policies. The CPU states + representing the range of dynamic idle states that a processor can enter at + run-time, can be specified through device tree bindings representing the + parameters required to enter/exit specific idle states on a given processor. + + ========================================== + 2 - ARM idle states + ========================================== According to the Server Base System Architecture document (SBSA, [3]), the power states an ARM CPU can be put into are identified by the following list: @@ -43,8 +48,23 @@ description: |+ The device tree binding definition for ARM idle states is the subject of this document. + ========================================== + 3 - RISC-V idle states + ========================================== + + On RISC-V systems, the HARTs (or CPUs) [6] can be put in platform specific + suspend (or idle) states (ranging from simple WFI to power gating). The + RISC-V SBI v0.3 (or higher) [7] hart state management extension provides a + standard mechanism for the OS to request HART state transitions. + + The platform specific suspend (or idle) states of a hart can be either + retentive or non-retentive in nature. A retentive suspend state will + preserve HART registers and CSR values for all privilege modes whereas + a non-retentive suspend state will not preserve HART registers and CSR + values. + =========================================== - 2 - idle-states definitions + 4 - idle-states definitions =========================================== Idle states are characterized for a specific system through a set of @@ -211,10 +231,10 @@ description: |+ properties specification that is the subject of the following sections. =========================================== - 3 - idle-states node + 5 - idle-states node =========================================== - ARM processor idle states are defined within the idle-states node, which is + The processor idle states are defined within the idle-states node, which is a direct child of the cpus node [1] and provides a container where the processor idle states, defined as device tree nodes, are listed. @@ -223,7 +243,7 @@ description: |+ just supports idle_standby, an idle-states node is not required. 
=========================================== - 4 - References + 6 - References =========================================== [1] ARM Linux Kernel documentation - CPUs bindings @@ -238,9 +258,15 @@ description: |+ [4] ARM Architecture Reference Manuals http://infocenter.arm.com/help/index.jsp - [6] ARM Linux Kernel documentation - Booting AArch64 Linux + [5] ARM Linux Kernel documentation - Booting AArch64 Linux Documentation/arm64/booting.rst + [6] RISC-V Linux Kernel documentation - CPUs bindings + Documentation/devicetree/bindings/riscv/cpus.yaml + + [7] RISC-V Supervisor Binary Interface (SBI) + http://github.com/riscv/riscv-sbi-doc/riscv-sbi.adoc + properties: $nodename: const: idle-states @@ -253,7 +279,7 @@ properties: On ARM 32-bit systems this property is optional This assumes that the "enable-method" property is set to "psci" in the cpu - node[6] that is responsible for setting up CPU idle management in the OS + node[5] that is responsible for setting up CPU idle management in the OS implementation. const: psci @@ -265,8 +291,8 @@ patternProperties: as follows. The idle state entered by executing the wfi instruction (idle_standby - SBSA,[3][4]) is considered standard on all ARM platforms and therefore - must not be listed. + SBSA,[3][4]) is considered standard on all ARM and RISC-V platforms and + therefore must not be listed. In addition to the properties listed above, a state node may require additional properties specific to the entry-method defined in the @@ -275,7 +301,27 @@ patternProperties: properties: compatible: - const: arm,idle-state + enum: + - arm,idle-state + - riscv,idle-state + + arm,psci-suspend-param: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + power_state parameter to pass to the ARM PSCI suspend call. + + Device tree nodes that require usage of the PSCI CPU_SUSPEND function + (i.e. idle state nodes whose entry-method property is set to "psci") + must specify this property. + + riscv,sbi-suspend-param: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + suspend_type parameter to pass to the RISC-V SBI HSM suspend call. + + This property is required in idle state nodes of device trees meant + for RISC-V systems. For more details on the suspend_type parameter + refer to the SBI specification v0.3 (or higher) [7]. local-timer-stop: description: @@ -317,6 +363,8 @@ patternProperties: description: A string used as a descriptive name for the idle state. 
+ additionalProperties: false + required: - compatible - entry-latency-us @@ -658,4 +706,150 @@ examples: }; }; + - | + // Example 3 (RISC-V 64-bit, 4-cpu systems, two clusters): + + cpus { + #size-cells = <0>; + #address-cells = <1>; + + cpu@0 { + device_type = "cpu"; + compatible = "riscv"; + reg = <0x0>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,sv48"; + cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>, + <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>; + + cpu_intc0: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "riscv"; + reg = <0x1>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,sv48"; + cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>, + <&CLUSTER_RET_0>, <&CLUSTER_NONRET_0>; + + cpu_intc1: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@10 { + device_type = "cpu"; + compatible = "riscv"; + reg = <0x10>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,sv48"; + cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>, + <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>; + + cpu_intc10: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@11 { + device_type = "cpu"; + compatible = "riscv"; + reg = <0x11>; + riscv,isa = "rv64imafdc"; + mmu-type = "riscv,sv48"; + cpu-idle-states = <&CPU_RET_1_0>, <&CPU_NONRET_1_0>, + <&CLUSTER_RET_1>, <&CLUSTER_NONRET_1>; + + cpu_intc11: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + idle-states { + CPU_RET_0_0: cpu-retentive-0-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x10000000>; + entry-latency-us = <20>; + exit-latency-us = <40>; + min-residency-us = <80>; + }; + + CPU_NONRET_0_0: cpu-nonretentive-0-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x90000000>; + entry-latency-us = <250>; + exit-latency-us = <500>; + min-residency-us = <950>; + }; + + CLUSTER_RET_0: cluster-retentive-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x11000000>; + local-timer-stop; + entry-latency-us = <50>; + exit-latency-us = <100>; + min-residency-us = <250>; + wakeup-latency-us = <130>; + }; + + CLUSTER_NONRET_0: cluster-nonretentive-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x91000000>; + local-timer-stop; + entry-latency-us = <600>; + exit-latency-us = <1100>; + min-residency-us = <2700>; + wakeup-latency-us = <1500>; + }; + + CPU_RET_1_0: cpu-retentive-1-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x10000010>; + entry-latency-us = <20>; + exit-latency-us = <40>; + min-residency-us = <80>; + }; + + CPU_NONRET_1_0: cpu-nonretentive-1-0 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x90000010>; + entry-latency-us = <250>; + exit-latency-us = <500>; + min-residency-us = <950>; + }; + + CLUSTER_RET_1: cluster-retentive-1 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x11000010>; + local-timer-stop; + entry-latency-us = <50>; + exit-latency-us = <100>; + min-residency-us = <250>; + wakeup-latency-us = <130>; + }; + + CLUSTER_NONRET_1: cluster-nonretentive-1 { + compatible = "riscv,idle-state"; + riscv,sbi-suspend-param = <0x91000010>; + local-timer-stop; + entry-latency-us = <600>; + exit-latency-us = <1100>; + min-residency-us = <2700>; + wakeup-latency-us = <1500>; + }; + }; + }; + ... 
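The xarray.rst hunk near the top of this diff documents that the xa_state is protected by rcu_read_lock() or xas_lock(), and that xas_pause() must be called before whichever lock is dropped. A minimal sketch of that pattern, not part of the patch (the function name and the resched check are illustrative), could look like:

#include <linux/xarray.h>
#include <linux/sched.h>

/* Walk every entry in @xa under the RCU read lock, pausing the walk
 * whenever the lock has to be dropped so the xa_state stays valid. */
static void example_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		/* ... process entry ... */
		if (need_resched()) {
			xas_pause(&xas);	/* make the state safe to reuse */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

Resuming after xas_pause() continues from the saved index rather than restarting from the top of the tree, which is the composition the updated paragraph describes.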
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index aa5fb64d57eb..d632ac76532e 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -99,6 +99,14 @@ properties: - compatible - interrupt-controller + cpu-idle-states: + $ref: '/schemas/types.yaml#/definitions/phandle-array' + items: + maxItems: 1 + description: | + List of phandles to idle state nodes supported + by this hart (see ./idle-states.yaml). + required: - riscv,isa - interrupt-controller diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst index b0d354fd8066..1af600db2e70 100644 --- a/Documentation/filesystems/cifs/ksmbd.rst +++ b/Documentation/filesystems/cifs/ksmbd.rst @@ -82,10 +82,10 @@ Signing Update Supported. Pre-authentication integrity Supported. SMB3 encryption(CCM, GCM) Supported. (CCM and GCM128 supported, GCM256 in progress) -SMB direct(RDMA) Partially Supported. SMB3 Multi-channel is - required to connect to Windows client. +SMB direct(RDMA) Supported. SMB3 Multi-channel Partially Supported. Planned to implement replay/retry mechanisms for future. +Receive Side Scaling mode Supported. SMB3.1.1 POSIX extension Supported. ACLs Partially Supported. only DACLs available, SACLs (auditing) is planned for the future. For diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst index 1d831e3cbcb3..8cc536d08f51 100644 --- a/Documentation/filesystems/fsverity.rst +++ b/Documentation/filesystems/fsverity.rst @@ -549,7 +549,7 @@ Pagecache ~~~~~~~~~ For filesystems using Linux's pagecache, the ``->readpage()`` and -``->readpages()`` methods must be modified to verify pages before they +``->readahead()`` methods must be modified to verify pages before they are marked Uptodate. Merely hooking ``->read_iter()`` would be insufficient, since ``->read_iter()`` is not used for memory maps. @@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or verification. Finally, pages where no decryption or verity error occurred are marked Uptodate, and the pages are unlocked. -Files on ext4 and f2fs may contain holes. Normally, ``->readpages()`` +Files on ext4 and f2fs may contain holes. Normally, ``->readahead()`` simply zeroes holes and sets the corresponding pages Uptodate; no bios are issued. To prevent this case from bypassing fs-verity, these filesystems use fsverity_verify_page() to verify hole pages. @@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document. - To prevent bypassing verification, pages must not be marked Uptodate until they've been verified. Currently, each filesystem is responsible for marking pages Uptodate via - ``->readpages()``. Therefore, currently it's not possible for + ``->readahead()``. Therefore, currently it's not possible for the VFS to do the verification on its own. Changing this would require significant changes to the VFS and all filesystems. 
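The fsverity.rst hunk above, together with the locking.rst and vfs.rst hunks that follow, reflects the removal of ->readpages() in favour of ->readahead(). As a rough sketch of what a block-based filesystem's hook looks like after this conversion (the examplefs_* names are hypothetical and not from the patch; mpage_readahead() is the existing generic helper):

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Hypothetical block-mapping callback; real mapping logic omitted. */
static int examplefs_get_block(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int create)
{
	return -EIO;
}

/* ->readahead() takes a readahead_control rather than a list of pages,
 * and the pages come back unlocked as the I/O completes; an fs-verity
 * user would verify each page before it is marked Uptodate. */
static void examplefs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.readahead	= examplefs_readahead,
};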
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 2998cec9af4b..c26d854275a0 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -241,8 +241,6 @@ prototypes:: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *folio); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -274,7 +272,6 @@ readpage: yes, unlocks shared writepages: dirty_folio maybe readahead: yes, unlocks shared -readpages: no shared write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: @@ -300,9 +297,6 @@ completion. ->readahead() unlocks the pages that I/O is attempted on like ->readpage(). -->readpages() populates the pagecache with the passed pages and starts -I/O against them. They come unlocked upon I/O completion. - ->writepage() is used for two purposes: for "memory cleansing" and for "sync". These are quite different operations and the behaviour may differ depending upon the mode. diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 4f14edf93941..794bd1a66bfb 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -726,8 +726,6 @@ cache in your filesystem. The following members are defined: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -817,15 +815,6 @@ cache in your filesystem. The following members are defined: completes successfully. Setting PageError on any page will be ignored; simply unlock the page if an I/O error occurs. -``readpages`` - called by the VM to read pages associated with the address_space - object. This is essentially just a vector version of readpage. - Instead of just one page, several pages are requested. - readpages is only used for read-ahead, so read errors are - ignored. If anything goes wrong, feel free to give up. - This interface is deprecated and will be removed by the end of - 2020; implement readahead instead. - ``write_begin`` Called by the generic buffered write code to ask the filesystem to prepare to write len bytes at the given offset in the file. diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst index ea915c196048..e23b876ad6eb 100644 --- a/Documentation/riscv/index.rst +++ b/Documentation/riscv/index.rst @@ -7,7 +7,6 @@ RISC-V architecture boot-image-header vm-layout - pmu patch-acceptance features diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 07a45474abe9..d13fa6600467 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -151,12 +151,6 @@ In order to create user controlled virtual machines on S390, check KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as privileged user (CAP_SYS_ADMIN). 
-To use hardware assisted virtualization on MIPS (VZ ASE) rather than -the default trap & emulate implementation (which changes the virtual -memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the -flag KVM_VM_MIPS_VZ. - - On arm64, the physical address size for a VM (IPA Size limit) is limited to 40bits by default. The limit can be configured if the host supports the extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use @@ -4081,6 +4075,11 @@ x2APIC MSRs are always allowed, independent of the ``default_allow`` setting, and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base register. +.. warning:: + MSR accesses coming from nested vmentry/vmexit are not filtered. + This includes both writes to individual VMCS fields and reads/writes + through the MSR lists pointed to by the VMCS. + If a bit is within one of the defined ranges, read and write accesses are guarded by the bitmap's value for the MSR index if the kind of access is included in the ``struct kvm_msr_filter_range`` flags. If no range @@ -5293,6 +5292,10 @@ type values: KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO Sets the guest physical address of the vcpu_info for a given vCPU. + As with the shared_info page for the VM, the corresponding page may be + dirtied at any time if event channel interrupt delivery is enabled, so + userspace should always assume that the page is dirty without relying + on dirty logging. KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO Sets the guest physical address of an additional pvclock structure @@ -7719,3 +7722,49 @@ only be invoked on a VM prior to the creation of VCPUs. At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting this capability will disable PMU virtualization for that VM. Usermode should adjust CPUID leaf 0xA to reflect that the PMU is disabled. + +9. Known KVM API problems +========================= + +In some cases, KVM's API has some inconsistencies or common pitfalls +that userspace needs to be aware of. This section details some of +these issues. + +Most of them are architecture specific, so the section is split by +architecture. + +9.1. x86 +-------- + +``KVM_GET_SUPPORTED_CPUID`` issues +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible +to take its result and pass it directly to ``KVM_SET_CPUID2``. This section +documents some cases in which that requires some care. + +Local APIC features +~~~~~~~~~~~~~~~~~~~ + +CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``, +but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or +``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` are used to enable in-kernel emulation of +the local APIC. + +The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature. + +CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``. +It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel +has enabled in-kernel emulation of the local APIC. + +Obsolete ioctls and capabilities +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually +available. Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if +available. 
+ +Ordering of KVM_GET_*/KVM_SET_* ioctls +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +TBD diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst index b6833c7bb474..e0a2c74e1043 100644 --- a/Documentation/virt/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -8,25 +8,13 @@ KVM :maxdepth: 2 api - amd-memory-encryption - cpuid - halt-polling - hypercalls - locking - mmu - msr - nested-vmx - ppc-pv - s390-diag - s390-pv - s390-pv-boot - timekeeping - vcpu-requests - - review-checklist + devices/index arm/index + s390/index + ppc-pv + x86/index - devices/index - - running-nested-guests + locking + vcpu-requests + review-checklist diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 5d27da356836..845a561629f1 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -210,32 +210,47 @@ time it will be set using the Dirty tracking mechanism described above. 3. Reference ------------ -:Name: kvm_lock +``kvm_lock`` +^^^^^^^^^^^^ + :Type: mutex :Arch: any :Protects: - vm_list -:Name: kvm_count_lock +``kvm_count_lock`` +^^^^^^^^^^^^^^^^^^ + :Type: raw_spinlock_t :Arch: any :Protects: - hardware virtualization enable/disable :Comment: 'raw' because hardware enabling/disabling must be atomic /wrt migration. -:Name: kvm_arch::tsc_write_lock -:Type: raw_spinlock +``kvm->mn_invalidate_lock`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:Type: spinlock_t +:Arch: any +:Protects: mn_active_invalidate_count, mn_memslots_update_rcuwait + +``kvm_arch::tsc_write_lock`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:Type: raw_spinlock_t :Arch: x86 :Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset} - tsc offset in vmcb :Comment: 'raw' because updating the tsc offsets must not be preempted. -:Name: kvm->mmu_lock -:Type: spinlock_t +``kvm->mmu_lock`` +^^^^^^^^^^^^^^^^^ +:Type: spinlock_t or rwlock_t :Arch: any :Protects: -shadow page/shadow tlb entry :Comment: it is a spinlock since it is used in mmu notifier. -:Name: kvm->srcu +``kvm->srcu`` +^^^^^^^^^^^^^ :Type: srcu lock :Arch: any :Protects: - kvm->memslots @@ -246,10 +261,20 @@ time it will be set using the Dirty tracking mechanism described above. The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu if it is needed by multiple functions. -:Name: blocked_vcpu_on_cpu_lock +``kvm->slots_arch_lock`` +^^^^^^^^^^^^^^^^^^^^^^^^ +:Type: mutex +:Arch: any (only needed on x86 though) +:Protects: any arch-specific fields of memslots that have to be modified + in a ``kvm->srcu`` read-side critical section. +:Comment: must be held before reading the pointer to the current memslots, + until after all changes to the memslots are complete + +``wakeup_vcpus_on_cpu_lock`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :Type: spinlock_t :Arch: x86 -:Protects: blocked_vcpu_on_cpu +:Protects: wakeup_vcpus_on_cpu :Comment: This is a per-CPU lock and it is used for VT-d posted-interrupts. When VT-d posted-interrupts is supported and the VM has assigned devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu diff --git a/Documentation/virt/kvm/s390/index.rst b/Documentation/virt/kvm/s390/index.rst new file mode 100644 index 000000000000..605f488f0cc5 --- /dev/null +++ b/Documentation/virt/kvm/s390/index.rst @@ -0,0 +1,12 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================== +KVM for s390 systems +==================== + +.. 
toctree:: :maxdepth: 2 + + s390-diag + s390-pv + s390-pv-boot diff --git a/Documentation/virt/kvm/s390-diag.rst b/Documentation/virt/kvm/s390/s390-diag.rst index ca85f030eb0b..ca85f030eb0b 100644 --- a/Documentation/virt/kvm/s390-diag.rst +++ b/Documentation/virt/kvm/s390/s390-diag.rst diff --git a/Documentation/virt/kvm/s390-pv-boot.rst b/Documentation/virt/kvm/s390/s390-pv-boot.rst index 73a6083cb5e7..73a6083cb5e7 100644 --- a/Documentation/virt/kvm/s390-pv-boot.rst +++ b/Documentation/virt/kvm/s390/s390-pv-boot.rst diff --git a/Documentation/virt/kvm/s390-pv.rst b/Documentation/virt/kvm/s390/s390-pv.rst index 8e41a3b63fa5..8e41a3b63fa5 100644 --- a/Documentation/virt/kvm/s390-pv.rst +++ b/Documentation/virt/kvm/s390/s390-pv.rst diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index b61d48aec36c..db43ee571f5a 100644 --- a/Documentation/virt/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -135,6 +135,16 @@ KVM_REQ_UNHALT such as a pending signal, which does not indicate the VCPU's halt emulation should stop, and therefore does not make the request. +KVM_REQ_OUTSIDE_GUEST_MODE + + This "request" ensures the target vCPU has exited guest mode prior to the + sender of the request continuing on. No action needs to be taken by the target, + and so no request is actually logged for the target. This request is similar + to a "kick", but unlike a kick it guarantees the vCPU has actually exited + guest mode. A kick only guarantees the vCPU will exit at some point in the + future, e.g. a previous kick may have started the process, but there's no + guarantee the to-be-kicked vCPU has fully exited guest mode. + KVM_REQUEST_MASK ---------------- diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst index 1c6847fff304..1c6847fff304 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/x86/cpuid.rst index bda3e3e737d7..bda3e3e737d7 100644 --- a/Documentation/virt/kvm/cpuid.rst +++ b/Documentation/virt/kvm/x86/cpuid.rst diff --git a/Documentation/virt/kvm/x86/errata.rst b/Documentation/virt/kvm/x86/errata.rst new file mode 100644 index 000000000000..806f049b6975 --- /dev/null +++ b/Documentation/virt/kvm/x86/errata.rst @@ -0,0 +1,39 @@ + +======================================= +Known limitations of CPU virtualization +======================================= + +Whenever perfect emulation of a CPU feature is impossible or too hard, KVM +has to choose between not implementing the feature at all or introducing +behavioral differences between virtual machines and bare metal systems. + +This file documents some of the known limitations that KVM has in +virtualizing CPU features. + +x86 +=== + +``KVM_GET_SUPPORTED_CPUID`` issues +---------------------------------- + +x87 features +~~~~~~~~~~~~ + +Unlike most other CPUID feature bits, CPUID[EAX=7,ECX=0]:EBX[6] +(FDP_EXCPTN_ONLY) and CPUID[EAX=7,ECX=0]:EBX[13] (ZERO_FCS_FDS) are +clear if the features are present and set if the features are not present. + +Clearing these bits in CPUID has no effect on the operation of the guest; +if these bits are set on hardware, the features will not be present on +any virtual machine that runs on that hardware. + +**Workaround:** It is recommended to always set these bits in guest CPUID. 
+Note however that any software (e.g ``WIN87EM.DLL``) expecting these features +to be present likely predates these CPUID feature bits, and therefore +doesn't know to check for them anyway. + +Nested virtualization features +------------------------------ + +TBD + diff --git a/Documentation/virt/kvm/halt-polling.rst b/Documentation/virt/kvm/x86/halt-polling.rst index 4922e4a15f18..4922e4a15f18 100644 --- a/Documentation/virt/kvm/halt-polling.rst +++ b/Documentation/virt/kvm/x86/halt-polling.rst diff --git a/Documentation/virt/kvm/hypercalls.rst b/Documentation/virt/kvm/x86/hypercalls.rst index e56fa8b9cfca..e56fa8b9cfca 100644 --- a/Documentation/virt/kvm/hypercalls.rst +++ b/Documentation/virt/kvm/x86/hypercalls.rst diff --git a/Documentation/virt/kvm/x86/index.rst b/Documentation/virt/kvm/x86/index.rst new file mode 100644 index 000000000000..7ff588826b9f --- /dev/null +++ b/Documentation/virt/kvm/x86/index.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================== +KVM for x86 systems +=================== + +.. toctree:: + :maxdepth: 2 + + amd-memory-encryption + cpuid + errata + halt-polling + hypercalls + mmu + msr + nested-vmx + running-nested-guests + timekeeping diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/x86/mmu.rst index 5b1ebad24c77..5b1ebad24c77 100644 --- a/Documentation/virt/kvm/mmu.rst +++ b/Documentation/virt/kvm/x86/mmu.rst diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/x86/msr.rst index 9315fc385fb0..9315fc385fb0 100644 --- a/Documentation/virt/kvm/msr.rst +++ b/Documentation/virt/kvm/x86/msr.rst diff --git a/Documentation/virt/kvm/nested-vmx.rst b/Documentation/virt/kvm/x86/nested-vmx.rst index ac2095d41f02..ac2095d41f02 100644 --- a/Documentation/virt/kvm/nested-vmx.rst +++ b/Documentation/virt/kvm/x86/nested-vmx.rst diff --git a/Documentation/virt/kvm/running-nested-guests.rst b/Documentation/virt/kvm/x86/running-nested-guests.rst index bd70c69468ae..bd70c69468ae 100644 --- a/Documentation/virt/kvm/running-nested-guests.rst +++ b/Documentation/virt/kvm/x86/running-nested-guests.rst diff --git a/Documentation/virt/kvm/timekeeping.rst b/Documentation/virt/kvm/x86/timekeeping.rst index 21ae7efa29ba..21ae7efa29ba 100644 --- a/Documentation/virt/kvm/timekeeping.rst +++ b/Documentation/virt/kvm/x86/timekeeping.rst diff --git a/MAINTAINERS b/MAINTAINERS index 4b73a79ffb7f..fd768d43e048 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4640,6 +4640,7 @@ F: drivers/input/touchscreen/chipone_icn8505.c CHROME HARDWARE PLATFORM SUPPORT M: Benson Leung <bleung@chromium.org> +L: chrome-platform@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git F: drivers/platform/chrome/ @@ -4648,6 +4649,7 @@ CHROMEOS EC CODEC DRIVER M: Cheng-Yi Chiang <cychiang@chromium.org> M: Tzung-Bi Shih <tzungbi@google.com> R: Guenter Roeck <groeck@chromium.org> +L: chrome-platform@lists.linux.dev S: Maintained F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml F: sound/soc/codecs/cros_ec_codec.* @@ -4655,6 +4657,7 @@ F: sound/soc/codecs/cros_ec_codec.* CHROMEOS EC SUBDRIVERS M: Benson Leung <bleung@chromium.org> R: Guenter Roeck <groeck@chromium.org> +L: chrome-platform@lists.linux.dev S: Maintained F: drivers/power/supply/cros_usbpd-charger.c N: cros_ec @@ -4662,11 +4665,13 @@ N: cros-ec CHROMEOS EC USB TYPE-C DRIVER M: Prashant Malani <pmalani@chromium.org> +L: chrome-platform@lists.linux.dev S: Maintained F: drivers/platform/chrome/cros_ec_typec.c CHROMEOS EC USB PD 
NOTIFY DRIVER M: Prashant Malani <pmalani@chromium.org> +L: chrome-platform@lists.linux.dev S: Maintained F: drivers/platform/chrome/cros_usbpd_notify.c F: include/linux/platform_data/cros_usbpd_notify.h @@ -5157,6 +5162,20 @@ S: Supported F: drivers/cpuidle/cpuidle-psci.h F: drivers/cpuidle/cpuidle-psci-domain.c +CPUIDLE DRIVER - DT IDLE PM DOMAIN +M: Ulf Hansson <ulf.hansson@linaro.org> +L: linux-pm@vger.kernel.org +S: Supported +F: drivers/cpuidle/dt_idle_genpd.c +F: drivers/cpuidle/dt_idle_genpd.h + +CPUIDLE DRIVER - RISC-V SBI +M: Anup Patel <anup@brainfault.org> +L: linux-pm@vger.kernel.org +L: linux-riscv@lists.infradead.org +S: Maintained +F: drivers/cpuidle/cpuidle-riscv-sbi.c + CRAMFS FILESYSTEM M: Nicolas Pitre <nico@fluxnic.net> S: Maintained @@ -6038,6 +6057,7 @@ F: drivers/scsi/dpt/ DRBD DRIVER M: Philipp Reisner <philipp.reisner@linbit.com> M: Lars Ellenberg <lars.ellenberg@linbit.com> +M: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> L: drbd-dev@lists.linbit.com S: Supported W: http://www.drbd.org @@ -10654,9 +10674,9 @@ F: tools/testing/selftests/ KERNEL SMB3 SERVER (KSMBD) M: Namjae Jeon <linkinjeon@kernel.org> -M: Sergey Senozhatsky <senozhatsky@chromium.org> M: Steve French <sfrench@samba.org> M: Hyunchul Lee <hyc.lee@gmail.com> +R: Sergey Senozhatsky <senozhatsky@chromium.org> L: linux-cifs@vger.kernel.org S: Maintained T: git git://git.samba.org/ksmbd.git @@ -793,10 +793,6 @@ ifdef CONFIG_CC_IS_CLANG KBUILD_CPPFLAGS += -Qunused-arguments # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable. KBUILD_CFLAGS += -Wno-gnu -# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the -# source of a reference will be _MergedGlobals and not on of the whitelisted names. -# See modpost pattern 2 -KBUILD_CFLAGS += -mno-global-merge else # gcc inanely warns about local variables called 'main' diff --git a/arch/alpha/include/asm/user.h b/arch/alpha/include/asm/user.h index 3df37492c7b7..c9f525a6aab8 100644 --- a/arch/alpha/include/asm/user.h +++ b/arch/alpha/include/asm/user.h @@ -45,10 +45,4 @@ struct user { char u_comm[32]; /* user command name */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - #endif /* _ALPHA_USER_H */ diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 827e887afbda..13e1bdb3ddbf 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -134,9 +134,9 @@ reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 12 0 1>, - <&dwdma0 13 1 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 13 0 1>, + <&dwdma0 12 1 0>; + dma-names = "rx", "tx"; }; thermal@e07008c4 { diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index c87b881b2c8b..913553367687 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -284,9 +284,9 @@ #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 4 0 0>, - <&dwdma0 5 0 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 5 0 0>, + <&dwdma0 4 0 0>; + dma-names = "rx", "tx"; }; rtc@e0580000 { diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index c799a3c49342..167d44b550f4 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -77,10 +77,6 @@ struct user{ struct user_fp_struct * u_fp0;/* Used by gdb to help find the 
values for */ /* the FP registers. */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) /* * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index 0659ab4cb0af..11677fc2968f 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -59,8 +59,13 @@ static void __init omap_optee_init_check(void) u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { + static u32 buf[NR_CPUS][5]; + u32 *param; + int cpu; u32 ret; - u32 param[5]; + + cpu = get_cpu(); + param = buf[cpu]; param[0] = nargs; param[1] = arg1; @@ -76,6 +81,8 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, outer_clean_range(__pa(param), __pa(param + 5)); ret = omap_smc2(idx, flag, __pa(param)); + put_cpu(); + return ret; } @@ -119,8 +126,8 @@ phys_addr_t omap_secure_ram_mempool_base(void) #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) u32 omap3_save_secure_ram(void __iomem *addr, int size) { + static u32 param[5]; u32 ret; - u32 param[5]; if (size != OMAP3_SAVE_SECURE_RAM_SZ) return OMAP3_SAVE_SECURE_RAM_SZ; @@ -153,8 +160,8 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size) u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { + static u32 param[5]; u32 ret; - u32 param[5]; param[0] = nargs+1; /* RX-51 needs number of arguments + 1 */ param[1] = arg1; diff --git a/arch/arm64/boot/dts/amd/Makefile b/arch/arm64/boot/dts/amd/Makefile index 6a6093064a32..68103a8b0ef5 100644 --- a/arch/arm64/boot/dts/amd/Makefile +++ b/arch/arm64/boot/dts/amd/Makefile @@ -1,4 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 -dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \ - amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \ - husky.dtb +dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts index 8e341be9a399..c290d1ce2b03 100644 --- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts +++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts @@ -9,6 +9,7 @@ /dts-v1/; /include/ "amd-seattle-soc.dtsi" +/include/ "amd-seattle-cpus.dtsi" / { model = "AMD Seattle (Rev.B0) Development Board (Overdrive)"; @@ -36,14 +37,6 @@ status = "ok"; }; -&gpio2 { - status = "ok"; -}; - -&gpio3 { - status = "ok"; -}; - &gpio4 { status = "ok"; }; @@ -79,10 +72,6 @@ }; }; -&ipmi_kcs { - status = "ok"; -}; - &smb0 { /include/ "amd-seattle-xgbe-b.dtsi" }; diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts index 92cef05c6b74..e0926f6bb7c3 100644 --- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts +++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts @@ -9,6 +9,7 @@ /dts-v1/; /include/ "amd-seattle-soc.dtsi" +/include/ "amd-seattle-cpus.dtsi" / { model = "AMD Seattle (Rev.B1) Development Board (Overdrive)"; diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts deleted file mode 100644 index 41b3a6c0993d..000000000000 --- a/arch/arm64/boot/dts/amd/amd-overdrive.dts +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DTS file for AMD Seattle Overdrive Development Board - * - * Copyright (C) 2014 Advanced Micro Devices, Inc. 
- */ - -/dts-v1/; - -/include/ "amd-seattle-soc.dtsi" - -/ { - model = "AMD Seattle Development Board (Overdrive)"; - compatible = "amd,seattle-overdrive", "amd,seattle"; - - chosen { - stdout-path = &serial0; - }; -}; - -&ccp0 { - status = "ok"; -}; - -&gpio0 { - status = "ok"; -}; - -&gpio1 { - status = "ok"; -}; - -&i2c0 { - status = "ok"; -}; - -&pcie0 { - status = "ok"; -}; - -&spi0 { - status = "ok"; -}; - -&spi1 { - status = "ok"; - sdcard0: sdcard@0 { - compatible = "mmc-spi-slot"; - reg = <0>; - spi-max-frequency = <20000000>; - voltage-ranges = <3200 3400>; - gpios = <&gpio0 7 0>; - interrupt-parent = <&gpio0>; - interrupts = <7 3>; - pl022,hierarchy = <0>; - pl022,interface = <0>; - pl022,com-mode = <0x0>; - pl022,rx-level-trig = <0>; - pl022,tx-level-trig = <0>; - }; -}; - -&v2m0 { - arm,msi-base-spi = <64>; - arm,msi-num-spis = <256>; -}; diff --git a/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi new file mode 100644 index 000000000000..93688a0b6820 --- /dev/null +++ b/arch/arm64/boot/dts/amd/amd-seattle-cpus.dtsi @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + cpus { + #address-cells = <0x1>; + #size-cells = <0x0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + core1 { + cpu = <&CPU1>; + }; + }; + cluster1 { + core0 { + cpu = <&CPU2>; + }; + core1 { + cpu = <&CPU3>; + }; + }; + cluster2 { + core0 { + cpu = <&CPU4>; + }; + core1 { + cpu = <&CPU5>; + }; + }; + cluster3 { + core0 { + cpu = <&CPU6>; + }; + core1 { + cpu = <&CPU7>; + }; + }; + }; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_0>; + + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_0>; + }; + + CPU2: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x100>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_1>; + }; + + CPU3: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x101>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_1>; + }; + + CPU4: cpu@200 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x200>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_2>; + }; + + CPU5: cpu@201 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x201>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_2>; + }; + + CPU6: cpu@300 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x300>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + 
i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_3>; + }; + + CPU7: cpu@301 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x301>; + enable-method = "psci"; + + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + l2-cache = <&L2_3>; + }; + }; + + L2_0: l2-cache0 { + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; + cache-unified; + next-level-cache = <&L3>; + }; + + L2_1: l2-cache1 { + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; + cache-unified; + next-level-cache = <&L3>; + }; + + L2_2: l2-cache2 { + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; + cache-unified; + next-level-cache = <&L3>; + }; + + L2_3: l2-cache3 { + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; + cache-unified; + next-level-cache = <&L3>; + }; + + L3: l3-cache { + cache-level = <3>; + cache-size = <0x800000>; + cache-line-size = <64>; + cache-sets = <8192>; + cache-unified; + }; + + pmu { + compatible = "arm,cortex-a57-pmu"; + interrupts = <0x0 0x7 0x4>, + <0x0 0x8 0x4>, + <0x0 0x9 0x4>, + <0x0 0xa 0x4>, + <0x0 0xb 0x4>, + <0x0 0xc 0x4>, + <0x0 0xd 0x4>, + <0x0 0xe 0x4>; + interrupt-affinity = <&CPU0>, + <&CPU1>, + <&CPU2>, + <&CPU3>, + <&CPU4>, + <&CPU5>, + <&CPU6>, + <&CPU7>; + }; +}; diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi index b664e7af74eb..690020589d41 100644 --- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi +++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi @@ -38,18 +38,6 @@ <1 10 0xff04>; }; - pmu { - compatible = "arm,armv8-pmuv3"; - interrupts = <0 7 4>, - <0 8 4>, - <0 9 4>, - <0 10 4>, - <0 11 4>, - <0 12 4>, - <0 13 4>, - <0 14 4>; - }; - smb0: smb { compatible = "simple-bus"; #address-cells = <2>; @@ -70,6 +58,7 @@ reg = <0 0xe0300000 0 0xf0000>; interrupts = <0 355 4>; clocks = <&sataclk_333mhz>; + iommus = <&sata0_smmu 0x0 0x1f>; dma-coherent; }; @@ -80,6 +69,27 @@ reg = <0 0xe0d00000 0 0xf0000>; interrupts = <0 354 4>; clocks = <&sataclk_333mhz>; + iommus = <&sata1_smmu 0x0e>, + <&sata1_smmu 0x0f>, + <&sata1_smmu 0x1e>; + dma-coherent; + }; + + sata0_smmu: iommu@e0200000 { + compatible = "arm,mmu-401"; + reg = <0 0xe0200000 0 0x10000>; + #global-interrupts = <1>; + interrupts = <0 332 4>, <0 332 4>; + #iommu-cells = <2>; + dma-coherent; + }; + + sata1_smmu: iommu@e0c00000 { + compatible = "arm,mmu-401"; + reg = <0 0xe0c00000 0 0x10000>; + #global-interrupts = <1>; + interrupts = <0 331 4>, <0 331 4>; + #iommu-cells = <1>; dma-coherent; }; @@ -201,6 +211,10 @@ reg = <0 0xe0100000 0 0x10000>; interrupts = <0 3 4>; dma-coherent; + iommus = <&sata1_smmu 0x00>, + <&sata1_smmu 0x02>, + <&sata1_smmu 0x40>, + <&sata1_smmu 0x42>; }; pcie0: pcie@f0000000 { @@ -213,12 +227,22 @@ msi-parent = <&v2m0>; reg = <0 0xf0000000 0 0x10000000>; - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map-mask = <0xff00 0x0 0x0 0x7>; interrupt-map = - <0x1000 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>, - <0x1000 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>, - <0x1000 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>, - <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>; + <0x1100 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x120 0x1>, + <0x1100 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x121 0x1>, + <0x1100 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x122 0x1>, + <0x1100 0x0 0x0 0x4 
&gic0 0x0 0x0 0x0 0x123 0x1>, + + <0x1200 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x124 0x1>, + <0x1200 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x125 0x1>, + <0x1200 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x126 0x1>, + <0x1200 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x127 0x1>, + + <0x1300 0x0 0x0 0x1 &gic0 0x0 0x0 0x0 0x128 0x1>, + <0x1300 0x0 0x0 0x2 &gic0 0x0 0x0 0x0 0x129 0x1>, + <0x1300 0x0 0x0 0x3 &gic0 0x0 0x0 0x0 0x12a 0x1>, + <0x1300 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x12b 0x1>; dma-coherent; dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>; @@ -227,8 +251,18 @@ <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>, /* 32-bit MMIO (size=2G) */ <0x02000000 0x00 0x40000000 0x00 0x40000000 0x00 0x80000000>, - /* 64-bit MMIO (size= 124G) */ + /* 64-bit MMIO (size= 508G) */ <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>; + iommu-map = <0x0 &pcie_smmu 0x0 0x10000>; + }; + + pcie_smmu: iommu@e0a00000 { + compatible = "arm,mmu-401"; + reg = <0 0xe0a00000 0 0x10000>; + #global-interrupts = <1>; + interrupts = <0 333 4>, <0 333 4>; + #iommu-cells = <1>; + dma-coherent; }; /* Perf CCN504 PMU */ diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi index d97498361ce3..9259e547e2e8 100644 --- a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi +++ b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi @@ -55,7 +55,7 @@ clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>; clock-names = "dma_clk", "ptp_clk"; phy-mode = "xgmii"; - #stream-id-cells = <16>; + iommus = <&xgmac0_smmu 0x00 0x17>; /* 0-7, 16-23 */ dma-coherent; }; @@ -81,11 +81,11 @@ clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>; clock-names = "dma_clk", "ptp_clk"; phy-mode = "xgmii"; - #stream-id-cells = <16>; + iommus = <&xgmac1_smmu 0x00 0x17>; /* 0-7, 16-23 */ dma-coherent; }; - xgmac0_smmu: smmu@e0600000 { + xgmac0_smmu: iommu@e0600000 { compatible = "arm,mmu-401"; reg = <0 0xe0600000 0 0x10000>; #global-interrupts = <1>; @@ -94,14 +94,11 @@ */ <0 336 4>, <0 336 4>; - - mmu-masters = <&xgmac0 - 0 1 2 3 4 5 6 7 - 16 17 18 19 20 21 22 23 - >; + #iommu-cells = <2>; + dma-coherent; }; - xgmac1_smmu: smmu@e0800000 { + xgmac1_smmu: iommu@e0800000 { compatible = "arm,mmu-401"; reg = <0 0xe0800000 0 0x10000>; #global-interrupts = <1>; @@ -110,9 +107,6 @@ */ <0 335 4>, <0 335 4>; - - mmu-masters = <&xgmac1 - 0 1 2 3 4 5 6 7 - 16 17 18 19 20 21 22 23 - >; + #iommu-cells = <2>; + dma-coherent; }; diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts deleted file mode 100644 index 7acde34772cb..000000000000 --- a/arch/arm64/boot/dts/amd/husky.dts +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board - * Note: Based-on AMD Seattle Rev.B0 - * - * Copyright (C) 2015 Advanced Micro Devices, Inc. - */ - -/dts-v1/; - -/include/ "amd-seattle-soc.dtsi" - -/ { - model = "Linaro 96Boards Enterprise Edition Server (Husky) Board"; - compatible = "amd,seattle-overdrive", "amd,seattle"; - - chosen { - stdout-path = &serial0; - }; - - psci { - compatible = "arm,psci-0.2"; - method = "smc"; - }; -}; - -&ccp0 { - status = "ok"; - amd,zlib-support = <1>; -}; - -/** - * NOTE: In Rev.B, gpio0 is reserved. 
- */ -&gpio1 { - status = "ok"; -}; - -&gpio2 { - status = "ok"; -}; - -&gpio3 { - status = "ok"; -}; - -&gpio4 { - status = "ok"; -}; - -&i2c0 { - status = "ok"; -}; - -&i2c1 { - status = "ok"; -}; - -&pcie0 { - status = "ok"; -}; - -&spi0 { - status = "ok"; -}; - -&spi1 { - status = "ok"; - sdcard0: sdcard@0 { - compatible = "mmc-spi-slot"; - reg = <0>; - spi-max-frequency = <20000000>; - voltage-ranges = <3200 3400>; - pl022,hierarchy = <0>; - pl022,interface = <0>; - pl022,com-mode = <0x0>; - pl022,rx-level-trig = <0>; - pl022,tx-level-trig = <0>; - }; -}; - -&smb0 { - /include/ "amd-seattle-xgbe-b.dtsi" -}; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index 01b01e320411..35d1939e690b 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -536,9 +536,9 @@ clock-names = "i2c"; clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(1)>; - dmas = <&edma0 1 39>, - <&edma0 1 38>; - dma-names = "tx", "rx"; + dmas = <&edma0 1 38>, + <&edma0 1 39>; + dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 687fea6d8afa..4e7bd04d9798 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -499,9 +499,9 @@ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(2)>; - dmas = <&edma0 1 39>, - <&edma0 1 38>; - dma-names = "tx", "rx"; + dmas = <&edma0 1 38>, + <&edma0 1 39>; + dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h index 2298909f24c6..161653d84b34 100644 --- a/arch/h8300/include/asm/user.h +++ b/arch/h8300/include/asm/user.h @@ -67,9 +67,5 @@ struct user { unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif diff --git a/arch/ia64/include/asm/user.h b/arch/ia64/include/asm/user.h index 0ba486651b7c..ec03d3ab8715 100644 --- a/arch/ia64/include/asm/user.h +++ b/arch/ia64/include/asm/user.h @@ -50,10 +50,4 @@ struct user { char u_comm[32]; /* user command name */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - #endif /* _ASM_IA64_USER_H */ diff --git a/arch/m68k/include/asm/user.h b/arch/m68k/include/asm/user.h index 509d555977c8..61413bff613a 100644 --- a/arch/m68k/include/asm/user.h +++ b/arch/m68k/include/asm/user.h @@ -79,9 +79,5 @@ struct user{ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ }; -#define NBPG 4096 -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c index 0a03529cf317..3e4f5ba104f8 100644 --- a/arch/mips/crypto/crc32-mips.c +++ b/arch/mips/crypto/crc32-mips.c @@ -28,7 +28,7 @@ enum crc_type { }; #ifndef TOOLCHAIN_SUPPORTS_CRC -#define _ASM_MACRO_CRC32(OP, SZ, TYPE) \ +#define _ASM_SET_CRC(OP, SZ, TYPE) \ _ASM_MACRO_3R(OP, rt, rs, rt2, \ ".ifnc \\rt, \\rt2\n\t" \ 
".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \ @@ -37,30 +37,36 @@ _ASM_MACRO_3R(OP, rt, rs, rt2, \ ((SZ) << 6) | ((TYPE) << 8)) \ _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \ ((SZ) << 14) | ((TYPE) << 3))) -_ASM_MACRO_CRC32(crc32b, 0, 0); -_ASM_MACRO_CRC32(crc32h, 1, 0); -_ASM_MACRO_CRC32(crc32w, 2, 0); -_ASM_MACRO_CRC32(crc32d, 3, 0); -_ASM_MACRO_CRC32(crc32cb, 0, 1); -_ASM_MACRO_CRC32(crc32ch, 1, 1); -_ASM_MACRO_CRC32(crc32cw, 2, 1); -_ASM_MACRO_CRC32(crc32cd, 3, 1); -#define _ASM_SET_CRC "" +#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t" #else /* !TOOLCHAIN_SUPPORTS_CRC */ -#define _ASM_SET_CRC ".set\tcrc\n\t" +#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t" +#define _ASM_UNSET_CRC(op, SZ, TYPE) #endif -#define _CRC32(crc, value, size, type) \ -do { \ - __asm__ __volatile__( \ - ".set push\n\t" \ - _ASM_SET_CRC \ - #type #size " %0, %1, %0\n\t" \ - ".set pop" \ - : "+r" (crc) \ - : "r" (value)); \ +#define __CRC32(crc, value, op, SZ, TYPE) \ +do { \ + __asm__ __volatile__( \ + ".set push\n\t" \ + _ASM_SET_CRC(op, SZ, TYPE) \ + #op " %0, %1, %0\n\t" \ + _ASM_UNSET_CRC(op, SZ, TYPE) \ + ".set pop" \ + : "+r" (crc) \ + : "r" (value)); \ } while (0) +#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0) +#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0) +#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0) +#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0) +#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1) +#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1) +#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1) +#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1) + +#define _CRC32(crc, value, size, op) \ + _CRC32_##op##size(crc, value) + #define CRC32(crc, value, size) \ _CRC32(crc, value, size, crc32) diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h index 34d179ca020b..dd9d4b026e62 100644 --- a/arch/mips/include/asm/mach-rc32434/rb.h +++ b/arch/mips/include/asm/mach-rc32434/rb.h @@ -29,15 +29,6 @@ #define DEV3TC 0x01003C #define BTCS 0x010040 #define BTCOMPARE 0x010044 -#define GPIOBASE 0x050000 -/* Offsets relative to GPIOBASE */ -#define GPIOFUNC 0x00 -#define GPIOCFG 0x04 -#define GPIOD 0x08 -#define GPIOILEVEL 0x0C -#define GPIOISTAT 0x10 -#define GPIONMIEN 0x14 -#define IMASK6 0x38 #define LO_WPX (1 << 0) #define LO_ALE (1 << 1) #define LO_CLE (1 << 2) diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 64726c670ca6..5204fc6d6d50 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = NULL; clk->cl.clk = clk; diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 3d5683e75cf1..200fe9ff641d 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c @@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev_name(dev); clk->cl.con_id = con; clk->cl.clk = clk; diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 917fac1636b7..084f6caba5f2 100644 --- 
a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = con; clk->cl.clk = clk; @@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = con; clk->cl.clk = clk; @@ -356,24 +360,28 @@ static void clkdev_add_pci(void) struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL); /* main pci clock */ - clk->cl.dev_id = "17000000.pci"; - clk->cl.con_id = NULL; - clk->cl.clk = clk; - clk->rate = CLOCK_33M; - clk->rates = valid_pci_rates; - clk->enable = pci_enable; - clk->disable = pmu_disable; - clk->module = 0; - clk->bits = PMU_PCI; - clkdev_add(&clk->cl); + if (clk) { + clk->cl.dev_id = "17000000.pci"; + clk->cl.con_id = NULL; + clk->cl.clk = clk; + clk->rate = CLOCK_33M; + clk->rates = valid_pci_rates; + clk->enable = pci_enable; + clk->disable = pmu_disable; + clk->module = 0; + clk->bits = PMU_PCI; + clkdev_add(&clk->cl); + } /* use internal/external bus clock */ - clk_ext->cl.dev_id = "17000000.pci"; - clk_ext->cl.con_id = "external"; - clk_ext->cl.clk = clk_ext; - clk_ext->enable = pci_ext_enable; - clk_ext->disable = pci_ext_disable; - clkdev_add(&clk_ext->cl); + if (clk_ext) { + clk_ext->cl.dev_id = "17000000.pci"; + clk_ext->cl.con_id = "external"; + clk_ext->cl.clk = clk_ext; + clk_ext->enable = pci_ext_enable; + clk_ext->disable = pci_ext_disable; + clkdev_add(&clk_ext->cl); + } } /* xway socs can generate clocks on gpio pins */ @@ -393,9 +401,15 @@ static void clkdev_add_clkout(void) char *name; name = kzalloc(sizeof("clkout0"), GFP_KERNEL); + if (!name) + continue; sprintf(name, "clkout%d", i); clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) { + kfree(name); + continue; + } clk->cl.dev_id = "1f103000.cgu"; clk->cl.con_id = name; clk->cl.clk = clk; diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 94f02ada4082..29c21b9d42da 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -37,6 +37,16 @@ #include <asm/mach-rc32434/rb.h> #include <asm/mach-rc32434/gpio.h> +#define GPIOBASE 0x050000 +/* Offsets relative to GPIOBASE */ +#define GPIOFUNC 0x00 +#define GPIOCFG 0x04 +#define GPIOD 0x08 +#define GPIOILEVEL 0x0C +#define GPIOISTAT 0x10 +#define GPIONMIEN 0x14 +#define IMASK6 0x38 + struct rb532_gpio_chip { struct gpio_chip chip; void __iomem *regbase; diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index dfc52f661ad0..38d12f417e48 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c @@ -363,6 +363,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq) printk(KERN_INFO "GIO: slot %d : %s (id %x)\n", slotno, name, id); gio_dev = kzalloc(sizeof *gio_dev, GFP_KERNEL); + if (!gio_dev) + return; gio_dev->name = name; gio_dev->slotno = slotno; gio_dev->id.id = id; diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h index 99443b8594e7..7fae7e597ba4 100644 --- a/arch/powerpc/include/asm/user.h +++ b/arch/powerpc/include/asm/user.h @@ -44,9 +44,4 @@ struct user { char u_comm[32]; /* user command name */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack 
+ u.u_ssize * NBPG) #endif /* _ASM_POWERPC_USER_H */ diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index ea8ec8a960bd..00fd9c548f26 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -16,6 +16,7 @@ config RISCV select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 select ARCH_HAS_BINFMT_FLAT + select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_WX @@ -47,6 +48,7 @@ config RISCV select CLONE_BACKWARDS select CLINT_TIMER if !MMU select COMMON_CLK + select CPU_PM if CPU_IDLE select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ATOMIC64 if !64BIT @@ -533,4 +535,10 @@ source "kernel/power/Kconfig" endmenu +menu "CPU Power Management" + +source "drivers/cpuidle/Kconfig" + +endmenu + source "arch/riscv/kvm/Kconfig" diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index c112ab2a9052..34592d00dde8 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -36,6 +36,9 @@ config SOC_VIRT select GOLDFISH select RTC_DRV_GOLDFISH if RTC_CLASS select SIFIVE_PLIC + select PM_GENERIC_DOMAINS if PM + select PM_GENERIC_DOMAINS_OF if PM && OF + select RISCV_SBI_CPUIDLE if CPU_IDLE help This enables support for QEMU Virt Machine. diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts index 984872f3d3a9..b9e30df127fe 100644 --- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts @@ -203,6 +203,8 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; m25p,fast-read; broken-flash-reset; }; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts index 7ba99b4da304..8d23401b0bbb 100644 --- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts @@ -205,6 +205,8 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; m25p,fast-read; broken-flash-reset; }; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts index be9b12c9b374..24fd83b43d9d 100644 --- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts +++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts @@ -213,6 +213,8 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; m25p,fast-read; broken-flash-reset; }; diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts index 031c0c28f819..25341f38292a 100644 --- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts +++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts @@ -178,6 +178,8 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <50000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; m25p,fast-read; broken-flash-reset; }; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 7cd10ded7bf8..30e3017f22bc 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_PROFILING=y CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y +CONFIG_PM=y 
+CONFIG_CPU_IDLE=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_JUMP_LABEL=y @@ -64,8 +67,6 @@ CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_EARLYCON_RISCV_SBI=y -CONFIG_HVC_RISCV_SBI=y CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig index 3f42ed87dde8..2438fa39f8ae 100644 --- a/arch/riscv/configs/nommu_k210_defconfig +++ b/arch/riscv/configs/nommu_k210_defconfig @@ -21,7 +21,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_AIO is not set # CONFIG_IO_URING is not set # CONFIG_ADVISE_SYSCALLS is not set -# CONFIG_MEMBARRIER is not set # CONFIG_KALLSYMS is not set CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig index af64b95e88cc..9a133e63ae5b 100644 --- a/arch/riscv/configs/nommu_k210_sdcard_defconfig +++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig @@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_AIO is not set # CONFIG_IO_URING is not set # CONFIG_ADVISE_SYSCALLS is not set -# CONFIG_MEMBARRIER is not set # CONFIG_KALLSYMS is not set CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig index e1c9864b6237..5269fbb6b4fc 100644 --- a/arch/riscv/configs/nommu_virt_defconfig +++ b/arch/riscv/configs/nommu_virt_defconfig @@ -19,7 +19,6 @@ CONFIG_EXPERT=y # CONFIG_AIO is not set # CONFIG_IO_URING is not set # CONFIG_ADVISE_SYSCALLS is not set -# CONFIG_MEMBARRIER is not set # CONFIG_KALLSYMS is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index e0e5c7c09ab8..7e5efdc3829d 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -15,11 +15,14 @@ CONFIG_CHECKPOINT_RESTORE=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_PROFILING=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y CONFIG_ARCH_RV32I=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y +CONFIG_PM=y +CONFIG_CPU_IDLE=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_JUMP_LABEL=y @@ -62,8 +65,6 @@ CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_SERIAL_EARLYCON_RISCV_SBI=y -CONFIG_HVC_RISCV_SBI=y CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 618d7c5af1a2..8c2549b16ac0 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -67,4 +67,30 @@ #error "Unexpected __SIZEOF_SHORT__" #endif +#ifdef __ASSEMBLY__ + +/* Common assembly source macros */ + +#ifdef CONFIG_XIP_KERNEL +.macro XIP_FIXUP_OFFSET reg + REG_L t0, _xip_fixup + add \reg, \reg, t0 +.endm +.macro XIP_FIXUP_FLASH_OFFSET reg + la t1, __data_loc + REG_L t1, _xip_phys_offset + sub \reg, \reg, t1 + add \reg, \reg, t0 +.endm +_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET +_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET +#else +.macro XIP_FIXUP_OFFSET reg +.endm +.macro XIP_FIXUP_FLASH_OFFSET reg +.endm +#endif /* CONFIG_XIP_KERNEL */ + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_RISCV_ASM_H */ diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h new file mode 100644 index 
000000000000..71fdc607d4bc --- /dev/null +++ b/arch/riscv/include/asm/cpuidle.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Allwinner Ltd + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + */ + +#ifndef _ASM_RISCV_CPUIDLE_H +#define _ASM_RISCV_CPUIDLE_H + +#include <asm/barrier.h> +#include <asm/processor.h> + +static inline void cpu_do_idle(void) +{ + /* + * Add mb() here to ensure that all + * IO/MEM accesses are completed prior + * to entering WFI. + */ + mb(); + wait_for_interrupt(); +} + +#endif diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h index 1de233d8e8de..21774d868c65 100644 --- a/arch/riscv/include/asm/current.h +++ b/arch/riscv/include/asm/current.h @@ -33,6 +33,8 @@ static __always_inline struct task_struct *get_current(void) #define current get_current() +register unsigned long current_stack_pointer __asm__("sp"); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_CURRENT_H */ diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h index 4254ff2ff049..1075beae1ac6 100644 --- a/arch/riscv/include/asm/module.lds.h +++ b/arch/riscv/include/asm/module.lds.h @@ -2,8 +2,8 @@ /* Copyright (C) 2017 Andes Technology Corporation */ #ifdef CONFIG_MODULE_SECTIONS SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .got (NOLOAD) : { BYTE(0) } - .got.plt (NOLOAD) : { BYTE(0) } + .plt : { BYTE(0) } + .got : { BYTE(0) } + .got.plt : { BYTE(0) } } #endif diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h new file mode 100644 index 000000000000..8be391c2aecb --- /dev/null +++ b/arch/riscv/include/asm/suspend.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. 
+ */ + +#ifndef _ASM_RISCV_SUSPEND_H +#define _ASM_RISCV_SUSPEND_H + +#include <asm/ptrace.h> + +struct suspend_context { + /* Saved and restored by low-level functions */ + struct pt_regs regs; + /* Saved and restored by high-level functions */ + unsigned long scratch; + unsigned long tvec; + unsigned long ie; +#ifdef CONFIG_MMU + unsigned long satp; +#endif +}; + +/* Low-level CPU suspend entry function */ +int __cpu_suspend_enter(struct suspend_context *context); + +/* High-level CPU suspend which will save context and call finish() */ +int cpu_suspend(unsigned long arg, + int (*finish)(unsigned long arg, + unsigned long entry, + unsigned long context)); + +/* Low-level CPU resume entry function */ +int __cpu_resume_enter(unsigned long hartid, unsigned long context); + +#endif diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 60da0dcacf14..74d888c8d631 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -11,11 +11,17 @@ #include <asm/page.h> #include <linux/const.h> +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 +#else +#define KASAN_STACK_ORDER 0 +#endif + /* thread information allocation */ #ifdef CONFIG_64BIT -#define THREAD_SIZE_ORDER (2) +#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) #else -#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) #endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index e0133d113216..87adbe47bc15 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o +obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o + obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index df0519a64eaf..df9444397908 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -13,6 +13,7 @@ #include <asm/thread_info.h> #include <asm/ptrace.h> #include <asm/cpu_ops_sbi.h> +#include <asm/suspend.h> void asm_offsets(void); @@ -113,6 +114,8 @@ void asm_offsets(void) OFFSET(PT_BADADDR, pt_regs, badaddr); OFFSET(PT_CAUSE, pt_regs, cause); + OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs); + OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero); OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra); OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp); diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index d2a936195295..ccb617791e56 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -69,11 +69,11 @@ int riscv_of_parent_hartid(struct device_node *node) .uprop = #UPROP, \ .isa_ext_id = EXTID, \ } -/** +/* * Here are the ordering rules of extension naming defined by RISC-V * specification : * 1. All extensions should be separated from other multi-letter extensions - * from other multi-letter extensions by an underscore. + * by an underscore. * 2. The first letter following the 'Z' conventionally indicates the most * closely related alphabetical extension category, IMAFDQLCBKJTPVH. * If multiple 'Z' extensions are named, they should be ordered first @@ -110,7 +110,7 @@ static void print_isa_ext(struct seq_file *f) } } -/** +/* * These are the only valid base (single letter) ISA extensions as per the spec. 
* It also specifies the canonical order in which it appears in the spec. * Some of the extension may just be a place holder for now (B, K, P, J). diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c index 2e16f6732cdf..4f5a6f84e2a4 100644 --- a/arch/riscv/kernel/cpu_ops_sbi.c +++ b/arch/riscv/kernel/cpu_ops_sbi.c @@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi; * be invoked from multiple threads in parallel. Define a per cpu data * to handle that. */ -DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); +static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr, unsigned long priv) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index ec07f991866a..893b8bb69391 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -16,26 +16,6 @@ #include <asm/image.h> #include "efi-header.S" -#ifdef CONFIG_XIP_KERNEL -.macro XIP_FIXUP_OFFSET reg - REG_L t0, _xip_fixup - add \reg, \reg, t0 -.endm -.macro XIP_FIXUP_FLASH_OFFSET reg - la t0, __data_loc - REG_L t1, _xip_phys_offset - sub \reg, \reg, t1 - add \reg, \reg, t0 -.endm -_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET -_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET -#else -.macro XIP_FIXUP_OFFSET reg -.endm -.macro XIP_FIXUP_FLASH_OFFSET reg -.endm -#endif /* CONFIG_XIP_KERNEL */ - __HEAD ENTRY(_start) /* @@ -89,7 +69,8 @@ pe_head_start: .align 2 #ifdef CONFIG_MMU -relocate: + .global relocate_enable_mmu +relocate_enable_mmu: /* Relocate return address */ la a1, kernel_map XIP_FIXUP_OFFSET a1 @@ -184,7 +165,7 @@ secondary_start_sbi: /* Enable virtual memory and relocate to virtual address */ la a0, swapper_pg_dir XIP_FIXUP_OFFSET a0 - call relocate + call relocate_enable_mmu #endif call setup_trap_vector tail smp_callin @@ -328,7 +309,7 @@ clear_bss_done: #ifdef CONFIG_MMU la a0, early_pg_dir XIP_FIXUP_OFFSET a0 - call relocate + call relocate_enable_mmu #endif /* CONFIG_MMU */ call setup_trap_vector diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 4a48287513c3..c29cef90d1dd 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -69,7 +69,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location, return 0; } -static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location, +static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; @@ -301,7 +301,7 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, [R_RISCV_64] = apply_r_riscv_64_rela, [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, [R_RISCV_JAL] = apply_r_riscv_jal_rela, - [R_RISCV_RVC_BRANCH] = apply_r_riscv_rcv_branch_rela, + [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela, [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela, [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela, [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela, diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index 55faa4991b87..3348a61de7d9 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, static bool fill_callchain(void *entry, unsigned long pc) { - return perf_callchain_store(entry, pc); + return perf_callchain_store(entry, pc) == 0; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, diff 
--git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 03ac3aa611f5..504b496787aa 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -23,6 +23,7 @@ #include <asm/string.h> #include <asm/switch_to.h> #include <asm/thread_info.h> +#include <asm/cpuidle.h> register unsigned long gp_in_global __asm__("gp"); @@ -37,7 +38,7 @@ extern asmlinkage void ret_from_kernel_thread(void); void arch_cpu_idle(void) { - wait_for_interrupt(); + cpu_do_idle(); raw_local_irq_enable(); } diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 14d2b53ec322..08d11a53f39e 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -14,8 +14,6 @@ #include <asm/stacktrace.h> -register unsigned long sp_in_global __asm__("sp"); - #ifdef CONFIG_FRAME_POINTER void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, @@ -30,7 +28,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, pc = instruction_pointer(regs); } else if (task == NULL || task == current) { fp = (unsigned long)__builtin_frame_address(0); - sp = sp_in_global; + sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ @@ -78,7 +76,7 @@ void notrace walk_stackframe(struct task_struct *task, sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { - sp = sp_in_global; + sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c new file mode 100644 index 000000000000..9ba24fb8cc93 --- /dev/null +++ b/arch/riscv/kernel/suspend.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. + */ + +#include <linux/ftrace.h> +#include <asm/csr.h> +#include <asm/suspend.h> + +static void suspend_save_csrs(struct suspend_context *context) +{ + context->scratch = csr_read(CSR_SCRATCH); + context->tvec = csr_read(CSR_TVEC); + context->ie = csr_read(CSR_IE); + + /* + * No need to save/restore IP CSR (i.e. MIP or SIP) because: + * + * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by + * external devices (such as interrupt controller, timer, etc). + * 2. For MMU (S-mode) kernel, the bits in SIP are set by + * M-mode firmware and external devices (such as interrupt + * controller, etc). + */ + +#ifdef CONFIG_MMU + context->satp = csr_read(CSR_SATP); +#endif +} + +static void suspend_restore_csrs(struct suspend_context *context) +{ + csr_write(CSR_SCRATCH, context->scratch); + csr_write(CSR_TVEC, context->tvec); + csr_write(CSR_IE, context->ie); + +#ifdef CONFIG_MMU + csr_write(CSR_SATP, context->satp); +#endif +} + +int cpu_suspend(unsigned long arg, + int (*finish)(unsigned long arg, + unsigned long entry, + unsigned long context)) +{ + int rc = 0; + struct suspend_context context = { 0 }; + + /* Finisher should be non-NULL */ + if (!finish) + return -EINVAL; + + /* Save additional CSRs*/ + suspend_save_csrs(&context); + + /* + * Function graph tracer state gets incosistent when the kernel + * calls functions that never return (aka finishers) hence disable + * graph tracing during their execution. 
+ */ + pause_graph_tracing(); + + /* Save context on stack */ + if (__cpu_suspend_enter(&context)) { + /* Call the finisher */ + rc = finish(arg, __pa_symbol(__cpu_resume_enter), + (ulong)&context); + + /* + * Should never reach here, unless the suspend finisher + * fails. Successful cpu_suspend() should return from + * __cpu_resume_entry() + */ + if (!rc) + rc = -EOPNOTSUPP; + } + + /* Enable function graph tracer */ + unpause_graph_tracing(); + + /* Restore additional CSRs */ + suspend_restore_csrs(&context); + + return rc; +} diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S new file mode 100644 index 000000000000..4b07b809a2b8 --- /dev/null +++ b/arch/riscv/kernel/suspend_entry.S @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. + */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/csr.h> + + .text + .altmacro + .option norelax + +ENTRY(__cpu_suspend_enter) + /* Save registers (except A0 and T0-T6) */ + REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0) + REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0) + REG_S gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0) + REG_S tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0) + REG_S s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0) + REG_S s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0) + REG_S a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0) + REG_S a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0) + REG_S a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0) + REG_S a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0) + REG_S a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0) + REG_S a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0) + REG_S a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0) + REG_S s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0) + REG_S s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0) + REG_S s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0) + REG_S s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0) + REG_S s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0) + REG_S s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0) + REG_S s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0) + REG_S s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0) + REG_S s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0) + REG_S s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0) + + /* Save CSRs */ + csrr t0, CSR_EPC + REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0) + csrr t0, CSR_STATUS + REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0) + csrr t0, CSR_TVAL + REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0) + csrr t0, CSR_CAUSE + REG_S t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0) + + /* Return non-zero value */ + li a0, 1 + + /* Return to C code */ + ret +END(__cpu_suspend_enter) + +ENTRY(__cpu_resume_enter) + /* Load the global pointer */ + .option push + .option norelax + la gp, __global_pointer$ + .option pop + +#ifdef CONFIG_MMU + /* Save A0 and A1 */ + add t0, a0, zero + add t1, a1, zero + + /* Enable MMU */ + la a0, swapper_pg_dir + XIP_FIXUP_OFFSET a0 + call relocate_enable_mmu + + /* Restore A0 and A1 */ + add a0, t0, zero + add a1, t1, zero +#endif + + /* Make A0 point to suspend context */ + add a0, a1, zero + + /* Restore CSRs */ + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0) + csrw CSR_EPC, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0) + csrw CSR_STATUS, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0) + csrw CSR_TVAL, t0 + REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0) + csrw CSR_CAUSE, t0 + + /* Restore registers (except A0 and T0-T6) */ + REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0) + REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0) + REG_L gp, 
(SUSPEND_CONTEXT_REGS + PT_GP)(a0) + REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0) + REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0) + REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0) + REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0) + REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0) + REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0) + REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0) + REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0) + REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0) + REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0) + REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0) + REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0) + REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0) + REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0) + REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0) + REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0) + REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0) + REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0) + REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0) + REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0) + + /* Return zero value */ + add a0, zero, zero + + /* Return to C code */ + ret +END(__cpu_resume_enter) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9b80e8bed3f6..77b5a03de13a 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -58,6 +58,7 @@ config S390 select ALTERNATE_USER_ADDRESS_SPACE select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE + select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h index 955d620db23e..bb3837d7387c 100644 --- a/arch/s390/include/asm/alternative-asm.h +++ b/arch/s390/include/asm/alternative-asm.h @@ -37,9 +37,15 @@ * a 2-byte nop if the size of the area is not divisible by 6. 
*/ .macro alt_pad_fill bytes - .fill ( \bytes ) / 6, 6, 0xc0040000 - .fill ( \bytes ) % 6 / 4, 4, 0x47000000 - .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700 + .rept ( \bytes ) / 6 + brcl 0,0 + .endr + .rept ( \bytes ) % 6 / 4 + nop + .endr + .rept ( \bytes ) % 6 % 4 / 2 + nopr + .endr .endm /* diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index d3880ca764ee..3f2856ed6808 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -71,11 +71,18 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ".if " oldinstr_pad_len(num) " > 6\n" \ "\tjg " e_oldinstr_pad_end "f\n" \ "6620:\n" \ - "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \ + "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n" \ + "\tnopr\n" \ ".else\n" \ - "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \ - "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \ - "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \ + "\t.rept " oldinstr_pad_len(num) " / 6\n" \ + "\t.brcl 0,0\n" \ + "\t.endr\n" \ + "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n" \ + "\tnop\n" \ + "\t.endr\n" \ + "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n" \ + "\tnopr\n" \ + ".endr\n" \ ".endif\n" #define OLDINSTR(oldinstr, num) \ diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index ae75da592ccb..b515cfa62bd9 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -60,11 +60,11 @@ static inline bool ap_instructions_available(void) unsigned long reg1 = 0; asm volatile( - " lgr 0,%[reg0]\n" /* qid into gr0 */ - " lghi 1,0\n" /* 0 into gr1 */ - " lghi 2,0\n" /* 0 into gr2 */ - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - "0: la %[reg1],1\n" /* 1 into reg1 */ + " lgr 0,%[reg0]\n" /* qid into gr0 */ + " lghi 1,0\n" /* 0 into gr1 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + "0: la %[reg1],1\n" /* 1 into reg1 */ "1:\n" EX_TABLE(0b, 1b) : [reg1] "+&d" (reg1) @@ -86,11 +86,11 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) unsigned long reg2; asm volatile( - " lgr 0,%[qid]\n" /* qid into gr0 */ - " lghi 2,0\n" /* 0 into gr2 */ - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ - " lgr %[reg2],2\n" /* gr2 into reg2 */ + " lgr 0,%[qid]\n" /* qid into gr0 */ + " lghi 2,0\n" /* 0 into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* gr2 into reg2 */ : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2) : [qid] "d" (qid) : "cc", "0", "1", "2"); @@ -128,9 +128,9 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid) struct ap_queue_status reg1; asm volatile( - " lgr 0,%[reg0]\n" /* qid arg into gr0 */ - " .long 0xb2af0000\n" /* PQAP(RAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "=&d" (reg1) : [reg0] "d" (reg0) : "cc", "0", "1"); @@ -149,9 +149,9 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid) struct ap_queue_status reg1; asm volatile( - " lgr 0,%[reg0]\n" /* qid arg into gr0 */ - " .long 0xb2af0000\n" /* PQAP(ZAPQ) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid arg into gr0 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "=&d" 
(reg1) : [reg0] "d" (reg0) : "cc", "0", "1"); @@ -190,10 +190,10 @@ static inline int ap_qci(struct ap_config_info *config) struct ap_config_info *reg2 = config; asm volatile( - " lgr 0,%[reg0]\n" /* QCI fc into gr0 */ - " lgr 2,%[reg2]\n" /* ptr to config into gr2 */ - " .long 0xb2af0000\n" /* PQAP(QCI) */ - "0: la %[reg1],0\n" /* good case, QCI fc available */ + " lgr 0,%[reg0]\n" /* QCI fc into gr0 */ + " lgr 2,%[reg2]\n" /* ptr to config into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QCI) */ + "0: la %[reg1],0\n" /* good case, QCI fc available */ "1:\n" EX_TABLE(0b, 1b) : [reg1] "+&d" (reg1) @@ -246,11 +246,11 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, reg1.qirqctrl = qirqctrl; asm volatile( - " lgr 0,%[reg0]\n" /* qid param into gr0 */ - " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ - " lgr 2,%[reg2]\n" /* ni addr into gr2 */ - " .long 0xb2af0000\n" /* PQAP(AQIC) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ + " lgr 2,%[reg2]\n" /* ni addr into gr2 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ : [reg1] "+&d" (reg1) : [reg0] "d" (reg0), [reg2] "d" (reg2) : "cc", "0", "1", "2"); @@ -297,11 +297,11 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, reg1.value = apinfo->val; asm volatile( - " lgr 0,%[reg0]\n" /* qid param into gr0 */ - " lgr 1,%[reg1]\n" /* qact in info into gr1 */ - " .long 0xb2af0000\n" /* PQAP(QACT) */ - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ - " lgr %[reg2],2\n" /* qact out info into reg2 */ + " lgr 0,%[reg0]\n" /* qid param into gr0 */ + " lgr 1,%[reg1]\n" /* qact in info into gr1 */ + " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */ + " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ + " lgr %[reg2],2\n" /* qact out info into reg2 */ : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2) : [reg0] "d" (reg0) : "cc", "0", "1", "2"); diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index c800199a376b..82388da3f95f 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -74,8 +74,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } -void smp_ctl_set_bit(int cr, int bit); -void smp_ctl_clear_bit(int cr, int bit); +void smp_ctl_set_clear_bit(int cr, int bit, bool set); + +static inline void ctl_set_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, true); +} + +static inline void ctl_clear_bit(int cr, int bit) +{ + smp_ctl_set_clear_bit(cr, bit, false); +} union ctlreg0 { unsigned long val; @@ -130,8 +139,5 @@ union ctlreg15 { }; }; -#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) - #endif /* __ASSEMBLY__ */ #endif /* __ASM_CTL_REG_H */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 84ec63145325..eee8d96fb38e 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -319,11 +319,18 @@ extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs); extern int memcpy_real(void *, unsigned long, size_t); extern void memcpy_absolute(void *, void *, size_t); -#define mem_assign_absolute(dest, val) do { \ - __typeof__(dest) __tmp = (val); \ - \ - BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \ - memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ +#define put_abs_lowcore(member, x) do { \ + unsigned long __abs_address = offsetof(struct 
lowcore, member); \ + __typeof__(((struct lowcore *)0)->member) __tmp = (x); \ + \ + memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp)); \ +} while (0) + +#define get_abs_lowcore(x, member) do { \ + unsigned long __abs_address = offsetof(struct lowcore, member); \ + __typeof__(((struct lowcore *)0)->member) *__ptr = &(x); \ + \ + memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr)); \ } while (0) extern int s390_isolate_bp(void); diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 888a2f1c9ee3..24a54443c865 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -78,7 +78,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) { typecheck(int, lp->lock); asm_inline volatile( - ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */ + ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ " sth %1,%0\n" : "=R" (((unsigned short *) &lp->lock)[1]) : "d" (0) : "cc", "memory"); diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h index ad2c996e7e93..fde7e6b1df48 100644 --- a/arch/s390/include/asm/syscall_wrapper.h +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -162,4 +162,4 @@ __diag_pop(); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#endif /* _ASM_X86_SYSCALL_WRAPPER_H */ +#endif /* _ASM_S390_SYSCALL_WRAPPER_H */ diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h index 5ebf534ef753..0bf06f1682d8 100644 --- a/arch/s390/include/asm/unwind.h +++ b/arch/s390/include/asm/unwind.h @@ -4,6 +4,8 @@ #include <linux/sched.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> +#include <linux/llist.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> @@ -36,10 +38,21 @@ struct unwind_state { struct pt_regs *regs; unsigned long sp, ip; int graph_idx; + struct llist_node *kr_cur; bool reliable; bool error; }; +/* Recover the return address modified by kretprobe and ftrace_graph. 
*/ +static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long ip) +{ + ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL); + if (is_kretprobe_trampoline(ip)) + ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur); + return ip; +} + void __unwind_start(struct unwind_state *state, struct task_struct *task, struct pt_regs *regs, unsigned long first_frame); bool unwind_next_frame(struct unwind_state *state); diff --git a/arch/s390/include/asm/user.h b/arch/s390/include/asm/user.h index 0ca572ced21b..8e8aaf48582e 100644 --- a/arch/s390/include/asm/user.h +++ b/arch/s390/include/asm/user.h @@ -67,9 +67,5 @@ struct user { unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _S390_USER_H */ diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index a601a518b569..59b69c8ab5e1 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -121,22 +121,22 @@ _LPP_OFFSET = __LC_LPP .endm .macro BPOFF - ALTERNATIVE "", ".long 0xb2e8c000", 82 + ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82 .endm .macro BPON - ALTERNATIVE "", ".long 0xb2e8d000", 82 + ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82 .endm .macro BPENTER tif_ptr,tif_mask - ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \ + ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \ "", 82 .endm .macro BPEXIT tif_ptr,tif_mask TSTMSK \tif_ptr,\tif_mask - ALTERNATIVE "jz .+8; .long 0xb2e8c000", \ - "jnz .+8; .long 0xb2e8d000", 82 + ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \ + "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82 .endm /* diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 28ae7df26c4a..1cc85b8ff42e 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1646,8 +1646,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) csum = (__force unsigned int) csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); - mem_assign_absolute(S390_lowcore.ipib, ipib); - mem_assign_absolute(S390_lowcore.ipib_checksum, csum); + put_abs_lowcore(ipib, ipib); + put_abs_lowcore(ipib_checksum, csum); dump_run(trigger); } diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index e32c14fd1282..0032bdbe8e3f 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -284,11 +284,11 @@ NOKPROBE_SYMBOL(pop_kprobe); void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { - ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; - ri->fp = NULL; + ri->ret_addr = (kprobe_opcode_t *)regs->gprs[14]; + ri->fp = (void *)regs->gprs[15]; /* Replace the return addr with trampoline addr */ - regs->gprs[14] = (unsigned long) &__kretprobe_trampoline; + regs->gprs[14] = (unsigned long)&__kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_prepare_kretprobe); @@ -385,7 +385,7 @@ NOKPROBE_SYMBOL(arch_kretprobe_fixup_return); */ void trampoline_probe_handler(struct pt_regs *regs) { - kretprobe_trampoline_handler(regs, NULL); + kretprobe_trampoline_handler(regs, (void *)regs->gprs[15]); } NOKPROBE_SYMBOL(trampoline_probe_handler); diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 088d57a3083f..b2ef014a9287 100644 --- a/arch/s390/kernel/machine_kexec.c +++ 
b/arch/s390/kernel/machine_kexec.c @@ -226,7 +226,7 @@ void arch_crash_save_vmcoreinfo(void) vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); + put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note()); } void machine_shutdown(void) diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index 6b5b64e67eee..1acc2e05d70f 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -63,7 +63,7 @@ void __init os_info_init(void) os_info.version_minor = OS_INFO_VERSION_MINOR; os_info.magic = OS_INFO_MAGIC; os_info.csum = os_info_csum(&os_info); - mem_assign_absolute(S390_lowcore.os_info, __pa(ptr)); + put_abs_lowcore(os_info, __pa(ptr)); } #ifdef CONFIG_CRASH_DUMP diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 84e23fcc1106..d860ac300919 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -481,11 +481,11 @@ static void __init setup_lowcore_dat_off(void) lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET; /* Setup absolute zero lowcore */ - mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack); - mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn); - mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data); - mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); - mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); + put_abs_lowcore(restart_stack, lc->restart_stack); + put_abs_lowcore(restart_fn, lc->restart_fn); + put_abs_lowcore(restart_data, lc->restart_data); + put_abs_lowcore(restart_source, lc->restart_source); + put_abs_lowcore(restart_psw, lc->restart_psw); lc->spinlock_lockval = arch_spin_lockval(0); lc->spinlock_index = 0; @@ -501,6 +501,7 @@ static void __init setup_lowcore_dat_off(void) static void __init setup_lowcore_dat_on(void) { struct lowcore *lc = lowcore_ptr[0]; + int cr; __ctl_clear_bit(0, 28); S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; @@ -509,10 +510,10 @@ static void __init setup_lowcore_dat_on(void) S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; __ctl_store(S390_lowcore.cregs_save_area, 0, 15); __ctl_set_bit(0, 28); - mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS); - mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw); - memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area, - sizeof(S390_lowcore.cregs_save_area)); + put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS); + put_abs_lowcore(program_new_psw, lc->program_new_psw); + for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++) + put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]); } static struct resource code_resource = { diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 127da1850b06..30c91d565933 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -213,7 +213,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) if (nmi_alloc_mcesa(&lc->mcesad)) goto out; lowcore_ptr[cpu] = lc; - pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); + pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc)); return 0; out: @@ -326,10 +326,17 @@ static void pcpu_delegate(struct pcpu *pcpu, /* Stop target cpu (if func returns this stops the current cpu). */ pcpu_sigp_retry(pcpu, SIGP_STOP, 0); /* Restart func on the target cpu and stop the current cpu. 
*/ - mem_assign_absolute(lc->restart_stack, stack); - mem_assign_absolute(lc->restart_fn, (unsigned long) func); - mem_assign_absolute(lc->restart_data, (unsigned long) data); - mem_assign_absolute(lc->restart_source, source_cpu); + if (lc) { + lc->restart_stack = stack; + lc->restart_fn = (unsigned long)func; + lc->restart_data = (unsigned long)data; + lc->restart_source = source_cpu; + } else { + put_abs_lowcore(restart_stack, stack); + put_abs_lowcore(restart_fn, (unsigned long)func); + put_abs_lowcore(restart_data, (unsigned long)data); + put_abs_lowcore(restart_source, source_cpu); + } __bpon(); asm volatile( "0: sigp 0,%0,%2 # sigp restart to target cpu\n" @@ -570,39 +577,27 @@ static void smp_ctl_bit_callback(void *info) } static DEFINE_SPINLOCK(ctl_lock); -static unsigned long ctlreg; -/* - * Set a bit in a control register of all cpus - */ -void smp_ctl_set_bit(int cr, int bit) +void smp_ctl_set_clear_bit(int cr, int bit, bool set) { - struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; - - spin_lock(&ctl_lock); - memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg)); - __set_bit(bit, &ctlreg); - memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg)); - spin_unlock(&ctl_lock); - on_each_cpu(smp_ctl_bit_callback, &parms, 1); -} -EXPORT_SYMBOL(smp_ctl_set_bit); - -/* - * Clear a bit in a control register of all cpus - */ -void smp_ctl_clear_bit(int cr, int bit) -{ - struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; + struct ec_creg_mask_parms parms = { .cr = cr, }; + u64 ctlreg; + if (set) { + parms.orval = 1UL << bit; + parms.andval = -1UL; + } else { + parms.orval = 0; + parms.andval = ~(1UL << bit); + } spin_lock(&ctl_lock); - memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg)); - __clear_bit(bit, &ctlreg); - memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg)); + get_abs_lowcore(ctlreg, cregs_save_area[cr]); + ctlreg = (ctlreg & parms.andval) | parms.orval; + put_abs_lowcore(cregs_save_area[cr], ctlreg); spin_unlock(&ctl_lock); on_each_cpu(smp_ctl_bit_callback, &parms, 1); } -EXPORT_SYMBOL(smp_ctl_clear_bit); +EXPORT_SYMBOL(smp_ctl_set_clear_bit); #ifdef CONFIG_CRASH_DUMP diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 674c65019434..1d2aa448d103 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -141,10 +141,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) do_trap(regs, SIGFPE, si_code, "floating point exception"); } -static void translation_exception(struct pt_regs *regs) +static void translation_specification_exception(struct pt_regs *regs) { /* May never happen. 
*/ - panic("Translation exception"); + panic("Translation-Specification Exception"); } static void illegal_op(struct pt_regs *regs) @@ -368,7 +368,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = { [0x0f] = hfp_divide_exception, [0x10] = do_dat_exception, [0x11] = do_dat_exception, - [0x12] = translation_exception, + [0x12] = translation_specification_exception, [0x13] = special_op_exception, [0x14] = default_trap_handler, [0x15] = operand_exception, diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index 707fd99f6734..0ece156fdd7c 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -64,8 +64,8 @@ bool unwind_next_frame(struct unwind_state *state) ip = READ_ONCE_NOCHECK(sf->gprs[8]); reliable = false; regs = NULL; - if (!__kernel_text_address(ip)) { - /* skip bogus %r14 */ + /* skip bogus %r14 or if is the same as regs->psw.addr */ + if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) { state->regs = NULL; return unwind_next_frame(state); } @@ -103,13 +103,11 @@ bool unwind_next_frame(struct unwind_state *state) if (sp & 0x7) goto out_err; - ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp); - /* Update unwind state */ state->sp = sp; - state->ip = ip; state->regs = regs; state->reliable = reliable; + state->ip = unwind_recover_ret_addr(state, ip); return true; out_err: @@ -161,12 +159,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, ip = READ_ONCE_NOCHECK(sf->gprs[8]); } - ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL); - /* Update unwind state */ state->sp = sp; - state->ip = ip; state->reliable = true; + state->ip = unwind_recover_ret_addr(state, ip); if (!first_frame) return; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ab73d99483fe..156d1c25a3c1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3462,7 +3462,7 @@ void exit_sie(struct kvm_vcpu *vcpu) /* Kick a guest cpu out of SIE to process a request synchronously */ void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu) { - kvm_make_request(req, vcpu); + __kvm_make_request(req, vcpu); kvm_s390_vcpu_request(vcpu); } diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 692dc84cd19c..5e7ea8b111e8 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock) int owner; asm_inline volatile( - ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */ + ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ " l %0,%1\n" : "=d" (owner) : "Q" (*lock) : "memory"); return owner; @@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new) int expected = old; asm_inline volatile( - ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */ + ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ " cs %0,%3,%1\n" : "=d" (old), "=Q" (*lock) : "0" (old), "d" (new), "Q" (*lock) diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index c01f02887de4..9bb067321ab4 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -47,7 +47,7 @@ static void print_backtrace(char *bt) static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, unsigned long sp) { - int frame_count, prev_is_func2, seen_func2_func1; + int frame_count, prev_is_func2, seen_func2_func1, seen_kretprobe_trampoline; const int max_frames = 128; struct unwind_state state; size_t bt_pos = 0; @@ 
-63,6 +63,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, frame_count = 0; prev_is_func2 = 0; seen_func2_func1 = 0; + seen_kretprobe_trampoline = 0; unwind_for_each_frame(&state, task, regs, sp) { unsigned long addr = unwind_get_return_address(&state); char sym[KSYM_SYMBOL_LEN]; @@ -88,6 +89,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1")) seen_func2_func1 = 1; prev_is_func2 = str_has_prefix(sym, "unwindme_func2"); + if (str_has_prefix(sym, "__kretprobe_trampoline+0x0/")) + seen_kretprobe_trampoline = 1; } /* Check the results. */ @@ -103,6 +106,10 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, kunit_err(current_test, "Maximum number of frames exceeded\n"); ret = -EINVAL; } + if (seen_kretprobe_trampoline) { + kunit_err(current_test, "__kretprobe_trampoline+0x0 in unwinding results\n"); + ret = -EINVAL; + } if (ret || force_bt) print_backtrace(bt); kfree(bt); @@ -132,36 +139,50 @@ static struct unwindme *unwindme; #define UWM_PGM 0x40 /* Unwind from program check handler */ #define UWM_KPROBE_ON_FTRACE 0x80 /* Unwind from kprobe handler called via ftrace. */ #define UWM_FTRACE 0x100 /* Unwind from ftrace handler. */ -#define UWM_KRETPROBE 0x200 /* Unwind kretprobe handlers. */ +#define UWM_KRETPROBE 0x200 /* Unwind through kretprobed function. */ +#define UWM_KRETPROBE_HANDLER 0x400 /* Unwind from kretprobe handler. */ -static __always_inline unsigned long get_psw_addr(void) +static __always_inline struct pt_regs fake_pt_regs(void) { - unsigned long psw_addr; + struct pt_regs regs; + + memset(&regs, 0, sizeof(regs)); + regs.gprs[15] = current_stack_pointer(); asm volatile( "basr %[psw_addr],0\n" - : [psw_addr] "=d" (psw_addr)); - return psw_addr; + : [psw_addr] "=d" (regs.psw.addr)); + return regs; } static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { struct unwindme *u = unwindme; + if (!(u->flags & UWM_KRETPROBE_HANDLER)) + return 0; + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL, (u->flags & UWM_SP) ? u->sp : 0); return 0; } -static noinline notrace void test_unwind_kretprobed_func(void) +static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u) { - asm volatile(" nop\n"); + struct pt_regs regs; + + if (!(u->flags & UWM_KRETPROBE)) + return 0; + + regs = fake_pt_regs(); + return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL, + (u->flags & UWM_SP) ?
u->sp : 0); } -static noinline void test_unwind_kretprobed_func_caller(void) +static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u) { - test_unwind_kretprobed_func(); + return test_unwind_kretprobed_func(u); } static int test_unwind_kretprobe(struct unwindme *u) @@ -187,10 +208,12 @@ static int test_unwind_kretprobe(struct unwindme *u) return -EINVAL; } - test_unwind_kretprobed_func_caller(); + ret = test_unwind_kretprobed_func_caller(u); unregister_kretprobe(&my_kretprobe); unwindme = NULL; - return u->ret; + if (u->flags & UWM_KRETPROBE_HANDLER) + ret = u->ret; + return ret; } static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs) @@ -304,16 +327,13 @@ static noinline int unwindme_func4(struct unwindme *u) return 0; } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) { return test_unwind_kprobe(u); - } else if (u->flags & (UWM_KRETPROBE)) { + } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) { return test_unwind_kretprobe(u); } else if (u->flags & UWM_FTRACE) { return test_unwind_ftrace(u); } else { - struct pt_regs regs; + struct pt_regs regs = fake_pt_regs(); - memset(&regs, 0, sizeof(regs)); - regs.psw.addr = get_psw_addr(); - regs.gprs[15] = current_stack_pointer(); return test_unwind(NULL, (u->flags & UWM_REGS) ? &regs : NULL, (u->flags & UWM_SP) ? u->sp : 0); @@ -452,6 +472,10 @@ static const struct test_params param_list[] = { TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP), TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS), TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS), + TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER), + TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP), + TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS), + TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS), }; /* diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 792f8e0f2178..e563cb65c0c4 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid) list_for_each_entry(tmp, &zpci_list, entry) { if (tmp->fid == fid) { zdev = tmp; + zpci_zdev_get(zdev); break; } } @@ -399,7 +400,7 @@ EXPORT_SYMBOL(pci_iounmap); static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn); + struct zpci_dev *zdev = zdev_from_bus(bus, devfn); return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV; } @@ -407,7 +408,7 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn); + struct zpci_dev *zdev = zdev_from_bus(bus, devfn); return (zdev) ?
zpci_cfg_store(zdev, where, val, size) : -ENODEV; } diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index e359d2686178..e96c9860e064 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); void zpci_release_device(struct kref *kref); static inline void zpci_zdev_put(struct zpci_dev *zdev) { - kref_put(&zdev->kref, zpci_release_device); + if (zdev) + kref_put(&zdev->kref, zpci_release_device); } static inline void zpci_zdev_get(struct zpci_dev *zdev) @@ -32,8 +33,8 @@ void zpci_free_domain(int domain); int zpci_setup_bus_resources(struct zpci_dev *zdev, struct list_head *resources); -static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus, - unsigned int devfn) +static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus, + unsigned int devfn) { struct zpci_bus *zbus = bus->sysdata; diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 63f3e057c168..1057d7af4a55 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -23,6 +23,8 @@ #include <asm/clp.h> #include <uapi/asm/clp.h> +#include "pci_bus.h" + bool zpci_unique_uid; void update_uid_checking(bool new) @@ -404,8 +406,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) return; zdev = get_zdev_by_fid(entry->fid); - if (!zdev) - zpci_create_device(entry->fid, entry->fh, entry->config_state); + if (zdev) { + zpci_zdev_put(zdev); + return; + } + zpci_create_device(entry->fid, entry->fh, entry->config_state); } int clp_scan_pci_devices(void) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 2e3e5b278925..ea9db5cea64e 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) pdev ? 
pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); if (!pdev) - return; + goto no_pdev; switch (ccdf->pec) { case 0x003a: /* Service Action or Error Recovery Successful */ @@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) break; } pci_dev_put(pdev); +no_pdev: + zpci_zdev_put(zdev); } void zpci_event_error(void *data) @@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + bool existing_zdev = !!zdev; enum zpci_state state; zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n", @@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) default: break; } + if (existing_zdev) + zpci_zdev_put(zdev); } void zpci_event_availability(void *data) diff --git a/arch/sh/include/asm/user.h b/arch/sh/include/asm/user.h index 7dfd3f6461e6..12ea0f3f4419 100644 --- a/arch/sh/include/asm/user.h +++ b/arch/sh/include/asm/user.h @@ -52,10 +52,4 @@ struct user { char u_comm[32]; /* user command name */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - #endif /* __ASM_SH_USER_H */ diff --git a/arch/um/Makefile b/arch/um/Makefile index 320b09cd513c..f2fe63bfd819 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -75,10 +75,6 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \ -idirafter $(objtree)/include -D__KERNEL__ -D__UM_HOST__ -ifdef CONFIG_CC_IS_CLANG -USER_CFLAGS := $(patsubst -mno-global-merge,,$(USER_CFLAGS)) -endif - #This will adjust *FLAGS accordingly to the platform. 
include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS) diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index b08bd2966253..f1f3f52f1e9c 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -24,7 +24,6 @@ generic-y += softirq_stack.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h -generic-y += word-at-a-time.h generic-y += kprobes.h generic-y += mm_hooks.h generic-y += vga.h diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4138939532c6..d23e80a56eb8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -249,6 +249,7 @@ enum x86_intercept_stage; #define PFERR_SGX_BIT 15 #define PFERR_GUEST_FINAL_BIT 32 #define PFERR_GUEST_PAGE_BIT 33 +#define PFERR_IMPLICIT_ACCESS_BIT 48 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT) #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT) @@ -259,6 +260,7 @@ enum x86_intercept_stage; #define PFERR_SGX_MASK (1U << PFERR_SGX_BIT) #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT) #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) +#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT) #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \ PFERR_WRITE_MASK | \ @@ -430,7 +432,7 @@ struct kvm_mmu { void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gpa_t gva_or_gpa, u32 access, + gpa_t gva_or_gpa, u64 access, struct x86_exception *exception); int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); @@ -512,6 +514,7 @@ struct kvm_pmu { u64 global_ctrl_mask; u64 global_ovf_ctrl_mask; u64 reserved_bits; + u64 raw_event_mask; u8 version; struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED]; @@ -1040,14 +1043,16 @@ struct kvm_x86_msr_filter { struct msr_bitmap_range ranges[16]; }; -#define APICV_INHIBIT_REASON_DISABLE 0 -#define APICV_INHIBIT_REASON_HYPERV 1 -#define APICV_INHIBIT_REASON_NESTED 2 -#define APICV_INHIBIT_REASON_IRQWIN 3 -#define APICV_INHIBIT_REASON_PIT_REINJ 4 -#define APICV_INHIBIT_REASON_X2APIC 5 -#define APICV_INHIBIT_REASON_BLOCKIRQ 6 -#define APICV_INHIBIT_REASON_ABSENT 7 +enum kvm_apicv_inhibit { + APICV_INHIBIT_REASON_DISABLE, + APICV_INHIBIT_REASON_HYPERV, + APICV_INHIBIT_REASON_NESTED, + APICV_INHIBIT_REASON_IRQWIN, + APICV_INHIBIT_REASON_PIT_REINJ, + APICV_INHIBIT_REASON_X2APIC, + APICV_INHIBIT_REASON_BLOCKIRQ, + APICV_INHIBIT_REASON_ABSENT, +}; struct kvm_arch { unsigned long n_used_mmu_pages; @@ -1401,7 +1406,7 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); - bool (*check_apicv_inhibit_reasons)(ulong bit); + bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason); void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); @@ -1585,7 +1590,7 @@ void kvm_mmu_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); -void kvm_mmu_init_vm(struct kvm *kvm); +int kvm_mmu_init_vm(struct kvm *kvm); void kvm_mmu_uninit_vm(struct kvm *kvm); void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu); @@ -1795,11 +1800,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, bool kvm_apicv_activated(struct 
kvm *kvm); void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu); -void kvm_request_apicv_update(struct kvm *kvm, bool activate, - unsigned long bit); +void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason, bool set); +void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason, bool set); + +static inline void kvm_set_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason) +{ + kvm_set_or_clear_apicv_inhibit(kvm, reason, true); +} -void __kvm_request_apicv_update(struct kvm *kvm, bool activate, - unsigned long bit); +static inline void kvm_clear_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason) +{ + kvm_set_or_clear_apicv_inhibit(kvm, reason, false); +} int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 7eb2df5417fb..f70a5108d464 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -221,8 +221,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2) +#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL +#define SVM_TSC_RATIO_MIN 0x0000000000000001ULL +#define SVM_TSC_RATIO_MAX 0x000000ffffffffffULL +#define SVM_TSC_RATIO_DEFAULT 0x0100000000ULL + + /* AVIC */ -#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF) +#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFFULL) #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) @@ -230,9 +236,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12) #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62) #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63) -#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF) +#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL) + +#define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0) -#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF) +#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL #define AVIC_UNACCEL_ACCESS_WRITE_MASK 1 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0 diff --git a/arch/x86/include/asm/user_32.h b/arch/x86/include/asm/user_32.h index d72c3d66e94f..8963915e533f 100644 --- a/arch/x86/include/asm/user_32.h +++ b/arch/x86/include/asm/user_32.h @@ -124,9 +124,5 @@ struct user{ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _ASM_X86_USER_32_H */ diff --git a/arch/x86/include/asm/user_64.h b/arch/x86/include/asm/user_64.h index db909923611c..1dd10f07ccd6 100644 --- a/arch/x86/include/asm/user_64.h +++ b/arch/x86/include/asm/user_64.h @@ -130,9 +130,5 @@ struct user { unsigned long error_code; /* CPU error code or 0 */ unsigned long fault_address; /* CR3 or 0 */ }; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _ASM_X86_USER_64_H */ diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 79e0b8d63ffa..a22deb58f86d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -517,7 +517,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { ipi_bitmap <<= min - apic_id; min = apic_id; - } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { + } else if (apic_id > 
min && apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a00cd97b2623..b24ca7f4ed7c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -735,6 +735,7 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, if (function > READ_ONCE(max_cpuid_80000000)) return entry; } + break; default: break; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index f9c00c89829b..89b11e7dca8a 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3540,8 +3540,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) { u64 tsc_aux = 0; - if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) + if (!ctxt->ops->guest_has_rdpid(ctxt)) return emulate_ud(ctxt); + + ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); ctxt->dst.val = tsc_aux; return X86EMUL_CONTINUE; } @@ -3642,7 +3644,7 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt) msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); - r = ctxt->ops->set_msr(ctxt, msr_index, msr_data); + r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data); if (r == X86EMUL_IO_NEEDED) return r; @@ -3659,7 +3661,7 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt) u64 msr_data; int r; - r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data); + r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data); if (r == X86EMUL_IO_NEEDED) return r; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index a32f54ab84a2..123b677111c5 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic, else hv->synic_auto_eoi_used--; - __kvm_request_apicv_update(vcpu->kvm, - !hv->synic_auto_eoi_used, - APICV_INHIBIT_REASON_HYPERV); + /* + * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on + * the hypervisor to manually inject IRQs. 
+ */ + __kvm_set_or_clear_apicv_inhibit(vcpu->kvm, + APICV_INHIBIT_REASON_HYPERV, + !!hv->synic_auto_eoi_used); up_write(&vcpu->kvm->arch.apicv_update_lock); } @@ -239,7 +243,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); int ret; - if (!synic->active && !host) + if (!synic->active && (!host || data)) return 1; trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); @@ -285,6 +289,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, case HV_X64_MSR_EOM: { int i; + if (!synic->active) + break; + for (i = 0; i < ARRAY_SIZE(synic->sint); i++) kvm_hv_notify_acked_sint(vcpu, i); break; @@ -449,6 +456,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) struct kvm_lapic_irq irq; int ret, vector; + if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm)) + return -EINVAL; + if (sint >= ARRAY_SIZE(synic->sint)) return -EINVAL; @@ -661,7 +671,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || config)) return 1; if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && @@ -690,7 +700,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || count)) return 1; trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 0b65a764ed3a..1c83076091af 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) * So, deactivate APICv when PIT is in reinject mode. */ if (reinject) { - kvm_request_apicv_update(kvm, false, - APICV_INHIBIT_REASON_PIT_REINJ); + kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ); /* The initial state is preserved while ps->reinject == 0. 
*/ kvm_pit_reset_reinject(pit); kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier); kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); } else { - kvm_request_apicv_update(kvm, true, - APICV_INHIBIT_REASON_PIT_REINJ); + kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ); kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier); kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); } diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 840ddb4a9302..8dff25d267b7 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -210,6 +210,8 @@ struct x86_emulate_ops { int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt); void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase); + int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); + int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc); @@ -226,6 +228,7 @@ struct x86_emulate_ops { bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt); + bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 80a2020c4db4..66b0eb0bda94 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1024,6 +1024,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, *r = -1; if (irq->shorthand == APIC_DEST_SELF) { + if (KVM_BUG_ON(!src, kvm)) { + *r = 0; + return true; + } *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); return true; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index bf8dbc4bb12a..e6cae6f22683 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -214,27 +214,27 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, */ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, - unsigned pfec) + u64 access) { - int cpl = static_call(kvm_x86_get_cpl)(vcpu); + /* strip nested paging fault error codes */ + unsigned int pfec = access; unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); /* - * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1. + * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1. + * For implicit supervisor accesses, SMAP cannot be overridden. * - * If CPL = 3, SMAP applies to all supervisor-mode data accesses - * (these are implicit supervisor accesses) regardless of the value - * of EFLAGS.AC. + * SMAP works on supervisor accesses only, and not_smap can + * be set or not set when user access with neither has any bearing + * on the result. * - * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving - * the result in X86_EFLAGS_AC. We then insert it in place of - * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec, - * but it will be one in index if SMAP checks are being overridden. - * It is important to keep this branchless. + * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit; + * this bit will always be zero in pfec, but it will be one in index + * if SMAP checks are being disabled. 
*/ - unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC); - int index = (pfec >> 1) + - (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); + u64 implicit_access = access & PFERR_IMPLICIT_ACCESS; + bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC; + int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1; bool fault = (mmu->permissions[index] >> pte_access) & 1; u32 errcode = PFERR_PRESENT_MASK; @@ -317,12 +317,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count) atomic64_add(count, &kvm->stat.pages[level - 1]); } -gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, +gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, struct x86_exception *exception); static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gpa_t gpa, u32 access, + gpa_t gpa, u64 access, struct x86_exception *exception) { if (mmu != &vcpu->arch.nested_mmu) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 51671cb34fb6..8f19ea752704 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2696,8 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, if (*sptep == spte) { ret = RET_PF_SPURIOUS; } else { - trace_kvm_mmu_set_spte(level, gfn, sptep); flush |= mmu_spte_update(sptep, spte); + trace_kvm_mmu_set_spte(level, gfn, sptep); } if (wrprot) { @@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu) } static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gpa_t vaddr, u32 access, + gpa_t vaddr, u64 access, struct x86_exception *exception) { if (exception) @@ -4591,11 +4591,11 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept) * - X86_CR4_SMAP is set in CR4 * - A user page is accessed * - The access is not a fetch - * - Page fault in kernel mode - * - if CPL = 3 or X86_EFLAGS_AC is clear + * - The access is supervisor mode + * - If implicit supervisor access or X86_EFLAGS_AC is clear * - * Here, we cover the first three conditions. - * The fourth is computed dynamically in permission_fault(); + * Here, we cover the first four conditions. + * The fifth is computed dynamically in permission_fault(); * PFERR_RSVD_MASK bit will be set in PFEC if the access is * *not* subject to SMAP restrictions. 
*/ @@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, kvm_mmu_zap_all_fast(kvm); } -void kvm_mmu_init_vm(struct kvm *kvm) +int kvm_mmu_init_vm(struct kvm *kvm) { struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + int r; + INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); + INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); - kvm_mmu_init_tdp_mmu(kvm); + r = kvm_mmu_init_tdp_mmu(kvm); + if (r < 0) + return r; node->track_write = kvm_mmu_pte_write; node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; kvm_page_track_register_notifier(kvm, node); + return 0; } void kvm_mmu_uninit_vm(struct kvm *kvm) @@ -5842,8 +5849,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) if (is_tdp_mmu_enabled(kvm)) { for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, - gfn_end, flush); + flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start, + gfn_end, true, flush); } if (flush) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 252c77805eb9..01fee5f67ac3 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -34,9 +34,8 @@ #define PT_HAVE_ACCESSED_DIRTY(mmu) true #ifdef CONFIG_X86_64 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgq" #else - #define CMPXCHG cmpxchg64 #define PT_MAX_FULL_LEVELS 2 #endif #elif PTTYPE == 32 @@ -52,7 +51,7 @@ #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgl" #elif PTTYPE == PTTYPE_EPT #define pt_element_t u64 #define guest_walker guest_walkerEPT @@ -65,7 +64,9 @@ #define PT_GUEST_DIRTY_SHIFT 9 #define PT_GUEST_ACCESSED_SHIFT 8 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) - #define CMPXCHG cmpxchg64 + #ifdef CONFIG_X86_64 + #define CMPXCHG "cmpxchgq" + #endif #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL #else #error Invalid PTTYPE value @@ -147,43 +148,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, pt_element_t __user *ptep_user, unsigned index, pt_element_t orig_pte, pt_element_t new_pte) { - int npages; - pt_element_t ret; - pt_element_t *table; - struct page *page; - - npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); - if (likely(npages == 1)) { - table = kmap_atomic(page); - ret = CMPXCHG(&table[index], orig_pte, new_pte); - kunmap_atomic(table); - - kvm_release_page_dirty(page); - } else { - struct vm_area_struct *vma; - unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; - unsigned long pfn; - unsigned long paddr; - - mmap_read_lock(current->mm); - vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); - if (!vma || !(vma->vm_flags & VM_PFNMAP)) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - paddr = pfn << PAGE_SHIFT; - table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); - if (!table) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - ret = CMPXCHG(&table[index], orig_pte, new_pte); - memunmap(table); - mmap_read_unlock(current->mm); - } + signed char r; - return (ret != orig_pte); + if (!user_access_begin(ptep_user, sizeof(pt_element_t))) + return -EFAULT; + +#ifdef CMPXCHG + asm volatile("1:" LOCK_PREFIX CMPXCHG " 
%[new], %[ptr]\n" + "setnz %b[r]\n" + "2:" + _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r]) + : [ptr] "+m" (*ptep_user), + [old] "+a" (orig_pte), + [r] "=q" (r) + : [new] "r" (new_pte) + : "memory"); +#else + asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n" + "setnz %b[r]\n" + "2:" + _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r]) + : [ptr] "+m" (*ptep_user), + [old] "+A" (orig_pte), + [r] "=q" (r) + : [new_lo] "b" ((u32)new_pte), + [new_hi] "c" ((u32)(new_pte >> 32)) + : "memory"); +#endif + + user_access_end(); + return r; } static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, @@ -339,7 +333,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu, */ static int FNAME(walk_addr_generic)(struct guest_walker *walker, struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gpa_t addr, u32 access) + gpa_t addr, u64 access) { int ret; pt_element_t pte; @@ -347,7 +341,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, gfn_t table_gfn; u64 pt_access, pte_access; unsigned index, accessed_dirty, pte_pkey; - unsigned nested_access; + u64 nested_access; gpa_t pte_gpa; bool have_ad; int offset; @@ -540,7 +534,7 @@ error: } static int FNAME(walk_addr)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, gpa_t addr, u32 access) + struct kvm_vcpu *vcpu, gpa_t addr, u64 access) { return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, access); @@ -988,7 +982,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gpa_t addr, u32 access, + gpa_t addr, u64 access, struct x86_exception *exception) { struct guest_walker walker; diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index e7e7876251b3..d71d177ae6b8 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); /* Initializes the TDP MMU for the VM, if enabled. */ -bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) +int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { + struct workqueue_struct *wq; + if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) - return false; + return 0; + + wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); + if (!wq) + return -ENOMEM; /* This should not be changed for the lifetime of the VM. */ kvm->arch.tdp_mmu_enabled = true; - INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); - kvm->arch.tdp_mmu_zap_wq = - alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); - - return true; + kvm->arch.tdp_mmu_zap_wq = wq; + return 1; } /* Arbitrarily returns true so that this may be used in if statements. */ @@ -906,10 +909,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) } /* - * Tears down the mappings for the range of gfns, [start, end), and frees the - * non-root pages mapping GFNs strictly within that range. Returns true if - * SPTEs have been cleared and a TLB flush is needed before releasing the - * MMU lock. + * Zap leafs SPTEs for the range of gfns, [start, end). Returns true if SPTEs + * have been cleared and a TLB flush is needed before releasing the MMU lock. * * If can_yield is true, will release the MMU lock and reschedule if the * scheduler needs the CPU or there is contention on the MMU lock. 
If this @@ -917,42 +918,25 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) * the caller must ensure it does not supply too large a GFN range, or the * operation can cause a soft lockup. */ -static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield, bool flush) +static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, + gfn_t start, gfn_t end, bool can_yield, bool flush) { - bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host()); struct tdp_iter iter; - /* - * No need to try to step down in the iterator when zapping all SPTEs, - * zapping the top-level non-leaf SPTEs will recurse on their children. - */ - int min_level = zap_all ? root->role.level : PG_LEVEL_4K; - end = min(end, tdp_mmu_max_gfn_host()); lockdep_assert_held_write(&kvm->mmu_lock); rcu_read_lock(); - for_each_tdp_pte_min_level(iter, root, min_level, start, end) { + for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { if (can_yield && tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { flush = false; continue; } - if (!is_shadow_present_pte(iter.old_spte)) - continue; - - /* - * If this is a non-last-level SPTE that covers a larger range - * than should be zapped, continue, and zap the mappings at a - * lower level, except when zapping all SPTEs. - */ - if (!zap_all && - (iter.gfn < start || - iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; @@ -960,17 +944,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, flush = true; } - /* - * Need to flush before releasing RCU. TODO: do it only if intermediate - * page tables were zapped; there is no need to flush under RCU protection - * if no 'struct kvm_mmu_page' is freed. - */ - if (flush) - kvm_flush_remote_tlbs_with_address(kvm, start, end - start); - rcu_read_unlock(); - return false; + /* + * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need + * to provide RCU protection as no 'struct kvm_mmu_page' will be freed. + */ + return flush; } /* @@ -979,13 +959,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, * SPTEs have been cleared and a TLB flush is needed before releasing the * MMU lock. 
*/ -bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, - gfn_t end, bool can_yield, bool flush) +bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, + bool can_yield, bool flush) { struct kvm_mmu_page *root; for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) - flush = zap_gfn_range(kvm, root, start, end, can_yield, flush); + flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush); return flush; } @@ -1233,8 +1213,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush) { - return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start, - range->end, range->may_block, flush); + return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start, + range->end, range->may_block, flush); } typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 5e5ef2576c81..c163f7cc23ca 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -15,14 +15,8 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root) void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared); -bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start, +bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, bool can_yield, bool flush); -static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, - gfn_t start, gfn_t end, bool flush) -{ - return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush); -} - bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp); void kvm_tdp_mmu_zap_all(struct kvm *kvm); void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm); @@ -72,7 +66,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte); #ifdef CONFIG_X86_64 -bool kvm_mmu_init_tdp_mmu(struct kvm *kvm); +int kvm_mmu_init_tdp_mmu(struct kvm *kvm); void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } @@ -93,7 +87,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) return sp && is_tdp_mmu_page(sp) && sp->root_count; } #else -static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; } +static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; } static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index b1a02993782b..eca39f56c231 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event, static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config, bool exclude_user, - bool exclude_kernel, bool intr, - bool in_tx, bool in_tx_cp) + bool exclude_kernel, bool intr) { struct perf_event *event; struct perf_event_attr attr = { @@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, attr.sample_period = get_sample_period(pmc, pmc->counter); - if (in_tx) - attr.config |= HSW_IN_TX; - if (in_tx_cp) { + if ((attr.config & HSW_IN_TX_CHECKPOINTED) && + guest_cpuid_is_intel(pmc->vcpu)) { /* * HSW_IN_TX_CHECKPOINTED is not supported with nonzero * period. 
Just clear the sample period so at least * allocating the counter doesn't fail. */ attr.sample_period = 0; - attr.config |= HSW_IN_TX_CHECKPOINTED; } event = perf_event_create_kernel_counter(&attr, -1, current, @@ -185,6 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) u32 type = PERF_TYPE_RAW; struct kvm *kvm = pmc->vcpu->kvm; struct kvm_pmu_event_filter *filter; + struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu); bool allow_event = true; if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) @@ -221,7 +219,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) } if (type == PERF_TYPE_RAW) - config = eventsel & AMD64_RAW_EVENT_MASK; + config = eventsel & pmu->raw_event_mask; if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) return; @@ -232,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) pmc_reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), !(eventsel & ARCH_PERFMON_EVENTSEL_OS), - eventsel & ARCH_PERFMON_EVENTSEL_INT, - (eventsel & HSW_IN_TX), - (eventsel & HSW_IN_TX_CHECKPOINTED)); + eventsel & ARCH_PERFMON_EVENTSEL_INT); } EXPORT_SYMBOL_GPL(reprogram_gp_counter); @@ -270,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc), !(en_field & 0x2), /* exclude user */ !(en_field & 0x1), /* exclude kernel */ - pmi, false, false); + pmi); } EXPORT_SYMBOL_GPL(reprogram_fixed_counter); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index b37b353ec086..a1cf9c31273b 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -726,7 +726,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; - int idx, ret = -EINVAL; + int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) @@ -737,7 +737,13 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - WARN_ON(guest_irq >= irq_rt->nr_rt_entries); + + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { struct vcpu_data vcpu_info; @@ -822,7 +828,7 @@ out: return ret; } -bool avic_check_apicv_inhibit_reasons(ulong bit) +bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason) { ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | BIT(APICV_INHIBIT_REASON_ABSENT) | @@ -833,7 +839,7 @@ bool avic_check_apicv_inhibit_reasons(ulong bit) BIT(APICV_INHIBIT_REASON_X2APIC) | BIT(APICV_INHIBIT_REASON_BLOCKIRQ); - return supported & BIT(bit); + return supported & BIT(reason); } diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index d4de52409335..24eb935b6f85 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -262,12 +262,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* MSR_EVNTSELn */ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); if (pmc) { - if (data == pmc->eventsel) - return 0; - if (!(data & pmu->reserved_bits)) { + data &= ~pmu->reserved_bits; + if (data != pmc->eventsel) reprogram_gp_counter(pmc, data); - return 0; - } + return 0; } return 1; @@ -284,6 +282,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_GP] = 
((u64)1 << 48) - 1; pmu->reserved_bits = 0xfffffff000280000ull; + pmu->raw_event_mask = AMD64_RAW_EVENT_MASK; pmu->version = 1; /* not applicable to AMD; but clean them to prevent any fall out */ pmu->counter_bitmask[KVM_PMC_FIXED] = 0; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0884c3414a1b..bd4c64b362d2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -62,20 +62,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 -#define SVM_FEATURE_LBRV (1 << 1) -#define SVM_FEATURE_SVML (1 << 2) -#define SVM_FEATURE_TSC_RATE (1 << 4) -#define SVM_FEATURE_VMCB_CLEAN (1 << 5) -#define SVM_FEATURE_FLUSH_ASID (1 << 6) -#define SVM_FEATURE_DECODE_ASSIST (1 << 7) -#define SVM_FEATURE_PAUSE_FILTER (1 << 10) - #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) -#define TSC_RATIO_RSVD 0xffffff0000000000ULL -#define TSC_RATIO_MIN 0x0000000000000001ULL -#define TSC_RATIO_MAX 0x000000ffffffffffULL - static bool erratum_383_found __read_mostly; u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; @@ -87,7 +75,6 @@ u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; static uint64_t osvw_len = 4, osvw_status; static DEFINE_PER_CPU(u64, current_tsc_ratio); -#define TSC_RATIO_DEFAULT 0x0100000000ULL static const struct svm_direct_access_msrs { u32 index; /* Index of the MSR */ @@ -480,7 +467,7 @@ static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ if (tsc_scaling) - wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); + wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); cpu_svm_disable(); @@ -526,8 +513,8 @@ static int svm_hardware_enable(void) * Set the default value, even if we don't use TSC scaling * to avoid having stale value in the msr */ - wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); - __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); + wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); + __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT); } @@ -2723,7 +2710,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; } - if (data & TSC_RATIO_RSVD) + if (data & SVM_TSC_RATIO_RSVD) return 1; svm->tsc_ratio_msr = data; @@ -2918,7 +2905,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu) * In this case AVIC was temporarily disabled for * requesting the IRQ window and we have to re-enable it. */ - kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN); + kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); ++vcpu->stat.irq_window_exits; return 1; @@ -3516,7 +3503,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu) * via AVIC. In such case, we need to temporarily disable AVIC, * and fallback to injecting IRQ via V_IRQ. */ - kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN); + kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); svm_set_vintr(svm); } } @@ -3948,6 +3935,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_cpuid_entry2 *best; + struct kvm *kvm = vcpu->kvm; vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && boot_cpu_has(X86_FEATURE_XSAVE) && @@ -3974,16 +3962,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) * is exposed to the guest, disable AVIC. 
*/ if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) - kvm_request_apicv_update(vcpu->kvm, false, - APICV_INHIBIT_REASON_X2APIC); + kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC); /* * Currently, AVIC does not work with nested virtualization. * So, we disable AVIC when cpuid for SVM is set in the L1 guest. */ if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) - kvm_request_apicv_update(vcpu->kvm, false, - APICV_INHIBIT_REASON_NESTED); + kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED); } init_vmcb_after_set_cpuid(vcpu); } @@ -4766,10 +4752,10 @@ static __init int svm_hardware_setup(void) } else { pr_info("TSC scaling supported\n"); kvm_has_tsc_control = true; - kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; - kvm_tsc_scaling_ratio_frac_bits = 32; } } + kvm_max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX; + kvm_tsc_scaling_ratio_frac_bits = 32; tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index e37bb3508cfa..f77a7d2d39dd 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -22,6 +22,8 @@ #include <asm/svm.h> #include <asm/sev-common.h> +#include "kvm_cache_regs.h" + #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) #define IOPM_SIZE PAGE_SIZE * 3 @@ -569,17 +571,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops; /* avic.c */ -#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF) -#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31 -#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) - -#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0) -#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12) -#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62) -#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63) - -#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL - int avic_ga_log_notifier(u32 ga_tag); void avic_vm_destroy(struct kvm *kvm); int avic_vm_init(struct kvm *kvm); @@ -592,7 +583,7 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu); void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu); void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu); void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu); -bool avic_check_apicv_inhibit_reasons(ulong bit); +bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason); void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr); void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr); bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c index 98aa981c04ec..8cdc62c74a96 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.c +++ b/arch/x86/kvm/svm/svm_onhyperv.c @@ -4,7 +4,6 @@ */ #include <linux/kvm_host.h> -#include "kvm_cache_regs.h" #include <asm/mshyperv.h> diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 193f5ba930d1..e3a24b8f04be 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1339,23 +1339,25 @@ TRACE_EVENT(kvm_hv_stimer_cleanup, __entry->vcpu_id, __entry->timer_index) ); -TRACE_EVENT(kvm_apicv_update_request, - TP_PROTO(bool activate, unsigned long bit), - TP_ARGS(activate, bit), +TRACE_EVENT(kvm_apicv_inhibit_changed, + TP_PROTO(int reason, bool set, unsigned long inhibits), + TP_ARGS(reason, set, inhibits), TP_STRUCT__entry( - __field(bool, activate) - __field(unsigned long, bit) + __field(int, reason) + __field(bool, set) + __field(unsigned long, inhibits) ), TP_fast_assign( - __entry->activate = activate; - __entry->bit = bit; + __entry->reason = reason; + 
__entry->set = set; + __entry->inhibits = inhibits; ), - TP_printk("%s bit=%lu", - __entry->activate ? "activate" : "deactivate", - __entry->bit) + TP_printk("%s reason=%u, inhibits=0x%lx", + __entry->set ? "set" : "cleared", + __entry->reason, __entry->inhibits) ); TRACE_EVENT(kvm_apicv_accept_irq, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 0684e519181b..bc3f8512bb64 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct kvm_pmc *pmc; u32 msr = msr_info->index; u64 data = msr_info->data; + u64 reserved_bits; switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: @@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) return 0; - if (!(data & pmu->reserved_bits)) { + reserved_bits = pmu->reserved_bits; + if ((pmc->idx == 2) && + (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) + reserved_bits ^= HSW_IN_TX_CHECKPOINTED; + if (!(data & reserved_bits)) { reprogram_gp_counter(pmc, data); return 0; } @@ -485,6 +490,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; + pmu->raw_event_mask = X86_RAW_EVENT_MASK; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry || !vcpu->kvm->arch.enable_pmu) @@ -533,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) entry = kvm_find_cpuid_entry(vcpu, 7, 0); if (entry && (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && - (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) - pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; + (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) { + pmu->reserved_bits ^= HSW_IN_TX; + pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); + } bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e8963f5af618..04d170c4b61e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2866,21 +2866,17 @@ static void enter_rmode(struct kvm_vcpu *vcpu) int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); - struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER); /* Nothing to do if hardware doesn't support EFER. 
*/ - if (!msr) + if (!vmx_find_uret_msr(vmx, MSR_EFER)) return 0; vcpu->arch.efer = efer; - if (efer & EFER_LMA) { - vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); - msr->data = efer; - } else { - vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); + if (efer & EFER_LMA) + vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE); + else + vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE); - msr->data = efer & ~EFER_LME; - } vmx_setup_uret_msrs(vmx); return 0; } @@ -2906,7 +2902,6 @@ static void enter_lmode(struct kvm_vcpu *vcpu) static void exit_lmode(struct kvm_vcpu *vcpu) { - vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } @@ -7705,14 +7700,14 @@ static void vmx_hardware_unsetup(void) free_kvm_area(); } -static bool vmx_check_apicv_inhibit_reasons(ulong bit) +static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason) { ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | BIT(APICV_INHIBIT_REASON_ABSENT) | BIT(APICV_INHIBIT_REASON_HYPERV) | BIT(APICV_INHIBIT_REASON_BLOCKIRQ); - return supported & BIT(bit); + return supported & BIT(reason); } static struct kvm_x86_ops vmx_x86_ops __initdata = { @@ -7980,12 +7975,11 @@ static __init int hardware_setup(void) if (!enable_apicv) vmx_x86_ops.sync_pir_to_irr = NULL; - if (cpu_has_vmx_tsc_scaling()) { + if (cpu_has_vmx_tsc_scaling()) kvm_has_tsc_control = true; - kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; - kvm_tsc_scaling_ratio_frac_bits = 48; - } + kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; + kvm_tsc_scaling_ratio_frac_bits = 48; kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02cf0a7e1d14..0c0ca599a353 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1748,9 +1748,6 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, { struct msr_data msr; - if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) - return KVM_MSR_RET_FILTERED; - switch (index) { case MSR_FS_BASE: case MSR_GS_BASE: @@ -1832,9 +1829,6 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, struct msr_data msr; int ret; - if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) - return KVM_MSR_RET_FILTERED; - switch (index) { case MSR_TSC_AUX: if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) @@ -1871,6 +1865,20 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, return ret; } +static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) +{ + if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) + return KVM_MSR_RET_FILTERED; + return kvm_get_msr_ignored_check(vcpu, index, data, false); +} + +static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) +{ + if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) + return KVM_MSR_RET_FILTERED; + return kvm_set_msr_ignored_check(vcpu, index, data, false); +} + int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) { return kvm_get_msr_ignored_check(vcpu, index, data, false); @@ -1953,7 +1961,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) u64 data; int r; - r = kvm_get_msr(vcpu, ecx, &data); + r = kvm_get_msr_with_filter(vcpu, ecx, &data); if (!r) { trace_kvm_msr_read(ecx, data); @@ -1978,7 +1986,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) u64 data = kvm_read_edx_eax(vcpu); int r; - r = kvm_set_msr(vcpu, ecx, data); + r = 
kvm_set_msr_with_filter(vcpu, ecx, data); if (!r) { trace_kvm_msr_write(ecx, data); @@ -5938,7 +5946,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, smp_wmb(); kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; - kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT); + kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); r = 0; split_irqchip_unlock: mutex_unlock(&kvm->lock); @@ -6335,7 +6343,7 @@ set_identity_unlock: /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ smp_wmb(); kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; - kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT); + kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); create_irqchip_unlock: mutex_unlock(&kvm->lock); break; @@ -6726,7 +6734,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu, static_call(kvm_x86_get_segment)(vcpu, var, seg); } -gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, +gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, struct x86_exception *exception) { struct kvm_mmu *mmu = vcpu->arch.mmu; @@ -6746,7 +6754,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; + u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); @@ -6756,7 +6764,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; + u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } @@ -6766,7 +6774,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; + u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } @@ -6782,7 +6790,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu, u32 access, + struct kvm_vcpu *vcpu, u64 access, struct x86_exception *exception) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; @@ -6819,7 +6827,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; + u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; @@ -6844,7 +6852,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; + u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; /* * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED @@ -6863,9 +6871,11 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception, bool system) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u32 access = 0; + u64 access = 0; - if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) + if (system) + access |= PFERR_IMPLICIT_ACCESS; + else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); @@ -6881,7 +6891,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, } static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu, u32 access, + struct kvm_vcpu *vcpu, u64 access, struct x86_exception *exception) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; @@ -6915,9 +6925,11 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v bool system) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u32 access = PFERR_WRITE_MASK; + u64 access = PFERR_WRITE_MASK; - if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) + if (system) + access |= PFERR_IMPLICIT_ACCESS; + else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, @@ -6984,7 +6996,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, bool write) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) + u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); /* @@ -7627,13 +7639,13 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, return; } -static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, - u32 msr_index, u64 *pdata) +static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, + u32 msr_index, u64 *pdata) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int r; - r = kvm_get_msr(vcpu, msr_index, pdata); + r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, complete_emulated_rdmsr, r)) { @@ -7644,13 +7656,13 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, return r; } -static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, - u32 msr_index, u64 data) +static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, + u32 msr_index, u64 data) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int r; - r = kvm_set_msr(vcpu, msr_index, data); + r = kvm_set_msr_with_filter(vcpu, msr_index, data); if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, complete_emulated_msr_access, r)) { @@ -7661,6 +7673,18 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, return r; } +static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, + u32 msr_index, u64 *pdata) +{ + return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); +} + +static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, + u32 msr_index, u64 data) +{ + return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); +} + static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); @@ -7724,6 +7748,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); } +static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) +{ + 
return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); +} + static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); @@ -7794,6 +7823,8 @@ static const struct x86_emulate_ops emulate_ops = { .set_dr = emulator_set_dr, .get_smbase = emulator_get_smbase, .set_smbase = emulator_set_smbase, + .set_msr_with_filter = emulator_set_msr_with_filter, + .get_msr_with_filter = emulator_get_msr_with_filter, .set_msr = emulator_set_msr, .get_msr = emulator_get_msr, .check_pmc = emulator_check_pmc, @@ -7806,6 +7837,7 @@ static const struct x86_emulate_ops emulate_ops = { .guest_has_long_mode = emulator_guest_has_long_mode, .guest_has_movbe = emulator_guest_has_movbe, .guest_has_fxsr = emulator_guest_has_fxsr, + .guest_has_rdpid = emulator_guest_has_rdpid, .set_nmi_mask = emulator_set_nmi_mask, .get_hflags = emulator_get_hflags, .exiting_smm = emulator_exiting_smm, @@ -9058,15 +9090,29 @@ bool kvm_apicv_activated(struct kvm *kvm) } EXPORT_SYMBOL_GPL(kvm_apicv_activated); + +static void set_or_clear_apicv_inhibit(unsigned long *inhibits, + enum kvm_apicv_inhibit reason, bool set) +{ + if (set) + __set_bit(reason, inhibits); + else + __clear_bit(reason, inhibits); + + trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); +} + static void kvm_apicv_init(struct kvm *kvm) { + unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; + init_rwsem(&kvm->arch.apicv_update_lock); - set_bit(APICV_INHIBIT_REASON_ABSENT, - &kvm->arch.apicv_inhibit_reasons); + set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); + if (!enable_apicv) - set_bit(APICV_INHIBIT_REASON_DISABLE, - &kvm->arch.apicv_inhibit_reasons); + set_or_clear_apicv_inhibit(inhibits, + APICV_INHIBIT_REASON_ABSENT, true); } static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) @@ -9740,24 +9786,21 @@ out: } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); -void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) +void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason, bool set) { unsigned long old, new; lockdep_assert_held_write(&kvm->arch.apicv_update_lock); - if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) + if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason)) return; old = new = kvm->arch.apicv_inhibit_reasons; - if (activate) - __clear_bit(bit, &new); - else - __set_bit(bit, &new); + set_or_clear_apicv_inhibit(&new, reason, set); if (!!old != !!new) { - trace_kvm_apicv_update_request(activate, bit); /* * Kick all vCPUs before setting apicv_inhibit_reasons to avoid * false positives in the sanity check WARN in svm_vcpu_run(). 
@@ -9776,20 +9819,22 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); kvm_zap_gfn_range(kvm, gfn, gfn+1); } - } else + } else { kvm->arch.apicv_inhibit_reasons = new; + } } -void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) +void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, + enum kvm_apicv_inhibit reason, bool set) { if (!enable_apicv) return; down_write(&kvm->arch.apicv_update_lock); - __kvm_request_apicv_update(kvm, activate, bit); + __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); up_write(&kvm->arch.apicv_update_lock); } -EXPORT_SYMBOL_GPL(kvm_request_apicv_update); +EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { @@ -10937,7 +10982,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) { - bool inhibit = false; + bool set = false; struct kvm_vcpu *vcpu; unsigned long i; @@ -10945,11 +10990,11 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { - inhibit = true; + set = true; break; } } - __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ); + __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); up_write(&kvm->arch.apicv_update_lock); } @@ -11557,10 +11602,8 @@ int kvm_arch_hardware_setup(void *opaque) u64 max = min(0x7fffffffULL, __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz)); kvm_max_guest_tsc_khz = max; - - kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits; } - + kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits; kvm_init_msr_list(); return 0; } @@ -11629,12 +11672,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ret = kvm_page_track_init(kvm); if (ret) - return ret; + goto out; + + ret = kvm_mmu_init_vm(kvm); + if (ret) + goto out_page_track; INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); - INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); - INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); - INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); @@ -11666,10 +11710,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_apicv_init(kvm); kvm_hv_init_vm(kvm); - kvm_mmu_init_vm(kvm); kvm_xen_init_vm(kvm); return static_call(kvm_x86_vm_init)(kvm); + +out_page_track: + kvm_page_track_cleanup(kvm); +out: + return ret; } int kvm_arch_post_init_vm(struct kvm *kvm) @@ -12593,7 +12641,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; struct x86_exception fault; - u32 access = error_code & + u64 access = error_code & (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); if (!(error_code & PFERR_PRESENT_MASK) || @@ -12933,7 +12981,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); -EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 4aa0f2b31665..bf6cc25eee76 100644 --- 
a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -39,8 +39,8 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) } do { - ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true, - gpa, PAGE_SIZE, false); + ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN, + gpa, PAGE_SIZE); if (ret) goto out; @@ -1025,8 +1025,7 @@ static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm break; idx = srcu_read_lock(&kvm->srcu); - rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, - PAGE_SIZE, false); + rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE); srcu_read_unlock(&kvm->srcu, idx); } while(!rc); diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c index 1f8a8f895173..50734a23034c 100644 --- a/arch/x86/lib/csum-partial_64.c +++ b/arch/x86/lib/csum-partial_64.c @@ -93,7 +93,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum) buff += 8; } if (len & 7) { -#ifdef CONFIG_DCACHE_WORD_ACCESS unsigned int shift = (8 - (len & 7)) * 8; unsigned long trail; @@ -103,31 +102,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum) "adcq $0,%[res]" : [res] "+r" (temp64) : [trail] "r" (trail)); -#else - if (len & 4) { - asm("addq %[val],%[res]\n\t" - "adcq $0,%[res]" - : [res] "+r" (temp64) - : [val] "r" ((u64)*(u32 *)buff) - : "memory"); - buff += 4; - } - if (len & 2) { - asm("addq %[val],%[res]\n\t" - "adcq $0,%[res]" - : [res] "+r" (temp64) - : [val] "r" ((u64)*(u16 *)buff) - : "memory"); - buff += 2; - } - if (len & 1) { - asm("addq %[val],%[res]\n\t" - "adcq $0,%[res]" - : [res] "+r" (temp64) - : [val] "r" ((u64)*(u8 *)buff) - : "memory"); - } -#endif } result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff); if (unlikely(odd)) { diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index ead7e5b3a975..1bcd42c53039 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -9,6 +9,7 @@ endmenu config UML_X86 def_bool y select ARCH_BINFMT_ELF_EXTRA_PHDRS if X86_32 + select DCACHE_WORD_ACCESS config 64BIT bool "64-bit kernel" if "$(SUBARCH)" = "x86" diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 0430926426fe..8dfe62786cd5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q, return pol && test_bit(pol->plid, q->blkcg_pols); } -/** - * blkg_free - free a blkg - * @blkg: blkg to free - * - * Free @blkg which may be partially allocated. - */ -static void blkg_free(struct blkcg_gq *blkg) +static void blkg_free_workfn(struct work_struct *work) { + struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, + free_work); int i; - if (!blkg) - return; - for (i = 0; i < BLKCG_MAX_POLS; i++) if (blkg->pd[i]) blkcg_policy[i]->pd_free_fn(blkg->pd[i]); @@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(blkg); } +/** + * blkg_free - free a blkg + * @blkg: blkg to free + * + * Free @blkg which may be partially allocated. 
+ */ +static void blkg_free(struct blkcg_gq *blkg) +{ + if (!blkg) + return; + + /* + * Both ->pd_free_fn() and request queue's release handler may + * sleep, so free us by scheduling one work func + */ + INIT_WORK(&blkg->free_work, blkg_free_workfn); + schedule_work(&blkg->free_work); +} + static void __blkg_release(struct rcu_head *rcu) { struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 11f49f78db32..df9cfe4ca532 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio) task_lock(task); if (task->flags & PF_EXITING) { - err = -ESRCH; kmem_cache_free(iocontext_cachep, ioc); goto out; } @@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio) task->io_context->ioprio = ioprio; out: task_unlock(task); - return err; + return 0; } EXPORT_SYMBOL_GPL(set_task_ioprio); diff --git a/block/blk-mq.c b/block/blk-mq.c index e6f24fa4a4c2..ed3ed86f7dd2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head, return true; } -static void blk_mq_elv_switch_back(struct list_head *head, - struct request_queue *q) +static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head, + struct request_queue *q) { struct blk_mq_qe_pair *qe; - struct elevator_type *t = NULL; list_for_each_entry(qe, head, node) - if (qe->q == q) { - t = qe->type; - break; - } + if (qe->q == q) + return qe; - if (!t) - return; + return NULL; +} +static void blk_mq_elv_switch_back(struct list_head *head, + struct request_queue *q) +{ + struct blk_mq_qe_pair *qe; + struct elevator_type *t; + + qe = blk_lookup_qe_pair(head, q); + if (!qe) + return; + t = qe->type; list_del(&qe->node); kfree(qe); diff --git a/block/blk-wbt.h b/block/blk-wbt.h index 2eb01becde8c..7e44eccc676d 100644 --- a/block/blk-wbt.h +++ b/block/blk-wbt.h @@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *); #else -static inline void wbt_track(struct request *rq, enum wbt_flags flags) -{ -} static inline int wbt_init(struct request_queue *q) { return -EINVAL; diff --git a/block/genhd.c b/block/genhd.c index c9a4fc90d3e9..b8b6759d670f 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void) { int idx; - idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL); + idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL); if (idx == -ENOSPC) return -EBUSY; return idx; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 96881d5babd9..9676a1d214bc 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, unsigned int set_size) { struct drbd_request *r; - struct drbd_request *req = NULL; + struct drbd_request *req = NULL, *tmp = NULL; int expect_epoch = 0; int expect_size = 0; @@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, * to catch requests being barrier-acked "unexpectedly". * It usually should find the same req again, or some READ preceding it. 
*/ list_for_each_entry(req, &connection->transfer_log, tl_requests) - if (req->epoch == expect_epoch) + if (req->epoch == expect_epoch) { + tmp = req; break; + } + req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { if (req->epoch != expect_epoch) break; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c04394518b07..75be0e16770a 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - m->bio->bi_status = errno_to_blk_status(m->error); + if (unlikely(m->error)) + m->bio->bi_status = errno_to_blk_status(m->error); bio_endio(m->bio); dec_ap_bio(device); } @@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) { struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + struct drbd_request *iter = req; if (!connection) return; if (connection->req_next != req) return; - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { - const unsigned s = req->rq_state; - if (s & RQ_NET_QUEUED) + + req = NULL; + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { + const unsigned int s = iter->rq_state; + + if (s & RQ_NET_QUEUED) { + req = iter; break; + } } - if (&req->tl_requests == &connection->transfer_log) - req = NULL; connection->req_next = req; } @@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) { struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + struct drbd_request *iter = req; if (!connection) return; if (connection->req_ack_pending != req) return; - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { - const unsigned s = req->rq_state; - if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) + + req = NULL; + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { + const unsigned int s = iter->rq_state; + + if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) { + req = iter; break; + } } - if (&req->tl_requests == &connection->transfer_log) - req = NULL; connection->req_ack_pending = req; } @@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) { struct drbd_connection *connection = peer_device ? 
peer_device->connection : NULL; + struct drbd_request *iter = req; if (!connection) return; if (connection->req_not_net_done != req) return; - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { - const unsigned s = req->rq_state; - if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) + + req = NULL; + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { + const unsigned int s = iter->rq_state; + + if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) { + req = iter; break; + } } - if (&req->tl_requests == &connection->transfer_log) - req = NULL; connection->req_not_net_done = req; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3e636a75c83a..a58595f5ee2c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1591,6 +1591,7 @@ struct compat_loop_info { compat_ulong_t lo_inode; /* ioctl r/o */ compat_dev_t lo_rdevice; /* ioctl r/o */ compat_int_t lo_offset; + compat_int_t lo_encrypt_type; /* obsolete, ignored */ compat_int_t lo_encrypt_key_size; /* ioctl w/o */ compat_int_t lo_flags; /* ioctl r/o */ char lo_name[LO_NAME_SIZE]; diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 4db9a8c244af..e094d2b8b5a9 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio) { struct bio_vec bvec; struct bvec_iter iter; - struct device *dev = bio->bi_disk->private_data; + struct device *dev = bio->bi_bdev->bd_disk->private_data; u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; bio_for_each_segment(bvec, bio, iter) { diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index d1e26461a64e..de42458195bc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, if (rc) goto unmap; - for (n = 0, i = 0; n < nseg; n++) { + for (n = 0; n < nseg; n++) { uint8_t first_sect, last_sect; if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 378262ec47ae..003056d4f7f5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -576,7 +576,7 @@ struct setup_rw_req { struct blkif_request *ring_req; grant_ref_t gref_head; unsigned int id; - /* Only used when persistent grant is used and it's a read request */ + /* Only used when persistent grant is used and it's a write request */ bool need_copy; unsigned int bvec_off; char *bvec_data; diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index c0aeedd66f02..ff71dd662880 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -47,6 +47,10 @@ config CPU_IDLE_GOV_HALTPOLL config DT_IDLE_STATES bool +config DT_IDLE_GENPD + depends on PM_GENERIC_DOMAINS_OF + bool + menu "ARM CPU Idle Drivers" depends on ARM || ARM64 source "drivers/cpuidle/Kconfig.arm" @@ -62,6 +66,11 @@ depends on PPC source "drivers/cpuidle/Kconfig.powerpc" endmenu +menu "RISC-V CPU Idle Drivers" +depends on RISCV +source "drivers/cpuidle/Kconfig.riscv" +endmenu + config HALTPOLL_CPUIDLE tristate "Halt poll cpuidle driver" depends on X86 && KVM_GUEST diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 15d6c46c0a47..be7f512109f7 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -27,6 +27,7 @@ config ARM_PSCI_CPUIDLE_DOMAIN bool "PSCI CPU idle Domain" depends on ARM_PSCI_CPUIDLE depends on PM_GENERIC_DOMAINS_OF + select DT_IDLE_GENPD default y help 
Select this to enable the PSCI based CPUidle driver to use PM domains, diff --git a/drivers/cpuidle/Kconfig.riscv b/drivers/cpuidle/Kconfig.riscv new file mode 100644 index 000000000000..78518c26af74 --- /dev/null +++ b/drivers/cpuidle/Kconfig.riscv @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# RISC-V CPU Idle drivers +# + +config RISCV_SBI_CPUIDLE + bool "RISC-V SBI CPU idle Driver" + depends on RISCV_SBI + select DT_IDLE_STATES + select CPU_IDLE_MULTIPLE_DRIVERS + select DT_IDLE_GENPD if PM_GENERIC_DOMAINS_OF + help + Select this option to enable RISC-V SBI firmware based CPU idle + driver for RISC-V systems. This drivers also supports hierarchical + DT based layout of the idle state. diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 26bbc5e74123..d103342b7cfc 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -6,6 +6,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o +obj-$(CONFIG_DT_IDLE_GENPD) += dt_idle_genpd.o obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o @@ -34,3 +35,7 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o # POWERPC drivers obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o + +############################################################################### +# RISC-V drivers +obj-$(CONFIG_RISCV_SBI_CPUIDLE) += cpuidle-riscv-sbi.o diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index ff2c3f8e4668..755bbdfc5b82 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -47,73 +47,14 @@ static int psci_pd_power_off(struct generic_pm_domain *pd) return 0; } -static int psci_pd_parse_state_nodes(struct genpd_power_state *states, - int state_count) -{ - int i, ret; - u32 psci_state, *psci_state_buf; - - for (i = 0; i < state_count; i++) { - ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode), - &psci_state); - if (ret) - goto free_state; - - psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL); - if (!psci_state_buf) { - ret = -ENOMEM; - goto free_state; - } - *psci_state_buf = psci_state; - states[i].data = psci_state_buf; - } - - return 0; - -free_state: - i--; - for (; i >= 0; i--) - kfree(states[i].data); - return ret; -} - -static int psci_pd_parse_states(struct device_node *np, - struct genpd_power_state **states, int *state_count) -{ - int ret; - - /* Parse the domain idle states. */ - ret = of_genpd_parse_idle_states(np, states, state_count); - if (ret) - return ret; - - /* Fill out the PSCI specifics for each found state. 
*/ - ret = psci_pd_parse_state_nodes(*states, *state_count); - if (ret) - kfree(*states); - - return ret; -} - -static void psci_pd_free_states(struct genpd_power_state *states, - unsigned int state_count) -{ - int i; - - for (i = 0; i < state_count; i++) - kfree(states[i].data); - kfree(states); -} - static int psci_pd_init(struct device_node *np, bool use_osi) { struct generic_pm_domain *pd; struct psci_pd_provider *pd_provider; struct dev_power_governor *pd_gov; - struct genpd_power_state *states = NULL; int ret = -ENOMEM, state_count = 0; - pd = kzalloc(sizeof(*pd), GFP_KERNEL); + pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node); if (!pd) goto out; @@ -121,22 +62,6 @@ static int psci_pd_init(struct device_node *np, bool use_osi) if (!pd_provider) goto free_pd; - pd->name = kasprintf(GFP_KERNEL, "%pOF", np); - if (!pd->name) - goto free_pd_prov; - - /* - * Parse the domain idle states and let genpd manage the state selection - * for those being compatible with "domain-idle-state". - */ - ret = psci_pd_parse_states(np, &states, &state_count); - if (ret) - goto free_name; - - pd->free_states = psci_pd_free_states; - pd->name = kbasename(pd->name); - pd->states = states; - pd->state_count = state_count; pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN; /* Allow power off when OSI has been successfully enabled. */ @@ -149,10 +74,8 @@ static int psci_pd_init(struct device_node *np, bool use_osi) pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL; ret = pm_genpd_init(pd, pd_gov, false); - if (ret) { - psci_pd_free_states(states, state_count); - goto free_name; - } + if (ret) + goto free_pd_prov; ret = of_genpd_add_provider_simple(np, pd); if (ret) @@ -166,12 +89,10 @@ static int psci_pd_init(struct device_node *np, bool use_osi) remove_pd: pm_genpd_remove(pd); -free_name: - kfree(pd->name); free_pd_prov: kfree(pd_provider); free_pd: - kfree(pd); + dt_idle_pd_free(pd); out: pr_err("failed to init PM domain ret=%d %pOF\n", ret, np); return ret; @@ -195,30 +116,6 @@ static void psci_pd_remove(void) } } -static int psci_pd_init_topology(struct device_node *np) -{ - struct device_node *node; - struct of_phandle_args child, parent; - int ret; - - for_each_child_of_node(np, node) { - if (of_parse_phandle_with_args(node, "power-domains", - "#power-domain-cells", 0, &parent)) - continue; - - child.np = node; - child.args_count = 0; - ret = of_genpd_add_subdomain(&parent, &child); - of_node_put(parent.np); - if (ret) { - of_node_put(node); - return ret; - } - } - - return 0; -} - static bool psci_pd_try_set_osi_mode(void) { int ret; @@ -282,7 +179,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev) goto no_pd; /* Link genpd masters/subdomains to model the CPU topology. 
*/ - ret = psci_pd_init_topology(np); + ret = dt_idle_pd_init_topology(np); if (ret) goto remove_pd; @@ -314,28 +211,3 @@ static int __init psci_idle_init_domains(void) return platform_driver_register(&psci_cpuidle_domain_driver); } subsys_initcall(psci_idle_init_domains); - -struct device *psci_dt_attach_cpu(int cpu) -{ - struct device *dev; - - dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci"); - if (IS_ERR_OR_NULL(dev)) - return dev; - - pm_runtime_irq_safe(dev); - if (cpu_online(cpu)) - pm_runtime_get_sync(dev); - - dev_pm_syscore_device(dev, true); - - return dev; -} - -void psci_dt_detach_cpu(struct device *dev) -{ - if (IS_ERR_OR_NULL(dev)) - return; - - dev_pm_domain_detach(dev, false); -} diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h index d8e925e84c27..4e132640ed64 100644 --- a/drivers/cpuidle/cpuidle-psci.h +++ b/drivers/cpuidle/cpuidle-psci.h @@ -10,8 +10,19 @@ void psci_set_domain_state(u32 state); int psci_dt_parse_state_node(struct device_node *np, u32 *state); #ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN -struct device *psci_dt_attach_cpu(int cpu); -void psci_dt_detach_cpu(struct device *dev); + +#include "dt_idle_genpd.h" + +static inline struct device *psci_dt_attach_cpu(int cpu) +{ + return dt_idle_attach_cpu(cpu, "psci"); +} + +static inline void psci_dt_detach_cpu(struct device *dev) +{ + dt_idle_detach_cpu(dev); +} + #else static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; } static inline void psci_dt_detach_cpu(struct device *dev) { } diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c new file mode 100644 index 000000000000..b459eda2cd37 --- /dev/null +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * RISC-V SBI CPU idle driver. + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt + +#include <linux/cpuidle.h> +#include <linux/cpumask.h> +#include <linux/cpu_pm.h> +#include <linux/cpu_cooling.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <asm/cpuidle.h> +#include <asm/sbi.h> +#include <asm/suspend.h> + +#include "dt_idle_states.h" +#include "dt_idle_genpd.h" + +struct sbi_cpuidle_data { + u32 *states; + struct device *dev; +}; + +struct sbi_domain_state { + bool available; + u32 state; +}; + +static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data); +static DEFINE_PER_CPU(struct sbi_domain_state, domain_state); +static bool sbi_cpuidle_use_osi; +static bool sbi_cpuidle_use_cpuhp; +static bool sbi_cpuidle_pd_allow_domain_state; + +static inline void sbi_set_domain_state(u32 state) +{ + struct sbi_domain_state *data = this_cpu_ptr(&domain_state); + + data->available = true; + data->state = state; +} + +static inline u32 sbi_get_domain_state(void) +{ + struct sbi_domain_state *data = this_cpu_ptr(&domain_state); + + return data->state; +} + +static inline void sbi_clear_domain_state(void) +{ + struct sbi_domain_state *data = this_cpu_ptr(&domain_state); + + data->available = false; +} + +static inline bool sbi_is_domain_state_available(void) +{ + struct sbi_domain_state *data = this_cpu_ptr(&domain_state); + + return data->available; +} + +static int sbi_suspend_finisher(unsigned long suspend_type, + unsigned long resume_addr, + unsigned long opaque) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, + suspend_type, resume_addr, opaque, 0, 0, 0); + + return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; +} + +static int sbi_suspend(u32 state) +{ + if (state & SBI_HSM_SUSP_NON_RET_BIT) + return cpu_suspend(state, sbi_suspend_finisher); + else + return sbi_suspend_finisher(state, 0, 0); +} + +static int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + u32 *states = __this_cpu_read(sbi_cpuidle_data.states); + + return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]); +} + +static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx, + bool s2idle) +{ + struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data); + u32 *states = data->states; + struct device *pd_dev = data->dev; + u32 state; + int ret; + + ret = cpu_pm_enter(); + if (ret) + return -1; + + /* Do runtime PM to manage a hierarchical CPU toplogy. */ + rcu_irq_enter_irqson(); + if (s2idle) + dev_pm_genpd_suspend(pd_dev); + else + pm_runtime_put_sync_suspend(pd_dev); + rcu_irq_exit_irqson(); + + if (sbi_is_domain_state_available()) + state = sbi_get_domain_state(); + else + state = states[idx]; + + ret = sbi_suspend(state) ? -1 : idx; + + rcu_irq_enter_irqson(); + if (s2idle) + dev_pm_genpd_resume(pd_dev); + else + pm_runtime_get_sync(pd_dev); + rcu_irq_exit_irqson(); + + cpu_pm_exit(); + + /* Clear the domain state to start fresh when back from idle. 
*/ + sbi_clear_domain_state(); + return ret; +} + +static int sbi_enter_domain_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + return __sbi_enter_domain_idle_state(dev, drv, idx, false); +} + +static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int idx) +{ + return __sbi_enter_domain_idle_state(dev, drv, idx, true); +} + +static int sbi_cpuidle_cpuhp_up(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev); + + if (pd_dev) + pm_runtime_get_sync(pd_dev); + + return 0; +} + +static int sbi_cpuidle_cpuhp_down(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev); + + if (pd_dev) { + pm_runtime_put_sync(pd_dev); + /* Clear domain state to start fresh at next online. */ + sbi_clear_domain_state(); + } + + return 0; +} + +static void sbi_idle_init_cpuhp(void) +{ + int err; + + if (!sbi_cpuidle_use_cpuhp) + return; + + err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, + "cpuidle/sbi:online", + sbi_cpuidle_cpuhp_up, + sbi_cpuidle_cpuhp_down); + if (err) + pr_warn("Failed %d while setup cpuhp state\n", err); +} + +static const struct of_device_id sbi_cpuidle_state_match[] = { + { .compatible = "riscv,idle-state", + .data = sbi_cpuidle_enter_state }, + { }, +}; + +static bool sbi_suspend_state_is_valid(u32 state) +{ + if (state > SBI_HSM_SUSPEND_RET_DEFAULT && + state < SBI_HSM_SUSPEND_RET_PLATFORM) + return false; + if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && + state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) + return false; + return true; +} + +static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) +{ + int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state); + + if (err) { + pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np); + return err; + } + + if (!sbi_suspend_state_is_valid(*state)) { + pr_warn("Invalid SBI suspend state %#x\n", *state); + return -EINVAL; + } + + return 0; +} + +static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv, + struct sbi_cpuidle_data *data, + unsigned int state_count, int cpu) +{ + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (!sbi_cpuidle_use_osi) + return 0; + + data->dev = dt_idle_attach_cpu(cpu, "sbi"); + if (IS_ERR_OR_NULL(data->dev)) + return PTR_ERR_OR_ZERO(data->dev); + + /* + * Using the deepest state for the CPU to trigger a potential selection + * of a shared state for the domain, assumes the domain states are all + * deeper states. 
+ */ + drv->states[state_count - 1].enter = sbi_enter_domain_idle_state; + drv->states[state_count - 1].enter_s2idle = + sbi_enter_s2idle_domain_idle_state; + sbi_cpuidle_use_cpuhp = true; + + return 0; +} + +static int sbi_cpuidle_dt_init_states(struct device *dev, + struct cpuidle_driver *drv, + unsigned int cpu, + unsigned int state_count) +{ + struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); + struct device_node *state_node; + struct device_node *cpu_node; + u32 *states; + int i, ret; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) + return -ENODEV; + + states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); + if (!states) { + ret = -ENOMEM; + goto fail; + } + + /* Parse SBI specific details from state DT nodes */ + for (i = 1; i < state_count; i++) { + state_node = of_get_cpu_state_node(cpu_node, i - 1); + if (!state_node) + break; + + ret = sbi_dt_parse_state_node(state_node, &states[i]); + of_node_put(state_node); + + if (ret) + return ret; + + pr_debug("sbi-state %#x index %d\n", states[i], i); + } + if (i != state_count) { + ret = -ENODEV; + goto fail; + } + + /* Initialize optional data, used for the hierarchical topology. */ + ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); + if (ret < 0) + return ret; + + /* Store states in the per-cpu struct. */ + data->states = states; + +fail: + of_node_put(cpu_node); + + return ret; +} + +static void sbi_cpuidle_deinit_cpu(int cpu) +{ + struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); + + dt_idle_detach_cpu(data->dev); + sbi_cpuidle_use_cpuhp = false; +} + +static int sbi_cpuidle_init_cpu(struct device *dev, int cpu) +{ + struct cpuidle_driver *drv; + unsigned int state_count = 0; + int ret = 0; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + drv->name = "sbi_cpuidle"; + drv->owner = THIS_MODULE; + drv->cpumask = (struct cpumask *)cpumask_of(cpu); + + /* RISC-V architectural WFI to be represented as state index 0. */ + drv->states[0].enter = sbi_cpuidle_enter_state; + drv->states[0].exit_latency = 1; + drv->states[0].target_residency = 1; + drv->states[0].power_usage = UINT_MAX; + strcpy(drv->states[0].name, "WFI"); + strcpy(drv->states[0].desc, "RISC-V WFI"); + + /* + * If no DT idle states are detected (ret == 0) let the driver + * initialization fail accordingly since there is no reason to + * initialize the idle driver if only wfi is supported, the + * default archictectural back-end already executes wfi + * on idle entry. + */ + ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1); + if (ret <= 0) { + pr_debug("HART%ld: failed to parse DT idle states\n", + cpuid_to_hartid_map(cpu)); + return ret ? : -ENODEV; + } + state_count = ret + 1; /* Include WFI state as well */ + + /* Initialize idle states from DT. */ + ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count); + if (ret) { + pr_err("HART%ld: failed to init idle states\n", + cpuid_to_hartid_map(cpu)); + return ret; + } + + ret = cpuidle_register(drv, NULL); + if (ret) + goto deinit; + + cpuidle_cooling_register(drv); + + return 0; +deinit: + sbi_cpuidle_deinit_cpu(cpu); + return ret; +} + +static void sbi_cpuidle_domain_sync_state(struct device *dev) +{ + /* + * All devices have now been attached/probed to the PM domain + * topology, hence it's fine to allow domain states to be picked. 
+ */ + sbi_cpuidle_pd_allow_domain_state = true; +} + +#ifdef CONFIG_DT_IDLE_GENPD + +static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd) +{ + struct genpd_power_state *state = &pd->states[pd->state_idx]; + u32 *pd_state; + + if (!state->data) + return 0; + + if (!sbi_cpuidle_pd_allow_domain_state) + return -EBUSY; + + /* OSI mode is enabled, set the corresponding domain state. */ + pd_state = state->data; + sbi_set_domain_state(*pd_state); + + return 0; +} + +struct sbi_pd_provider { + struct list_head link; + struct device_node *node; +}; + +static LIST_HEAD(sbi_pd_providers); + +static int sbi_pd_init(struct device_node *np) +{ + struct generic_pm_domain *pd; + struct sbi_pd_provider *pd_provider; + struct dev_power_governor *pd_gov; + int ret = -ENOMEM, state_count = 0; + + pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node); + if (!pd) + goto out; + + pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL); + if (!pd_provider) + goto free_pd; + + pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN; + + /* Allow power off when OSI is available. */ + if (sbi_cpuidle_use_osi) + pd->power_off = sbi_cpuidle_pd_power_off; + else + pd->flags |= GENPD_FLAG_ALWAYS_ON; + + /* Use governor for CPU PM domains if it has some states to manage. */ + pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL; + + ret = pm_genpd_init(pd, pd_gov, false); + if (ret) + goto free_pd_prov; + + ret = of_genpd_add_provider_simple(np, pd); + if (ret) + goto remove_pd; + + pd_provider->node = of_node_get(np); + list_add(&pd_provider->link, &sbi_pd_providers); + + pr_debug("init PM domain %s\n", pd->name); + return 0; + +remove_pd: + pm_genpd_remove(pd); +free_pd_prov: + kfree(pd_provider); +free_pd: + dt_idle_pd_free(pd); +out: + pr_err("failed to init PM domain ret=%d %pOF\n", ret, np); + return ret; +} + +static void sbi_pd_remove(void) +{ + struct sbi_pd_provider *pd_provider, *it; + struct generic_pm_domain *genpd; + + list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) { + of_genpd_del_provider(pd_provider->node); + + genpd = of_genpd_remove_last(pd_provider->node); + if (!IS_ERR(genpd)) + kfree(genpd); + + of_node_put(pd_provider->node); + list_del(&pd_provider->link); + kfree(pd_provider); + } +} + +static int sbi_genpd_probe(struct device_node *np) +{ + struct device_node *node; + int ret = 0, pd_count = 0; + + if (!np) + return -ENODEV; + + /* + * Parse child nodes for the "#power-domain-cells" property and + * initialize a genpd/genpd-of-provider pair when it's found. + */ + for_each_child_of_node(np, node) { + if (!of_find_property(node, "#power-domain-cells", NULL)) + continue; + + ret = sbi_pd_init(node); + if (ret) + goto put_node; + + pd_count++; + } + + /* Bail out if not using the hierarchical CPU topology. */ + if (!pd_count) + goto no_pd; + + /* Link genpd masters/subdomains to model the CPU topology. 
*/ + ret = dt_idle_pd_init_topology(np); + if (ret) + goto remove_pd; + + return 0; + +put_node: + of_node_put(node); +remove_pd: + sbi_pd_remove(); + pr_err("failed to create CPU PM domains ret=%d\n", ret); +no_pd: + return ret; +} + +#else + +static inline int sbi_genpd_probe(struct device_node *np) +{ + return 0; +} + +#endif + +static int sbi_cpuidle_probe(struct platform_device *pdev) +{ + int cpu, ret; + struct cpuidle_driver *drv; + struct cpuidle_device *dev; + struct device_node *np, *pds_node; + + /* Detect OSI support based on CPU DT nodes */ + sbi_cpuidle_use_osi = true; + for_each_possible_cpu(cpu) { + np = of_cpu_device_node_get(cpu); + if (np && + of_find_property(np, "power-domains", NULL) && + of_find_property(np, "power-domain-names", NULL)) { + continue; + } else { + sbi_cpuidle_use_osi = false; + break; + } + } + + /* Populate generic power domains from DT nodes */ + pds_node = of_find_node_by_path("/cpus/power-domains"); + if (pds_node) { + ret = sbi_genpd_probe(pds_node); + of_node_put(pds_node); + if (ret) + return ret; + } + + /* Initialize CPU idle driver for each CPU */ + for_each_possible_cpu(cpu) { + ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu); + if (ret) { + pr_debug("HART%ld: idle driver init failed\n", + cpuid_to_hartid_map(cpu)); + goto out_fail; + } + } + + /* Setup CPU hotplut notifiers */ + sbi_idle_init_cpuhp(); + + pr_info("idle driver registered for all CPUs\n"); + + return 0; + +out_fail: + while (--cpu >= 0) { + dev = per_cpu(cpuidle_devices, cpu); + drv = cpuidle_get_cpu_driver(dev); + cpuidle_unregister(drv); + sbi_cpuidle_deinit_cpu(cpu); + } + + return ret; +} + +static struct platform_driver sbi_cpuidle_driver = { + .probe = sbi_cpuidle_probe, + .driver = { + .name = "sbi-cpuidle", + .sync_state = sbi_cpuidle_domain_sync_state, + }, +}; + +static int __init sbi_cpuidle_init(void) +{ + int ret; + struct platform_device *pdev; + + /* + * The SBI HSM suspend function is only available when: + * 1) SBI version is 0.3 or higher + * 2) SBI HSM extension is available + */ + if ((sbi_spec_version < sbi_mk_version(0, 3)) || + sbi_probe_extension(SBI_EXT_HSM) <= 0) { + pr_info("HSM suspend not available\n"); + return 0; + } + + ret = platform_driver_register(&sbi_cpuidle_driver); + if (ret) + return ret; + + pdev = platform_device_register_simple("sbi-cpuidle", + -1, NULL, 0); + if (IS_ERR(pdev)) { + platform_driver_unregister(&sbi_cpuidle_driver); + return PTR_ERR(pdev); + } + + return 0; +} +device_initcall(sbi_cpuidle_init); diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c new file mode 100644 index 000000000000..b37165514d4e --- /dev/null +++ b/drivers/cpuidle/dt_idle_genpd.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PM domains for CPUs via genpd. + * + * Copyright (C) 2019 Linaro Ltd. + * Author: Ulf Hansson <ulf.hansson@linaro.org> + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "dt-idle-genpd: " fmt + +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "dt_idle_genpd.h" + +static int pd_parse_state_nodes( + int (*parse_state)(struct device_node *, u32 *), + struct genpd_power_state *states, int state_count) +{ + int i, ret; + u32 state, *state_buf; + + for (i = 0; i < state_count; i++) { + ret = parse_state(to_of_node(states[i].fwnode), &state); + if (ret) + goto free_state; + + state_buf = kmalloc(sizeof(u32), GFP_KERNEL); + if (!state_buf) { + ret = -ENOMEM; + goto free_state; + } + *state_buf = state; + states[i].data = state_buf; + } + + return 0; + +free_state: + i--; + for (; i >= 0; i--) + kfree(states[i].data); + return ret; +} + +static int pd_parse_states(struct device_node *np, + int (*parse_state)(struct device_node *, u32 *), + struct genpd_power_state **states, + int *state_count) +{ + int ret; + + /* Parse the domain idle states. */ + ret = of_genpd_parse_idle_states(np, states, state_count); + if (ret) + return ret; + + /* Fill out the dt specifics for each found state. */ + ret = pd_parse_state_nodes(parse_state, *states, *state_count); + if (ret) + kfree(*states); + + return ret; +} + +static void pd_free_states(struct genpd_power_state *states, + unsigned int state_count) +{ + int i; + + for (i = 0; i < state_count; i++) + kfree(states[i].data); + kfree(states); +} + +void dt_idle_pd_free(struct generic_pm_domain *pd) +{ + pd_free_states(pd->states, pd->state_count); + kfree(pd->name); + kfree(pd); +} + +struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np, + int (*parse_state)(struct device_node *, u32 *)) +{ + struct generic_pm_domain *pd; + struct genpd_power_state *states = NULL; + int ret, state_count = 0; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + goto out; + + pd->name = kasprintf(GFP_KERNEL, "%pOF", np); + if (!pd->name) + goto free_pd; + + /* + * Parse the domain idle states and let genpd manage the state selection + * for those being compatible with "domain-idle-state". 
+ */ + ret = pd_parse_states(np, parse_state, &states, &state_count); + if (ret) + goto free_name; + + pd->free_states = pd_free_states; + pd->name = kbasename(pd->name); + pd->states = states; + pd->state_count = state_count; + + pr_debug("alloc PM domain %s\n", pd->name); + return pd; + +free_name: + kfree(pd->name); +free_pd: + kfree(pd); +out: + pr_err("failed to alloc PM domain %pOF\n", np); + return NULL; +} + +int dt_idle_pd_init_topology(struct device_node *np) +{ + struct device_node *node; + struct of_phandle_args child, parent; + int ret; + + for_each_child_of_node(np, node) { + if (of_parse_phandle_with_args(node, "power-domains", + "#power-domain-cells", 0, &parent)) + continue; + + child.np = node; + child.args_count = 0; + ret = of_genpd_add_subdomain(&parent, &child); + of_node_put(parent.np); + if (ret) { + of_node_put(node); + return ret; + } + } + + return 0; +} + +struct device *dt_idle_attach_cpu(int cpu, const char *name) +{ + struct device *dev; + + dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name); + if (IS_ERR_OR_NULL(dev)) + return dev; + + pm_runtime_irq_safe(dev); + if (cpu_online(cpu)) + pm_runtime_get_sync(dev); + + dev_pm_syscore_device(dev, true); + + return dev; +} + +void dt_idle_detach_cpu(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev)) + return; + + dev_pm_domain_detach(dev, false); +} diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h new file mode 100644 index 000000000000..a95483d08a02 --- /dev/null +++ b/drivers/cpuidle/dt_idle_genpd.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_IDLE_GENPD +#define __DT_IDLE_GENPD + +struct device_node; +struct generic_pm_domain; + +#ifdef CONFIG_DT_IDLE_GENPD + +void dt_idle_pd_free(struct generic_pm_domain *pd); + +struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np, + int (*parse_state)(struct device_node *, u32 *)); + +int dt_idle_pd_init_topology(struct device_node *np); + +struct device *dt_idle_attach_cpu(int cpu, const char *name); + +void dt_idle_detach_cpu(struct device *dev); + +#else + +static inline void dt_idle_pd_free(struct generic_pm_domain *pd) +{ +} + +static inline struct generic_pm_domain *dt_idle_pd_alloc( + struct device_node *np, + int (*parse_state)(struct device_node *, u32 *)) +{ + return NULL; +} + +static inline int dt_idle_pd_init_topology(struct device_node *np) +{ + return 0; +} + +static inline struct device *dt_idle_attach_cpu(int cpu, const char *name) +{ + return NULL; +} + +static inline void dt_idle_detach_cpu(struct device *dev) +{ +} + +#endif + +#endif diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 4081cb6cf7b3..4277853c7535 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -210,7 +210,7 @@ struct dm_table { #define DM_TIO_MAGIC 28714 struct dm_target_io { unsigned short magic; - unsigned short flags; + blk_short_t flags; unsigned int target_bio_nr; struct dm_io *io; struct dm_target *ti; @@ -244,7 +244,7 @@ static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit) #define DM_IO_MAGIC 19577 struct dm_io { unsigned short magic; - unsigned short flags; + blk_short_t flags; atomic_t io_count; struct mapped_device *md; struct bio *orig_bio; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index c58a5111cb57..ad2d5faa2ebb 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2472,9 +2472,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, dm_integrity_io_error(ic, "invalid sector 
in journal", -EIO); sec &= ~(sector_t)(ic->sectors_per_block - 1); } + if (unlikely(sec >= ic->provided_data_sectors)) { + journal_entry_set_unused(je); + continue; + } } - if (unlikely(sec >= ic->provided_data_sectors)) - continue; get_area_and_offset(ic, sec, &area, &offset); restore_last_bytes(ic, access_journal_data(ic, i, j), je); for (k = j + 1; k < ic->journal_section_entries; k++) { diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 901abd6dea41..87310fceb0d8 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -891,15 +891,21 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) struct hash_cell *hc = NULL; if (*param->uuid) { - if (*param->name || param->dev) + if (*param->name || param->dev) { + DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx", + param->uuid, param->name, (unsigned long long)param->dev); return NULL; + } hc = __get_uuid_cell(param->uuid); if (!hc) return NULL; } else if (*param->name) { - if (param->dev) + if (param->dev) { + DMERR("Invalid ioctl structure: name %s, dev %llx", + param->name, (unsigned long long)param->dev); return NULL; + } hc = __get_name_cell(param->name); if (!hc) @@ -1851,8 +1857,11 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern if (copy_from_user(param_kernel, user, minimum_data_size)) return -EFAULT; - if (param_kernel->data_size < minimum_data_size) + if (param_kernel->data_size < minimum_data_size) { + DMERR("Invalid data size in the ioctl structure: %u", + param_kernel->data_size); return -EINVAL; + } secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ad2e0bbeb559..3c5fad7c4ee6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -892,13 +892,19 @@ static void dm_io_complete(struct dm_io *io) if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); - if (io_error == BLK_STS_DM_REQUEUE) { - /* - * Upper layer won't help us poll split bio, io->orig_bio - * may only reflect a subset of the pre-split original, - * so clear REQ_POLLED in case of requeue - */ - bio->bi_opf &= ~REQ_POLLED; + if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) { + if (bio->bi_opf & REQ_POLLED) { + /* + * Upper layer won't help us poll split bio (io->orig_bio + * may only reflect a subset of the pre-split original) + * so clear REQ_POLLED in case of requeue. 
+ */ + bio->bi_opf &= ~REQ_POLLED; + if (io_error == BLK_STS_AGAIN) { + /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ + queue_io(md, bio); + } + } return; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 677fa4bf76d3..efb85c6d8e2d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1830,9 +1830,6 @@ static void nvme_update_disk_info(struct gendisk *disk, nvme_config_discard(disk, ns); blk_queue_max_write_zeroes_sectors(disk->queue, ns->ctrl->max_zeroes_sectors); - - set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || - test_bit(NVME_NS_FORCE_RO, &ns->flags)); } static inline bool nvme_first_scan(struct gendisk *disk) @@ -1891,6 +1888,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) goto out_unfreeze; } + set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) || + test_bit(NVME_NS_FORCE_RO, &ns->flags)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); @@ -1903,6 +1902,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) if (nvme_ns_head_multipath(ns->head)) { blk_mq_freeze_queue(ns->head->disk->queue); nvme_update_disk_info(ns->head->disk, ns, id); + set_disk_ro(ns->head->disk, + (id->nsattr & NVME_NS_ATTR_RO) || + test_bit(NVME_NS_FORCE_RO, &ns->flags)); nvme_mpath_revalidate_paths(ns); blk_stack_limits(&ns->head->disk->queue->limits, &ns->queue->limits, 0); @@ -3589,15 +3591,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = { NULL, }; -static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, +static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_head *h; - lockdep_assert_held(&subsys->lock); + lockdep_assert_held(&ctrl->subsys->lock); - list_for_each_entry(h, &subsys->nsheads, entry) { - if (h->ns_id != nsid) + list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { + /* + * Private namespaces can share NSIDs under some conditions. + * In that case we can't use the same ns_head for namespaces + * with the same NSID. + */ + if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; @@ -3791,7 +3798,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, } mutex_lock(&ctrl->subsys->lock); - head = nvme_find_ns_head(ctrl->subsys, nsid); + head = nvme_find_ns_head(ctrl, nsid); if (!head) { ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids); if (ret) { @@ -3988,6 +3995,16 @@ static void nvme_ns_remove(struct nvme_ns *ns) set_capacity(ns->disk, 0); nvme_fault_inject_fini(&ns->fault_inject); + /* + * Ensure that !NVME_NS_READY is seen by other threads to prevent + * this ns going back into current_path. 
+ */ + synchronize_srcu(&ns->head->srcu); + + /* wait for concurrent submissions */ + if (nvme_mpath_clear_current_path(ns)) + synchronize_srcu(&ns->head->srcu); + mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) { @@ -3999,10 +4016,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) /* guarantee not available in head->list */ synchronize_rcu(); - /* wait for concurrent submissions */ - if (nvme_mpath_clear_current_path(ns)) - synchronize_srcu(&ns->head->srcu); - if (!nvme_ns_head_multipath(ns->head)) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); @@ -4480,6 +4493,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) if (ctrl->queue_count > 1) { nvme_queue_scan(ctrl); nvme_start_queues(ctrl); + nvme_mpath_update(ctrl); } nvme_change_uevent(ctrl, "NVME_EVENT=connected"); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 1b31f19e1053..d464fdf978fb 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) /* * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing data could - * change after a rescan. + * We also do this for private namespaces as the namespace sharing flag + * could change after a rescan. */ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !nvme_is_unique_nsid(ctrl, head) || !multipath) return 0; head->disk = blk_alloc_disk(ctrl->numa_node); @@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); - - if (nvme_state_is_live(ns->ana_state)) + /* + * nvme_mpath_set_live() will trigger I/O to the multipath path device + * and in turn to this path device. However we cannot accept this I/O + * if the controller is not live. This may deadlock if called from + * nvme_mpath_init_identify() and the ctrl will never complete + * initialization, preventing I/O from completing. For this case we + * will reprocess the ANA log page in nvme_mpath_update() once the + * controller is ready. + */ + if (nvme_state_is_live(ns->ana_state) && + ns->ctrl->state == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); } @@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work) nvme_read_ana_log(ctrl); } +void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ + u32 nr_change_groups = 0; + + if (!ctrl->ana_log_buf) + return; + + mutex_lock(&ctrl->ana_lock); + nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); + mutex_unlock(&ctrl->ana_lock); +} + static void nvme_anatt_timeout(struct timer_list *t) { struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index f4b674a8ce20..1393bbf82d71 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -723,6 +723,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, return queue_live; return __nvme_check_ready(ctrl, rq, queue_live); } + +/* + * NSID shall be unique for all shared namespaces, or if at least one of the + * following conditions is met: + * 1. Namespace Management is supported by the controller + * 2. ANA is supported by the controller + * 3. 
NVM Set are supported by the controller + * + * In other case, private namespace are not required to report a unique NSID. + */ +static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head) +{ + return head->shared || + (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) || + (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) || + (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); +} + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, @@ -782,6 +801,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); void nvme_mpath_remove_disk(struct nvme_ns_head *head); int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); +void nvme_mpath_update(struct nvme_ctrl *ctrl); void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); @@ -853,6 +873,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); return 0; } +static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ +} static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 2e98ac3f3ad6..d817ca17463e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -45,7 +45,7 @@ #define NVME_MAX_SEGS 127 static int use_threaded_interrupts; -module_param(use_threaded_interrupts, int, 0); +module_param(use_threaded_interrupts, int, 0444); static bool use_cmb_sqes = true; module_param(use_cmb_sqes, bool, 0444); @@ -3467,7 +3467,10 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_128_BYTES_SQES | NVME_QUIRK_SHARED_TAGS | NVME_QUIRK_SKIP_CID_GEN }, - + { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| + NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { 0, } }; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 46d0dab686dd..397daaf51f1b 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req) ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; mutex_unlock(&ctrl->lock); - schedule_work(&ctrl->async_event_work); + queue_work(nvmet_wq, &ctrl->async_event_work); } void nvmet_execute_keep_alive(struct nvmet_req *req) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 8fedd1e052fe..e44b2988759e 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item) struct nvmet_port *port = to_nvmet_port(item); /* Let inflight controllers teardown complete */ - flush_scheduled_work(); + flush_workqueue(nvmet_wq); list_del(&port->global_entry); kfree(port->ana_state); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 64c2d2f3e25c..90e75324dae0 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq; static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); +struct workqueue_struct *nvmet_wq; +EXPORT_SYMBOL_GPL(nvmet_wq); + /* * 
This read/write semaphore is used to synchronize access to configuration * information on a target system that will result in discovery log page @@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, list_add_tail(&aen->entry, &ctrl->async_events); mutex_unlock(&ctrl->lock); - schedule_work(&ctrl->async_event_work); + queue_work(nvmet_wq, &ctrl->async_event_work); } static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) @@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work) if (reset_tbkas) { pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", ctrl->cntlid); - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); return; } @@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) @@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc) static inline bool nvmet_css_supported(u8 cc_css) { - switch (cc_css <<= NVME_CC_CSS_SHIFT) { + switch (cc_css << NVME_CC_CSS_SHIFT) { case NVME_CC_CSS_NVM: case NVME_CC_CSS_CSI: return true; @@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) mutex_lock(&ctrl->lock); if (!(ctrl->csts & NVME_CSTS_CFS)) { ctrl->csts |= NVME_CSTS_CFS; - schedule_work(&ctrl->fatal_err_work); + queue_work(nvmet_wq, &ctrl->fatal_err_work); } mutex_unlock(&ctrl->lock); } @@ -1619,9 +1622,15 @@ static int __init nvmet_init(void) goto out_free_zbd_work_queue; } + nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); + if (!nvmet_wq) { + error = -ENOMEM; + goto out_free_buffered_work_queue; + } + error = nvmet_init_discovery(); if (error) - goto out_free_work_queue; + goto out_free_nvmet_work_queue; error = nvmet_init_configfs(); if (error) @@ -1630,7 +1639,9 @@ static int __init nvmet_init(void) out_exit_discovery: nvmet_exit_discovery(); -out_free_work_queue: +out_free_nvmet_work_queue: + destroy_workqueue(nvmet_wq); +out_free_buffered_work_queue: destroy_workqueue(buffered_io_wq); out_free_zbd_work_queue: destroy_workqueue(zbd_wq); @@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void) nvmet_exit_configfs(); nvmet_exit_discovery(); ida_destroy(&cntlid_ida); + destroy_workqueue(nvmet_wq); destroy_workqueue(buffered_io_wq); destroy_workqueue(zbd_wq); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index de90001fc5c4..ab2627e17bb9 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; - if (!schedule_work(&assoc->del_work)) + if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); } @@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, continue; assoc->hostport->invalid = 1; noassoc = false; - if (!schedule_work(&assoc->del_work)) + if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); } @@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - if 
(!schedule_work(&assoc->del_work)) + if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); return; @@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, iod->rqstdatalen = lsreqbuf_len; iod->hosthandle = hosthandle; - schedule_work(&iod->work); + queue_work(nvmet_wq, &iod->work); return 0; } diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 54606f1872b4..5c16372f3b53 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, spin_lock(&rport->lock); list_add_tail(&rport->ls_list, &tls_req->ls_list); spin_unlock(&rport->lock); - schedule_work(&rport->ls_work); + queue_work(nvmet_wq, &rport->ls_work); return ret; } @@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, spin_lock(&rport->lock); list_add_tail(&rport->ls_list, &tls_req->ls_list); spin_unlock(&rport->lock); - schedule_work(&rport->ls_work); + queue_work(nvmet_wq, &rport->ls_work); } return 0; @@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, spin_lock(&tport->lock); list_add_tail(&tport->ls_list, &tls_req->ls_list); spin_unlock(&tport->lock); - schedule_work(&tport->ls_work); + queue_work(nvmet_wq, &tport->ls_work); return ret; } @@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, spin_lock(&tport->lock); list_add_tail(&tport->ls_list, &tls_req->ls_list); spin_unlock(&tport->lock); - schedule_work(&tport->ls_work); + queue_work(nvmet_wq, &tport->ls_work); } return 0; @@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport) tgt_rscn->tport = tgtport->private; INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work); - schedule_work(&tgt_rscn->work); + queue_work(nvmet_wq, &tgt_rscn->work); } static void @@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport, INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); kref_init(&tfcp_req->ref); - schedule_work(&tfcp_req->fcp_rcv_work); + queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); return 0; } @@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport, { struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); - schedule_work(&tfcp_req->tio_done_work); + queue_work(nvmet_wq, &tfcp_req->tio_done_work); } static void @@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, if (abortio) /* leave the reference while the work item is scheduled */ - WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work)); + WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work)); else { /* * as the io has already had the done callback made, diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 6485dc8eb974..f3d58abf11e0 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, 0)) return; INIT_WORK(&req->f.work, nvmet_file_flush_work); - schedule_work(&req->f.work); + queue_work(nvmet_wq, &req->f.work); } static void nvmet_file_execute_discard(struct nvmet_req *req) @@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req) if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) return; INIT_WORK(&req->f.work, nvmet_file_dsm_work); - schedule_work(&req->f.work); + queue_work(nvmet_wq, &req->f.work); } 
static void nvmet_file_write_zeroes_work(struct work_struct *w) @@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, 0)) return; INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work); - schedule_work(&req->f.work); + queue_work(nvmet_wq, &req->f.work); } u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 23f9d6f88804..59024af2da2e 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, iod->req.transfer_len = blk_rq_payload_bytes(req); } - schedule_work(&iod->work); + queue_work(nvmet_wq, &iod->work); return BLK_STS_OK; } @@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg) return; } - schedule_work(&iod->work); + queue_work(nvmet_wq, &iod->work); } static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index d910c6aad4b6..69818752a33a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -366,6 +366,7 @@ struct nvmet_req { extern struct workqueue_struct *buffered_io_wq; extern struct workqueue_struct *zbd_wq; +extern struct workqueue_struct *nvmet_wq; static inline void nvmet_set_result(struct nvmet_req *req, u32 result) { diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index a4de1e0d518b..5247c24538eb 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) if (req->p.use_workqueue || effects) { INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); req->p.rq = rq; - schedule_work(&req->p.work); + queue_work(nvmet_wq, &req->p.work); } else { rq->end_io_data = req; blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 2446d0918a41..2fab0b219b25 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, if (queue->host_qid == 0) { /* Let inflight controller teardown complete */ - flush_scheduled_work(); + flush_workqueue(nvmet_wq); } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); @@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) if (disconnect) { rdma_disconnect(queue->cm_id); - schedule_work(&queue->release_work); + queue_work(nvmet_wq, &queue->release_work); } } @@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, mutex_unlock(&nvmet_rdma_queue_mutex); pr_err("failed to connect queue %d\n", queue->idx); - schedule_work(&queue->release_work); + queue_work(nvmet_wq, &queue->release_work); } /** @@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, if (!queue) { struct nvmet_rdma_port *port = cm_id->context; - schedule_delayed_work(&port->repair_work, 0); + queue_delayed_work(nvmet_wq, &port->repair_work, 0); break; } fallthrough; @@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w) nvmet_rdma_disable_port(port); ret = nvmet_rdma_enable_port(port); if (ret) - schedule_delayed_work(&port->repair_work, 5 * HZ); + queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ); } static int nvmet_rdma_add_port(struct nvmet_port *nport) @@ -2053,7 +2053,7 @@ static 
void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data } mutex_unlock(&nvmet_rdma_queue_mutex); - flush_scheduled_work(); + flush_workqueue(nvmet_wq); } static struct ib_client nvmet_rdma_ib_client = { diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 83ca577f72be..2793554e622e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) spin_lock(&queue->state_lock); if (queue->state != NVMET_TCP_Q_DISCONNECTING) { queue->state = NVMET_TCP_Q_DISCONNECTING; - schedule_work(&queue->release_work); + queue_work(nvmet_wq, &queue->release_work); } spin_unlock(&queue->state_lock); } @@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk) goto out; if (sk->sk_state == TCP_LISTEN) - schedule_work(&port->accept_work); + queue_work(nvmet_wq, &port->accept_work); out: read_unlock_bh(&sk->sk_callback_lock); } @@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) if (sq->qid == 0) { /* Let inflight controller teardown complete */ - flush_scheduled_work(); + flush_workqueue(nvmet_wq); } queue->nr_cmds = sq->size * 2; @@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void) nvmet_unregister_transport(&nvmet_tcp_ops); - flush_scheduled_work(); + flush_workqueue(nvmet_wq); mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); - flush_scheduled_work(); + flush_workqueue(nvmet_wq); destroy_workqueue(nvmet_tcp_wq); } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index df84d221e3de..558b35aba610 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -766,14 +766,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) return irqd->parent_data->hwirq; } -static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) -{ - msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) | - msi_desc->msg.address_lo; - msi_entry->data = msi_desc->msg.data; -} - /* * @nr_bm_irqs: Indicates the number of IRQs that were allocated from * the bitmap. 
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index 5d4be9735d9d..6420ca129548 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -2,6 +2,7 @@ # tell define_trace.h where to find the cros ec trace header CFLAGS_cros_ec_trace.o:= -I$(src) +CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src) obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PRIVACY_SCREEN) += chromeos_privacy_screen.o @@ -21,7 +22,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index 272c89837d74..0dbceee87a4b 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -25,6 +25,9 @@ #define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1)) +/* waitqueue for log readers */ +static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq); + /** * struct cros_ec_debugfs - EC debugging information. * @@ -33,7 +36,6 @@ * @log_buffer: circular buffer for console log information * @read_msg: preallocated EC command and buffer to read console log * @log_mutex: mutex to protect circular buffer - * @log_wq: waitqueue for log readers * @log_poll_work: recurring task to poll EC for new console log data * @panicinfo_blob: panicinfo debugfs blob */ @@ -44,7 +46,6 @@ struct cros_ec_debugfs { struct circ_buf log_buffer; struct cros_ec_command *read_msg; struct mutex log_mutex; - wait_queue_head_t log_wq; struct delayed_work log_poll_work; /* EC panicinfo */ struct debugfs_blob_wrapper panicinfo_blob; @@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work) buf_space--; } - wake_up(&debug_info->log_wq); + wake_up(&cros_ec_debugfs_log_wq); } mutex_unlock(&debug_info->log_mutex); @@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf, mutex_unlock(&debug_info->log_mutex); - ret = wait_event_interruptible(debug_info->log_wq, + ret = wait_event_interruptible(cros_ec_debugfs_log_wq, CIRC_CNT(cb->head, cb->tail, LOG_SIZE)); if (ret < 0) return ret; @@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file, struct cros_ec_debugfs *debug_info = file->private_data; __poll_t mask = 0; - poll_wait(file, &debug_info->log_wq, wait); + poll_wait(file, &cros_ec_debugfs_log_wq, wait); mutex_lock(&debug_info->log_mutex); if (CIRC_CNT(debug_info->log_buffer.head, @@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info) debug_info->log_buffer.tail = 0; mutex_init(&debug_info->log_mutex); - init_waitqueue_head(&debug_info->log_wq); debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir, debug_info, &cros_ec_console_log_fops); diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 98e37080f760..71948dade0e2 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -17,7 +17,8 @@ #include <linux/sort.h> 
#include <linux/slab.h> -#include "cros_ec_trace.h" +#define CREATE_TRACE_POINTS +#include "cros_ec_sensorhub_trace.h" /* Precision of fixed point for the m values from the filter */ #define M_PRECISION BIT(23) diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h new file mode 100644 index 000000000000..57d9b4785969 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events for the ChromeOS Sensorhub kernel module + * + * Copyright 2021 Google LLC. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cros_ec + +#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _CROS_EC_SENSORHUB_TRACE_H_ + +#include <linux/types.h> +#include <linux/platform_data/cros_ec_sensorhub.h> + +#include <linux/tracepoint.h> + +TRACE_EVENT(cros_ec_sensorhub_timestamp, + TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, + current_time), + TP_STRUCT__entry( + __field(u32, ec_sample_timestamp) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sample_timestamp = ec_sample_timestamp; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sample_timestamp, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_data, + TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), + TP_STRUCT__entry( + __field(u32, ec_sensor_num) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sensor_num = ec_sensor_num; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sensor_num, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_filter, + TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), + TP_ARGS(state, dx, dy), + TP_STRUCT__entry( + __field(s64, dx) + __field(s64, dy) + __field(s64, median_m) + __field(s64, median_error) + __field(s64, history_len) + __field(s64, x) + __field(s64, y) + ), + TP_fast_assign( + __entry->dx = dx; + __entry->dy = dy; + __entry->median_m = state->median_m; + __entry->median_error = state->median_error; + __entry->history_len = state->history_len; + __entry->x = state->x_offset; + __entry->y 
= state->y_offset; + ), + TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", + __entry->dx, + __entry->dy, + __entry->median_m, + __entry->median_error, + __entry->history_len, + __entry->x, + __entry->y + ) +); + + +#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace + +#include <trace/define_trace.h> diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h index 7e7cfc98657a..9bb5cd2c98b8 100644 --- a/drivers/platform/chrome/cros_ec_trace.h +++ b/drivers/platform/chrome/cros_ec_trace.h @@ -15,7 +15,6 @@ #include <linux/types.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> -#include <linux/platform_data/cros_ec_sensorhub.h> #include <linux/tracepoint.h> @@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done, __entry->retval) ); -TRACE_EVENT(cros_ec_sensorhub_timestamp, - TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, - current_time), - TP_STRUCT__entry( - __field(u32, ec_sample_timestamp) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sample_timestamp = ec_sample_timestamp; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sample_timestamp, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_data, - TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), - TP_STRUCT__entry( - __field(u32, ec_sensor_num) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sensor_num = ec_sensor_num; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sensor_num, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_filter, - TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), - TP_ARGS(state, dx, dy), - TP_STRUCT__entry( - __field(s64, dx) - __field(s64, dy) - __field(s64, median_m) - __field(s64, median_error) - __field(s64, history_len) - __field(s64, x) - __field(s64, y) - ), - TP_fast_assign( - __entry->dx = dx; - __entry->dy = dy; - 
__entry->median_m = state->median_m; - __entry->median_error = state->median_error; - __entry->history_len = state->history_len; - __entry->x = state->x_offset; - __entry->y = state->y_offset; - ), - TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", - __entry->dx, - __entry->dy, - __entry->median_m, - __entry->median_error, - __entry->history_len, - __entry->x, - __entry->y - ) -); - - #endif /* _CROS_EC_TRACE_H_ */ /* this part must be outside header guard */ diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 5de0bfb0bc4d..4bd2752c0823 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -115,17 +115,18 @@ static int cros_typec_parse_port_props(struct typec_capability *cap, return ret; cap->data = ret; + /* Try-power-role is optional. */ ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); if (ret) { - dev_err(dev, "try-power-role not found: %d\n", ret); - return ret; + dev_warn(dev, "try-power-role not found: %d\n", ret); + cap->prefer_role = TYPEC_NO_PREFERRED_ROLE; + } else { + ret = typec_find_power_role(buf); + if (ret < 0) + return ret; + cap->prefer_role = ret; } - ret = typec_find_power_role(buf); - if (ret < 0) - return ret; - cap->prefer_role = ret; - cap->fwnode = fwnode; return 0; @@ -227,6 +228,7 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec, cros_typec_unregister_altmodes(typec, port_num, true); cros_typec_usb_disconnect_state(port); + port->mux_flags = USB_PD_MUX_NONE; typec_unregister_partner(port->partner); port->partner = NULL; @@ -512,20 +514,38 @@ static int cros_typec_enable_usb4(struct cros_typec_data *typec, } static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num, - uint8_t mux_flags, struct ec_response_usb_pd_control_v2 *pd_ctrl) { struct cros_typec_port *port = typec->ports[port_num]; + struct ec_response_usb_pd_mux_info resp; + struct ec_params_usb_pd_mux_info req = { + .port = port_num, + }; struct ec_params_usb_pd_mux_ack mux_ack; enum typec_orientation orientation; int ret; - if (mux_flags == USB_PD_MUX_NONE) { + ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret < 0) { + dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n", + port_num, ret); + return ret; + } + + /* No change needs to be made, let's exit early. 
*/ + if (port->mux_flags == resp.flags && port->role == pd_ctrl->role) + return 0; + + port->mux_flags = resp.flags; + port->role = pd_ctrl->role; + + if (port->mux_flags == USB_PD_MUX_NONE) { ret = cros_typec_usb_disconnect_state(port); goto mux_ack; } - if (mux_flags & USB_PD_MUX_POLARITY_INVERTED) + if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED) orientation = TYPEC_ORIENTATION_REVERSE; else orientation = TYPEC_ORIENTATION_NORMAL; @@ -540,22 +560,22 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num, if (ret) return ret; - if (mux_flags & USB_PD_MUX_USB4_ENABLED) { + if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) { ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl); - } else if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) { + } else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) { ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl); - } else if (mux_flags & USB_PD_MUX_DP_ENABLED) { + } else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) { ret = cros_typec_enable_dp(typec, port_num, pd_ctrl); - } else if (mux_flags & USB_PD_MUX_SAFE_MODE) { + } else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) { ret = cros_typec_usb_safe_state(port); - } else if (mux_flags & USB_PD_MUX_USB_ENABLED) { + } else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) { port->state.alt = NULL; port->state.mode = TYPEC_STATE_USB; ret = typec_mux_set(port->mux, &port->state); } else { dev_dbg(typec->dev, "Unrecognized mode requested, mux flags: %x\n", - mux_flags); + port->mux_flags); } mux_ack: @@ -630,17 +650,6 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec, } } -static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num, - struct ec_response_usb_pd_mux_info *resp) -{ - struct ec_params_usb_pd_mux_info req = { - .port = port_num, - }; - - return cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO, &req, - sizeof(req), resp, sizeof(*resp)); -} - /* * Helper function to register partner/plug altmodes. */ @@ -938,7 +947,6 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) { struct ec_params_usb_pd_control req; struct ec_response_usb_pd_control_v2 resp; - struct ec_response_usb_pd_mux_info mux_resp; int ret; if (port_num < 0 || port_num >= typec->num_ports) { @@ -958,6 +966,11 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) if (ret < 0) return ret; + /* Update the switches if they exist, according to requested state */ + ret = cros_typec_configure_mux(typec, port_num, &resp); + if (ret) + dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret); + dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled); dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role); dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity); @@ -973,27 +986,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) if (typec->typec_cmd_supported) cros_typec_handle_status(typec, port_num); - /* Update the switches if they exist, according to requested state */ - ret = cros_typec_get_mux_info(typec, port_num, &mux_resp); - if (ret < 0) { - dev_warn(typec->dev, - "Failed to get mux info for port: %d, err = %d\n", - port_num, ret); - return 0; - } - - /* No change needs to be made, let's exit early. 
*/ - if (typec->ports[port_num]->mux_flags == mux_resp.flags && - typec->ports[port_num]->role == resp.role) - return 0; - - typec->ports[port_num]->mux_flags = mux_resp.flags; - typec->ports[port_num]->role = resp.role; - ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp); - if (ret) - dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret); - - return ret; + return 0; } static int cros_typec_get_cmd_version(struct cros_typec_data *typec) @@ -1075,7 +1068,13 @@ static int cros_typec_probe(struct platform_device *pdev) return -ENOMEM; typec->dev = dev; + typec->ec = dev_get_drvdata(pdev->dev.parent); + if (!typec->ec) { + dev_err(dev, "couldn't find parent EC device\n"); + return -ENODEV; + } + platform_set_drvdata(pdev, typec); ret = cros_typec_get_cmd_version(typec); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f0763e36b861..cb2491761958 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -745,9 +745,7 @@ sclp_sync_wait(void) /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ - if (timer_pending(&sclp_request_timer) && - get_tod_clock_fast() > timeout && - del_timer(&sclp_request_timer)) + if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(&sclp_request_timer); cpu_relax(); } diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index de028868c6f4..fe5ee2646fcf 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -109,8 +109,7 @@ static void sclp_console_sync_queue(void) unsigned long flags; spin_lock_irqsave(&sclp_con_lock, flags); - if (timer_pending(&sclp_con_timer)) - del_timer(&sclp_con_timer); + del_timer(&sclp_con_timer); while (sclp_con_queue_running) { spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 7bc4e4a10937..3b4e7e5d9b71 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -231,8 +231,7 @@ sclp_vt220_emit_current(void) list_add_tail(&sclp_vt220_current_request->list, &sclp_vt220_outqueue); sclp_vt220_current_request = NULL; - if (timer_pending(&sclp_vt220_timer)) - del_timer(&sclp_vt220_timer); + del_timer(&sclp_vt220_timer); } sclp_vt220_flush_later = 0; } @@ -776,8 +775,7 @@ static void __sclp_vt220_flush_buffer(void) sclp_vt220_emit_current(); spin_lock_irqsave(&sclp_vt220_lock, flags); - if (timer_pending(&sclp_vt220_timer)) - del_timer(&sclp_vt220_timer); + del_timer(&sclp_vt220_timer); while (sclp_vt220_queue_running) { spin_unlock_irqrestore(&sclp_vt220_lock, flags); sclp_sync_wait(); diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 7ada994d4592..38cc1565d6ae 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -354,10 +354,10 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, if (( sense[0] == SENSE_DATA_CHECK || sense[0] == SENSE_EQUIPMENT_CHECK || - sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK + sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK) ) && ( sense[1] == SENSE_DRIVE_ONLINE || - sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE + sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE) )) { switch (request->op) { /* diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 05e136cfb8be..6d63b968309a 100644 --- 
a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -113,16 +113,10 @@ ccw_device_timeout(struct timer_list *t) void ccw_device_set_timeout(struct ccw_device *cdev, int expires) { - if (expires == 0) { + if (expires == 0) del_timer(&cdev->private->timer); - return; - } - if (timer_pending(&cdev->private->timer)) { - if (mod_timer(&cdev->private->timer, jiffies + expires)) - return; - } - cdev->private->timer.expires = jiffies + expires; - add_timer(&cdev->private->timer); + else + mod_timer(&cdev->private->timer, jiffies + expires); } int diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 8b463681a149..ab6a7495180a 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -112,16 +112,10 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) { struct eadm_private *private = get_eadm_private(sch); - if (expires == 0) { + if (expires == 0) del_timer(&private->timer); - return; - } - if (timer_pending(&private->timer)) { - if (mod_timer(&private->timer, jiffies + expires)) - return; - } - private->timer.expires = jiffies + expires; - add_timer(&private->timer); + else + mod_timer(&private->timer, jiffies + expires); } static void eadm_subchannel_irq(struct subchannel *sch) diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 8fd5a17bdf99..6a65885f5f43 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -315,6 +315,7 @@ struct ap_perms { unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)]; unsigned long apm[BITS_TO_LONGS(AP_DEVICES)]; unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)]; + unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)]; }; extern struct ap_perms ap_perms; extern struct mutex ap_perms_mutex; diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index cf23ce1b1146..7f69ca695fc2 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -155,7 +155,7 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) /* * The cca_xxx2protkey call may fail when a card has been * addressed where the master key was changed after last fetch - * of the mkvp into the cache. Try 3 times: First witout verify + * of the mkvp into the cache. Try 3 times: First without verify * then with verify and last round with verify and old master * key verification pattern match not ignored. */ diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 7dc26365e29a..6e08d04b605d 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1189,13 +1189,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = { * @matrix_mdev: a mediated matrix device * @kvm: reference to KVM instance * - * Note: The matrix_dev->lock must be taken prior to calling - * this function; however, the lock will be temporarily released while the - * guest's AP configuration is set to avoid a potential lockdep splat. - * The kvm->lock is taken to set the guest's AP configuration which, under - * certain circumstances, will result in a circular lock dependency if this is - * done under the @matrix_mdev->lock. - * * Return: 0 if no other mediated matrix device has a reference to @kvm; * otherwise, returns an -EPERM. */ @@ -1269,18 +1262,11 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb, * by @matrix_mdev. * * @matrix_mdev: a matrix mediated device - * @kvm: the pointer to the kvm structure being unset. 
- * - * Note: The matrix_dev->lock must be taken prior to calling - * this function; however, the lock will be temporarily released while the - * guest's AP configuration is cleared to avoid a potential lockdep splat. - * The kvm->lock is taken to clear the guest's AP configuration which, under - * certain circumstances, will result in a circular lock dependency if this is - * done under the @matrix_mdev->lock. */ -static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev, - struct kvm *kvm) +static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) { + struct kvm *kvm = matrix_mdev->kvm; + if (kvm && kvm->arch.crypto.crycbd) { down_write(&kvm->arch.crypto.pqap_hook_rwsem); kvm->arch.crypto.pqap_hook = NULL; @@ -1311,7 +1297,7 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); if (!data) - vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); + vfio_ap_mdev_unset_kvm(matrix_mdev); else if (vfio_ap_mdev_set_kvm(matrix_mdev, data)) notify_rc = NOTIFY_DONE; @@ -1448,7 +1434,7 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev) &matrix_mdev->iommu_notifier); vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); - vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); + vfio_ap_mdev_unset_kvm(matrix_mdev); } static int vfio_ap_mdev_get_device_info(unsigned long arg) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 80e2a306709a..aa6dc3c0c353 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -285,10 +285,53 @@ static ssize_t aqmask_store(struct device *dev, static DEVICE_ATTR_RW(aqmask); +static ssize_t admask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i, rc; + struct zcdn_device *zcdndev = to_zcdn_dev(dev); + + if (mutex_lock_interruptible(&ap_perms_mutex)) + return -ERESTARTSYS; + + buf[0] = '0'; + buf[1] = 'x'; + for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++) + snprintf(buf + 2 + 2 * i * sizeof(long), + PAGE_SIZE - 2 - 2 * i * sizeof(long), + "%016lx", zcdndev->perms.adm[i]); + buf[2 + 2 * i * sizeof(long)] = '\n'; + buf[2 + 2 * i * sizeof(long) + 1] = '\0'; + rc = 2 + 2 * i * sizeof(long) + 1; + + mutex_unlock(&ap_perms_mutex); + + return rc; +} + +static ssize_t admask_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc; + struct zcdn_device *zcdndev = to_zcdn_dev(dev); + + rc = ap_parse_mask_str(buf, zcdndev->perms.adm, + AP_DOMAINS, &ap_perms_mutex); + if (rc) + return rc; + + return count; +} + +static DEVICE_ATTR_RW(admask); + static struct attribute *zcdn_dev_attrs[] = { &dev_attr_ioctlmask.attr, &dev_attr_apmask.attr, &dev_attr_aqmask.attr, + &dev_attr_admask.attr, NULL }; @@ -880,11 +923,22 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, if (rc) goto out; + tdom = *domain; + if (perms != &ap_perms && tdom < AP_DOMAINS) { + if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { + if (!test_bit_inv(tdom, perms->adm)) { + rc = -ENODEV; + goto out; + } + } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { + rc = -EOPNOTSUPP; + goto out; + } + } /* * If a valid target domain is set and this domain is NOT a usage * domain but a control only domain, autoselect target domain. 
*/ - tdom = *domain; if (tdom < AP_DOMAINS && !ap_test_config_usage_domain(tdom) && ap_test_config_ctrl_domain(tdom)) @@ -1062,6 +1116,18 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (rc) goto out_free; + if (perms != &ap_perms && domain < AUTOSEL_DOM) { + if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { + if (!test_bit_inv(domain, perms->adm)) { + rc = -ENODEV; + goto out_free; + } + } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { + rc = -EOPNOTSUPP; + goto out_free; + } + } + pref_zc = NULL; pref_zq = NULL; spin_lock(&zcrypt_list_lock); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index 3e259befd30a..fcbd537530e8 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -90,7 +90,7 @@ static ssize_t online_store(struct device *dev, list_for_each_entry(zq, &zc->zqueues, list) maxzqs++; if (maxzqs > 0) - zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC); + zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC); list_for_each_entry(zq, &zc->zqueues, list) if (zcrypt_queue_force_online(zq, online)) if (zq_uelist) { diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 9ce5a71da69b..98d33f932b0b 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -1109,7 +1109,7 @@ static int ep11_wrapkey(u16 card, u16 domain, if (kb->head.type == TOKTYPE_NON_CCA && kb->head.version == TOKVER_EP11_AES) { has_header = true; - keysize = kb->head.len < keysize ? kb->head.len : keysize; + keysize = min_t(size_t, kb->head.len, keysize); } /* request cprb and payload */ @@ -1552,7 +1552,6 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb, file = req->ki_filp; if (unlikely(!(file->f_mode & FMODE_READ))) return -EBADF; - ret = -EINVAL; if (unlikely(!file->f_op->read_iter)) return -EINVAL; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index aa0a60ee26cb..6bfc4343c98d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8296,7 +8296,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset, * cover the full folio, like invalidating the last folio, we're * still safe to wait for ordered extent to finish. */ - if (!(offset == 0 && length == PAGE_SIZE)) { + if (!(offset == 0 && length == folio_size(folio))) { btrfs_releasepage(&folio->page, GFP_NOFS); return; } diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 04a88bfe4fcf..998e3f180d90 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, int ret; /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. */ btrfs_double_extent_lock(src, loff, dst, dst_loff, len); @@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, } /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. 
*/ btrfs_double_extent_lock(src, off, inode, destoff, len); diff --git a/fs/buffer.c b/fs/buffer.c index d67fbe063a3a..2b5561ae5d0b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) if (err) goto out; - err = pagecache_write_begin(NULL, mapping, size, 0, - AOP_FLAG_CONT_EXPAND, &page, &fsdata); + err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); if (err) goto out; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index feb75eb1cd82..6c9e837aa1d3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1869,7 +1869,7 @@ retry_snap: * are pending vmtruncate. So write and vmtruncate * can not run at the same time */ - written = generic_perform_write(file, from, pos); + written = generic_perform_write(iocb, from); if (likely(written >= 0)) iocb->ki_pos = pos + written; ceph_end_io_write(inode); diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index ea00e1a91250..9d334816eac0 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -94,7 +94,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon) le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), le32_to_cpu(tcon->fsAttrInfo.Attributes), le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), - tcon->tidStatus); + tcon->status); if (dev_type == FILE_DEVICE_DISK) seq_puts(m, " type: DISK "); else if (dev_type == FILE_DEVICE_CD_ROM) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index d1211ad4e85b..a47fa44b6d52 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -699,14 +699,14 @@ static void cifs_umount_begin(struct super_block *sb) tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&cifs_tcp_ses_lock); - if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) { + if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { /* we have other mounts to same share or we have already tried to force umount this and woken up all waiting network requests, nothing to do */ spin_unlock(&cifs_tcp_ses_lock); return; } else if (tcon->tc_count == 1) - tcon->tidStatus = CifsExiting; + tcon->status = TID_EXITING; spin_unlock(&cifs_tcp_ses_lock); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 0a4085ced40f..8de977c359b1 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -116,10 +116,18 @@ enum statusEnum { CifsInNegotiate, CifsNeedSessSetup, CifsInSessSetup, - CifsNeedTcon, - CifsInTcon, - CifsNeedFilesInvalidate, - CifsInFilesInvalidate +}; + +/* associated with each tree connection to the server */ +enum tid_status_enum { + TID_NEW = 0, + TID_GOOD, + TID_EXITING, + TID_NEED_RECON, + TID_NEED_TCON, + TID_IN_TCON, + TID_NEED_FILES_INVALIDATE, /* currently unused */ + TID_IN_FILES_INVALIDATE }; enum securityEnum { @@ -853,13 +861,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4) #define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4) -/* - * The default wsize is 1M. find_get_pages seems to return a maximum of 256 - * pages in a single call. With PAGE_SIZE == 4k, this means we can fill - * a single wsize request with a single call. - */ #define CIFS_DEFAULT_IOSIZE (1024 * 1024) -#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024) /* * Windows only supports a max of 60kb reads and 65535 byte writes. 
Default to @@ -1039,7 +1041,7 @@ struct cifs_tcon { char *password; /* for share-level security */ __u32 tid; /* The 4 byte tree id */ __u16 Flags; /* optional support bits */ - enum statusEnum tidStatus; + enum tid_status_enum status; atomic_t num_smbs_sent; union { struct { diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 68b9a436af4b..aeba371c4c70 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -123,18 +123,6 @@ */ #define CIFS_SESS_KEY_SIZE (16) -/* - * Size of the smb3 signing key - */ -#define SMB3_SIGN_KEY_SIZE (16) - -/* - * Size of the smb3 encryption/decryption key storage. - * This size is big enough to store any cipher key types. - */ -#define SMB3_ENC_DEC_KEY_SIZE (32) - -#define CIFS_CLIENT_CHALLENGE_SIZE (8) #define CIFS_SERVER_CHALLENGE_SIZE (8) #define CIFS_HMAC_MD5_HASH_SIZE (16) #define CIFS_CPHTXT_SIZE (16) @@ -1658,7 +1646,7 @@ struct smb_t2_rsp { #define SMB_FIND_FILE_ID_FULL_DIR_INFO 0x105 #define SMB_FIND_FILE_ID_BOTH_DIR_INFO 0x106 #define SMB_FIND_FILE_UNIX 0x202 -#define SMB_FIND_FILE_POSIX_INFO 0x064 +/* #define SMB_FIND_FILE_POSIX_INFO 0x064 */ typedef struct smb_com_transaction2_qpi_req { struct smb_hdr hdr; /* wct = 14+ */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 071e2f21a7db..47e927c4ff8d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -75,12 +75,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon) /* only send once per connect */ spin_lock(&cifs_tcp_ses_lock); - if (tcon->ses->status != CifsGood || - tcon->tidStatus != CifsNeedReconnect) { + if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) { spin_unlock(&cifs_tcp_ses_lock); return; } - tcon->tidStatus = CifsInFilesInvalidate; + tcon->status = TID_IN_FILES_INVALIDATE; spin_unlock(&cifs_tcp_ses_lock); /* list all files open on tree connection and mark them invalid */ @@ -100,8 +99,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon) mutex_unlock(&tcon->crfid.fid_mutex); spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsInFilesInvalidate) - tcon->tidStatus = CifsNeedTcon; + if (tcon->status == TID_IN_FILES_INVALIDATE) + tcon->status = TID_NEED_TCON; spin_unlock(&cifs_tcp_ses_lock); /* @@ -136,7 +135,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * have tcon) are allowed as we start force umount */ spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsExiting) { + if (tcon->status == TID_EXITING) { if (smb_command != SMB_COM_WRITE_ANDX && smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_TREE_DISCONNECT) { @@ -597,7 +596,7 @@ CIFSSMBNegotiate(const unsigned int xid, set_credits(server, server->maxReq); /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); - /* set up max_read for readpages check */ + /* set up max_read for readahead check */ server->max_read = server->maxBuf; server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9964c3634322..ee3b7c15e884 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -245,7 +245,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { tcon->need_reconnect = true; - tcon->tidStatus = CifsNeedReconnect; + tcon->status = TID_NEED_RECON; } if (ses->tcon_ipc) ses->tcon_ipc->need_reconnect = true; @@ -2207,7 +2207,7 @@ get_ses_fail: static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { - if 
(tcon->tidStatus == CifsExiting) + if (tcon->status == TID_EXITING) return 0; if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE)) return 0; @@ -3513,6 +3513,9 @@ static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path, struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; char *oldmnt = cifs_sb->ctx->mount_options; + cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path, + dfs_cache_get_tgt_name(tit)); + rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref); if (rc) goto out; @@ -3611,13 +3614,18 @@ static int __follow_dfs_link(struct mount_ctx *mnt_ctx) if (rc) goto out; - /* Try all dfs link targets */ + /* Try all dfs link targets. If an I/O fails from currently connected DFS target with an + * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as + * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than + * STATUS_PATH_NOT_COVERED." + */ for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl); tit; tit = dfs_cache_get_next_tgt(&tl, tit)) { rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit); if (!rc) { rc = is_path_remote(mnt_ctx); - break; + if (!rc || rc == -EREMOTE) + break; } } @@ -3691,7 +3699,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) goto error; rc = is_path_remote(&mnt_ctx); - if (rc == -EREMOTE) + if (rc) rc = follow_dfs_link(&mnt_ctx); if (rc) goto error; @@ -4478,12 +4486,12 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru /* only send once per connect */ spin_lock(&cifs_tcp_ses_lock); if (tcon->ses->status != CifsGood || - (tcon->tidStatus != CifsNew && - tcon->tidStatus != CifsNeedTcon)) { + (tcon->status != TID_NEW && + tcon->status != TID_NEED_TCON)) { spin_unlock(&cifs_tcp_ses_lock); return 0; } - tcon->tidStatus = CifsInTcon; + tcon->status = TID_IN_TCON; spin_unlock(&cifs_tcp_ses_lock); tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); @@ -4524,13 +4532,13 @@ out: if (rc) { spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsInTcon) - tcon->tidStatus = CifsNeedTcon; + if (tcon->status == TID_IN_TCON) + tcon->status = TID_NEED_TCON; spin_unlock(&cifs_tcp_ses_lock); } else { spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsInTcon) - tcon->tidStatus = CifsGood; + if (tcon->status == TID_IN_TCON) + tcon->status = TID_GOOD; spin_unlock(&cifs_tcp_ses_lock); tcon->need_reconnect = false; } @@ -4546,24 +4554,24 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru /* only send once per connect */ spin_lock(&cifs_tcp_ses_lock); if (tcon->ses->status != CifsGood || - (tcon->tidStatus != CifsNew && - tcon->tidStatus != CifsNeedTcon)) { + (tcon->status != TID_NEW && + tcon->status != TID_NEED_TCON)) { spin_unlock(&cifs_tcp_ses_lock); return 0; } - tcon->tidStatus = CifsInTcon; + tcon->status = TID_IN_TCON; spin_unlock(&cifs_tcp_ses_lock); rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); if (rc) { spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsInTcon) - tcon->tidStatus = CifsNeedTcon; + if (tcon->status == TID_IN_TCON) + tcon->status = TID_NEED_TCON; spin_unlock(&cifs_tcp_ses_lock); } else { spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsInTcon) - tcon->tidStatus = CifsGood; + if (tcon->status == TID_IN_TCON) + tcon->status = TID_GOOD; spin_unlock(&cifs_tcp_ses_lock); tcon->need_reconnect = false; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 60f43bff7ccb..d511a78383c3 100644 
--- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4210,13 +4210,19 @@ cifs_page_mkwrite(struct vm_fault *vmf) { struct page *page = vmf->page; + /* Wait for the page to be written to the cache before we allow it to + * be modified. We then assume the entire page will need writing back. + */ #ifdef CONFIG_CIFS_FSCACHE if (PageFsCache(page) && wait_on_page_fscache_killable(page) < 0) return VM_FAULT_RETRY; #endif - lock_page(page); + wait_on_page_writeback(page); + + if (lock_page_killable(page) < 0) + return VM_FAULT_RETRY; return VM_FAULT_LOCKED; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 60d853c92f6a..2f9e7d2f81b6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode) inode->i_fop = &cifs_file_ops; } - /* check if server can support readpages */ + /* check if server can support readahead */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 56598f7dbe00..afaf59c22193 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -116,7 +116,7 @@ tconInfoAlloc(void) } atomic_inc(&tconInfoAllocCount); - ret_buf->tidStatus = CifsNew; + ret_buf->status = TID_NEW; ++ret_buf->tc_count; INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index 4125fd113cfb..82e916ad167c 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -41,15 +41,4 @@ #define END_OF_CHAIN 4 #define RELATED_REQUEST 8 -#define SMB2_SIGNATURE_SIZE (16) -#define SMB2_NTLMV2_SESSKEY_SIZE (16) -#define SMB2_HMACSHA256_SIZE (32) -#define SMB2_CMACAES_SIZE (16) -#define SMB3_SIGNKEY_SIZE (16) -#define SMB3_GCM128_CRYPTKEY_SIZE (16) -#define SMB3_GCM256_CRYPTKEY_SIZE (32) - -/* Maximum buffer size value we can send with 1 credit */ -#define SMB2_MAX_BUFFER_SIZE 65536 - #endif /* _SMB2_GLOB_H */ diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index b25623e3fe3d..c653beb735b8 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -203,7 +203,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) { if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 || - pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) { + pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) { /* error packets have 9 byte structure size */ cifs_dbg(VFS, "Invalid response size %u for command %d\n", le16_to_cpu(pdu->StructureSize2), command); @@ -303,7 +303,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr) /* error responses do not have data area */ if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED && (((struct smb2_err_rsp *)shdr)->StructureSize) == - SMB2_ERROR_STRUCTURE_SIZE2) + SMB2_ERROR_STRUCTURE_SIZE2_LE) return NULL; /* @@ -478,11 +478,11 @@ smb2_get_lease_state(struct cifsInodeInfo *cinode) __le32 lease = 0; if (CIFS_CACHE_WRITE(cinode)) - lease |= SMB2_LEASE_WRITE_CACHING; + lease |= SMB2_LEASE_WRITE_CACHING_LE; if (CIFS_CACHE_HANDLE(cinode)) - lease |= SMB2_LEASE_HANDLE_CACHING; + lease |= SMB2_LEASE_HANDLE_CACHING_LE; if (CIFS_CACHE_READ(cinode)) - lease |= SMB2_LEASE_READ_CACHING; + lease |= SMB2_LEASE_READ_CACHING_LE; return lease; } @@ -832,8 +832,8 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve rc = __smb2_handle_cancelled_cmd(tcon, le16_to_cpu(hdr->Command), le64_to_cpu(hdr->MessageId), - 
le64_to_cpu(rsp->PersistentFileId), - le64_to_cpu(rsp->VolatileFileId)); + rsp->PersistentFileId, + rsp->VolatileFileId); if (rc) cifs_put_tcon(tcon); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 891b11576e55..db23f5b404ba 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -897,8 +897,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, atomic_inc(&tcon->num_remote_opens); o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; - oparms.fid->persistent_fid = le64_to_cpu(o_rsp->PersistentFileId); - oparms.fid->volatile_fid = le64_to_cpu(o_rsp->VolatileFileId); + oparms.fid->persistent_fid = o_rsp->PersistentFileId; + oparms.fid->volatile_fid = o_rsp->VolatileFileId; #ifdef CONFIG_CIFS_DEBUG2 oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); #endif /* CIFS_DEBUG2 */ @@ -1192,17 +1192,12 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb) { int rc; - __le16 *utf16_path; struct kvec rsp_iov = {NULL, 0}; int buftype = CIFS_NO_BUFFER; struct smb2_query_info_rsp *rsp; struct smb2_file_full_ea_info *info = NULL; - utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); - if (!utf16_path) - return -ENOMEM; - - rc = smb2_query_info_compound(xid, tcon, utf16_path, + rc = smb2_query_info_compound(xid, tcon, path, FILE_READ_EA, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, @@ -1235,7 +1230,6 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, le32_to_cpu(rsp->OutputBufferLength), ea_name); qeas_exit: - kfree(utf16_path); free_rsp_buf(buftype, rsp_iov.iov_base); return rc; } @@ -1295,7 +1289,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, * the new EA. If not we should not add it since we * would not be able to even read the EAs back. */ - rc = smb2_query_info_compound(xid, tcon, utf16_path, + rc = smb2_query_info_compound(xid, tcon, path, FILE_READ_EA, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, @@ -1643,6 +1637,7 @@ smb2_ioctl_query_info(const unsigned int xid, unsigned int size[2]; void *data[2]; int create_options = is_dir ? 
CREATE_NOT_FILE : CREATE_NOT_DIR; + void (*free_req1_func)(struct smb_rqst *r); vars = kzalloc(sizeof(*vars), GFP_ATOMIC); if (vars == NULL) @@ -1652,27 +1647,29 @@ smb2_ioctl_query_info(const unsigned int xid, resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; - if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) - goto e_fault; - + if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) { + rc = -EFAULT; + goto free_vars; + } if (qi.output_buffer_length > 1024) { - kfree(vars); - return -EINVAL; + rc = -EINVAL; + goto free_vars; } if (!ses || !server) { - kfree(vars); - return -EIO; + rc = -EIO; + goto free_vars; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; - buffer = memdup_user(arg + sizeof(struct smb_query_info), - qi.output_buffer_length); - if (IS_ERR(buffer)) { - kfree(vars); - return PTR_ERR(buffer); + if (qi.output_buffer_length) { + buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length); + if (IS_ERR(buffer)) { + rc = PTR_ERR(buffer); + goto free_vars; + } } /* Open */ @@ -1710,45 +1707,45 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, path); if (rc) - goto iqinf_exit; + goto free_output_buffer; smb2_set_next_command(tcon, &rqst[0]); /* Query */ if (qi.flags & PASSTHRU_FSCTL) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->io_iov[0]; - rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; - - rc = SMB2_ioctl_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, - qi.output_buffer_length, - CIFSMaxBufSize - - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); + goto free_open_req; } + rqst[1].rq_iov = &vars->io_iov[0]; + rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; + + rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + qi.info_type, true, buffer, qi.output_buffer_length, + CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE); + free_req1_func = SMB2_ioctl_free; } else if (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->si_iov[0]; - rqst[1].rq_nvec = 1; - - size[0] = 8; - data[0] = buffer; - - rc = SMB2_set_info_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - current->tgid, - FILE_END_OF_FILE_INFORMATION, - SMB2_O_INFO_FILE, 0, data, size); + goto free_open_req; + } + if (qi.output_buffer_length < 8) { + rc = -EINVAL; + goto free_open_req; } + rqst[1].rq_iov = &vars->si_iov[0]; + rqst[1].rq_nvec = 1; + + /* MS-FSCC 2.4.13 FileEndOfFileInformation */ + size[0] = 8; + data[0] = buffer; + + rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + current->tgid, FILE_END_OF_FILE_INFORMATION, + SMB2_O_INFO_FILE, 0, data, size); + free_req1_func = SMB2_set_info_free; } else if (qi.flags == PASSTHRU_QUERY_INFO) { rqst[1].rq_iov = &vars->qi_iov[0]; rqst[1].rq_nvec = 1; @@ -1759,6 +1756,7 @@ smb2_ioctl_query_info(const unsigned int xid, qi.info_type, qi.additional_information, qi.input_buffer_length, qi.output_buffer_length, buffer); + free_req1_func = SMB2_query_info_free; } else { /* unknown flags */ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n", qi.flags); @@ -1766,7 +1764,7 @@ smb2_ioctl_query_info(const unsigned int xid, } if 
(rc) - goto iqinf_exit; + goto free_open_req; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); @@ -1777,14 +1775,14 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) - goto iqinf_exit; + goto free_req_1; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); if (rc) - goto iqinf_exit; + goto out; /* No need to bump num_remote_opens since handle immediately closed */ if (qi.flags & PASSTHRU_FSCTL) { @@ -1794,18 +1792,22 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length - > rsp_iov[1].iov_len) - goto e_fault; + > rsp_iov[1].iov_len) { + rc = -EFAULT; + goto out; + } if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; @@ -1813,28 +1815,30 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } - iqinf_exit: - cifs_small_buf_release(rqst[0].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[1].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[2].rq_iov[0].iov_base); +out: free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); - kfree(vars); + SMB2_close_free(&rqst[2]); +free_req_1: + free_req1_func(&rqst[1]); +free_open_req: + SMB2_open_free(&rqst[0]); +free_output_buffer: kfree(buffer); +free_vars: + kfree(vars); return rc; - -e_fault: - rc = -EFAULT; - goto iqinf_exit; } static ssize_t @@ -2407,8 +2411,8 @@ again: cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc); goto qdf_free; } - fid->persistent_fid = le64_to_cpu(op_rsp->PersistentFileId); - fid->volatile_fid = le64_to_cpu(op_rsp->VolatileFileId); + fid->persistent_fid = op_rsp->PersistentFileId; + fid->volatile_fid = op_rsp->VolatileFileId; /* Anything else than ENODATA means a genuine error */ if (rc && rc != -ENODATA) { @@ -2646,7 +2650,7 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst) */ int smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, - __le16 *utf16_path, u32 desired_access, + const char *path, u32 desired_access, u32 class, u32 type, u32 output_len, struct kvec *rsp, int *buftype, struct cifs_sb_info *cifs_sb) @@ -2664,6 +2668,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_open_parms oparms; struct cifs_fid fid; int rc; + __le16 *utf16_path; + struct cached_fid *cfid = NULL; + + if (!path) + path = ""; + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); + if (!utf16_path) + return -ENOMEM; if (smb3_encryption_required(tcon)) flags |= 
CIFS_TRANSFORM_REQ; @@ -2672,6 +2684,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); + rc = open_cached_dir(xid, tcon, path, cifs_sb, &cfid); + memset(&open_iov, 0, sizeof(open_iov)); rqst[0].rq_iov = open_iov; rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; @@ -2693,15 +2707,29 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, rqst[1].rq_iov = qi_iov; rqst[1].rq_nvec = 1; - rc = SMB2_query_info_init(tcon, server, - &rqst[1], COMPOUND_FID, COMPOUND_FID, - class, type, 0, - output_len, 0, - NULL); + if (cfid) { + rc = SMB2_query_info_init(tcon, server, + &rqst[1], + cfid->fid->persistent_fid, + cfid->fid->volatile_fid, + class, type, 0, + output_len, 0, + NULL); + } else { + rc = SMB2_query_info_init(tcon, server, + &rqst[1], + COMPOUND_FID, + COMPOUND_FID, + class, type, 0, + output_len, 0, + NULL); + } if (rc) goto qic_exit; - smb2_set_next_command(tcon, &rqst[1]); - smb2_set_related(&rqst[1]); + if (!cfid) { + smb2_set_next_command(tcon, &rqst[1]); + smb2_set_related(&rqst[1]); + } memset(&close_iov, 0, sizeof(close_iov)); rqst[2].rq_iov = close_iov; @@ -2713,9 +2741,15 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, goto qic_exit; smb2_set_related(&rqst[2]); - rc = compound_send_recv(xid, ses, server, - flags, 3, rqst, - resp_buftype, rsp_iov); + if (cfid) { + rc = compound_send_recv(xid, ses, server, + flags, 1, &rqst[1], + &resp_buftype[1], &rsp_iov[1]); + } else { + rc = compound_send_recv(xid, ses, server, + flags, 3, rqst, + resp_buftype, rsp_iov); + } if (rc) { free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); if (rc == -EREMCHG) { @@ -2729,11 +2763,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, *buftype = resp_buftype[1]; qic_exit: + kfree(utf16_path); SMB2_open_free(&rqst[0]); SMB2_query_info_free(&rqst[1]); SMB2_close_free(&rqst[2]); free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); + if (cfid) + close_cached_dir(cfid); return rc; } @@ -2743,13 +2780,12 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon, { struct smb2_query_info_rsp *rsp; struct smb2_fs_full_size_info *info = NULL; - __le16 utf16_path = 0; /* Null - open root of share */ struct kvec rsp_iov = {NULL, 0}; int buftype = CIFS_NO_BUFFER; int rc; - rc = smb2_query_info_compound(xid, tcon, &utf16_path, + rc = smb2_query_info_compound(xid, tcon, "", FILE_READ_ATTRIBUTES, FS_FULL_SIZE_INFORMATION, SMB2_O_INFO_FILESYSTEM, @@ -4293,12 +4329,12 @@ static __le32 map_oplock_to_lease(u8 oplock) { if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) - return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING; + return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_II) - return SMB2_LEASE_READ_CACHING; + return SMB2_LEASE_READ_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_BATCH) - return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING | - SMB2_LEASE_WRITE_CACHING; + return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE | + SMB2_LEASE_WRITE_CACHING_LE; return 0; } @@ -4360,7 +4396,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) struct create_lease *lc = (struct create_lease *)buf; *epoch = 0; /* not used */ - if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) + if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) return 
SMB2_OPLOCK_LEVEL_NOCHANGE; return le32_to_cpu(lc->lcontext.LeaseState); } @@ -4371,7 +4407,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) struct create_lease_v2 *lc = (struct create_lease_v2 *)buf; *epoch = le16_to_cpu(lc->lcontext.Epoch); - if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) + if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE) return SMB2_OPLOCK_LEVEL_NOCHANGE; if (lease_key) memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); @@ -5814,8 +5850,8 @@ struct smb_version_values smb20_values = { .protocol_id = SMB20_PROT_ID, .req_capabilities = 0, /* MBZ */ .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5835,8 +5871,8 @@ struct smb_version_values smb21_values = { .protocol_id = SMB21_PROT_ID, .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */ .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5856,8 +5892,8 @@ struct smb_version_values smb3any_values = { .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5877,8 +5913,8 @@ struct smb_version_values smbdefault_values = { .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5898,8 +5934,8 @@ struct smb_version_values smb30_values = { .protocol_id = SMB30_PROT_ID, .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5919,8 +5955,8 @@ struct smb_version_values smb302_values = { .protocol_id = SMB302_PROT_ID, .req_capabilities = 
SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, @@ -5940,8 +5976,8 @@ struct smb_version_values smb311_values = { .protocol_id = SMB311_PROT_ID, .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, - .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, - .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE, + .shared_lock_type = SMB2_LOCKFLAG_SHARED, .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, .header_size = sizeof(struct smb2_hdr), .header_preamble_size = 0, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 7e7909b1ae11..1b7ad0c09566 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -163,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, return 0; spin_lock(&cifs_tcp_ses_lock); - if (tcon->tidStatus == CifsExiting) { + if (tcon->status == TID_EXITING) { /* * only tree disconnect, open, and write, * (and ulogoff which does not have tcon) @@ -2734,13 +2734,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, goto err_free_req; } - trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId), - tcon->tid, - ses->Suid, CREATE_NOT_FILE, - FILE_WRITE_ATTRIBUTES); + trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, + CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES); - SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId), - le64_to_cpu(rsp->VolatileFileId)); + SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId); /* Eventually save off posix specific response info and timestaps */ @@ -3009,14 +3006,12 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, } else if (rsp == NULL) /* unlikely to happen, but safer to check */ goto creat_exit; else - trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId), - tcon->tid, - ses->Suid, oparms->create_options, - oparms->desired_access); + trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, + oparms->create_options, oparms->desired_access); atomic_inc(&tcon->num_remote_opens); - oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId); - oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId); + oparms->fid->persistent_fid = rsp->PersistentFileId; + oparms->fid->volatile_fid = rsp->VolatileFileId; oparms->fid->access = oparms->desired_access; #ifdef CONFIG_CIFS_DEBUG2 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); @@ -3313,8 +3308,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, if (rc) return rc; - req->PersistentFileId = cpu_to_le64(persistent_fid); - req->VolatileFileId = cpu_to_le64(volatile_fid); + req->PersistentFileId = persistent_fid; + req->VolatileFileId = volatile_fid; if (query_attrs) req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; else @@ -3677,8 +3672,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, if (rc) return rc; - req->PersistentFileId = cpu_to_le64(persistent_fid); - 
req->VolatileFileId = cpu_to_le64(volatile_fid); + req->PersistentFileId = persistent_fid; + req->VolatileFileId = volatile_fid; /* See note 354 of MS-SMB2, 64K max */ req->OutputBufferLength = cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); @@ -3858,12 +3853,14 @@ void smb2_reconnect_server(struct work_struct *work) tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL); if (!tcon) { resched = true; - list_del_init(&ses->rlist); - cifs_put_smb_ses(ses); + list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { + list_del_init(&ses->rlist); + cifs_put_smb_ses(ses); + } goto done; } - tcon->tidStatus = CifsGood; + tcon->status = TID_GOOD; tcon->retry = false; tcon->need_reconnect = false; @@ -3951,8 +3948,8 @@ SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, if (rc) return rc; - req->PersistentFileId = cpu_to_le64(persistent_fid); - req->VolatileFileId = cpu_to_le64(volatile_fid); + req->PersistentFileId = persistent_fid; + req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; @@ -4033,8 +4030,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len, shdr = &req->hdr; shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); - req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid); - req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid); + req->PersistentFileId = io_parms->persistent_fid; + req->VolatileFileId = io_parms->volatile_fid; req->ReadChannelInfoOffset = 0; /* reserved */ req->ReadChannelInfoLength = 0; /* reserved */ req->Channel = 0; /* reserved */ @@ -4094,8 +4091,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len, */ shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); - req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); - req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); + req->PersistentFileId = (u64)-1; + req->VolatileFileId = (u64)-1; } } if (remaining_bytes > io_parms->length) @@ -4307,21 +4304,19 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); trace_smb3_read_err(xid, - le64_to_cpu(req->PersistentFileId), + req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length, rc); } else - trace_smb3_read_done(xid, - le64_to_cpu(req->PersistentFileId), - io_parms->tcon->tid, ses->Suid, - io_parms->offset, 0); + trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, + ses->Suid, io_parms->offset, 0); free_rsp_buf(resp_buftype, rsp_iov.iov_base); cifs_small_buf_release(req); return rc == -ENODATA ? 
0 : rc; } else trace_smb3_read_done(xid, - le64_to_cpu(req->PersistentFileId), + req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length); @@ -4463,8 +4458,8 @@ smb2_async_writev(struct cifs_writedata *wdata, shdr = (struct smb2_hdr *)req; shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid); - req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid); - req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid); + req->PersistentFileId = wdata->cfile->fid.persistent_fid; + req->VolatileFileId = wdata->cfile->fid.volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; @@ -4562,7 +4557,7 @@ smb2_async_writev(struct cifs_writedata *wdata, if (rc) { trace_smb3_write_err(0 /* no xid */, - le64_to_cpu(req->PersistentFileId), + req->PersistentFileId, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes, rc); kref_put(&wdata->refcount, release); @@ -4615,8 +4610,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); - req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid); - req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid); + req->PersistentFileId = io_parms->persistent_fid; + req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; @@ -4645,7 +4640,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, if (rc) { trace_smb3_write_err(xid, - le64_to_cpu(req->PersistentFileId), + req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length, rc); @@ -4654,7 +4649,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, } else { *nbytes = le32_to_cpu(rsp->DataLength); trace_smb3_write_done(xid, - le64_to_cpu(req->PersistentFileId), + req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, *nbytes); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 33cfd0a1adf1..d8c4388b190d 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -56,16 +56,6 @@ struct smb2_rdma_crypto_transform { #define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL -#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9) - -struct smb2_err_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; - __le16 Reserved; /* MBZ */ - __le32 ByteCount; /* even if zero, at least one byte follows */ - __u8 ErrorData[1]; /* variable length */ -} __packed; - #define SYMLINK_ERROR_TAG 0x4c4d5953 struct smb2_symlink_err_rsp { @@ -139,47 +129,6 @@ struct share_redirect_error_context_rsp { #define SMB2_LEASE_HANDLE_CACHING_HE 0x02 #define SMB2_LEASE_WRITE_CACHING_HE 0x04 -#define SMB2_LEASE_NONE cpu_to_le32(0x00) -#define SMB2_LEASE_READ_CACHING cpu_to_le32(0x01) -#define SMB2_LEASE_HANDLE_CACHING cpu_to_le32(0x02) -#define SMB2_LEASE_WRITE_CACHING cpu_to_le32(0x04) - -#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x00000002) -#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004) - -#define SMB2_LEASE_KEY_SIZE 16 - -struct lease_context { - u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; - __le32 LeaseState; - __le32 LeaseFlags; - __le64 LeaseDuration; -} __packed; - -struct lease_context_v2 { - u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; - __le32 LeaseState; - __le32 LeaseFlags; - __le64 LeaseDuration; - __le64 ParentLeaseKeyLow; - __le64 ParentLeaseKeyHigh; - __le16 Epoch; - __le16 Reserved; -} __packed; - -struct create_lease { - struct create_context ccontext; - __u8 Name[8]; - struct 
lease_context lcontext; -} __packed; - -struct create_lease_v2 { - struct create_context ccontext; - __u8 Name[8]; - struct lease_context_v2 lcontext; - __u8 Pad[4]; -} __packed; - struct create_durable { struct create_context ccontext; __u8 Name[8]; @@ -192,13 +141,6 @@ struct create_durable { } Data; } __packed; -struct create_posix { - struct create_context ccontext; - __u8 Name[16]; - __le32 Mode; - __u32 Reserved; -} __packed; - /* See MS-SMB2 2.2.13.2.11 */ /* Flags */ #define SMB2_DHANDLE_FLAG_PERSISTENT 0x00000002 @@ -287,12 +229,6 @@ struct copychunk_ioctl { __u32 Reserved2; } __packed; -/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */ -struct file_zero_data_information { - __le64 FileOffset; - __le64 BeyondFinalZero; -} __packed; - struct copychunk_ioctl_rsp { __le32 ChunksWritten; __le32 ChunkBytesWritten; @@ -338,11 +274,6 @@ struct fsctl_get_integrity_information_rsp { __le32 ClusterSizeInBytes; } __packed; -struct file_allocated_range_buffer { - __le64 file_offset; - __le64 length; -} __packed; - /* Integrity ChecksumAlgorithm choices for above */ #define CHECKSUM_TYPE_NONE 0x0000 #define CHECKSUM_TYPE_CRC64 0x0002 @@ -351,53 +282,6 @@ struct file_allocated_range_buffer { /* Integrity flags for above */ #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001 -/* Reparse structures - see MS-FSCC 2.1.2 */ - -/* struct fsctl_reparse_info_req is empty, only response structs (see below) */ - -struct reparse_data_buffer { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __u8 DataBuffer[]; /* Variable Length */ -} __packed; - -struct reparse_guid_data_buffer { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __u8 ReparseGuid[16]; - __u8 DataBuffer[]; /* Variable Length */ -} __packed; - -struct reparse_mount_point_data_buffer { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __le16 SubstituteNameOffset; - __le16 SubstituteNameLength; - __le16 PrintNameOffset; - __le16 PrintNameLength; - __u8 PathBuffer[]; /* Variable Length */ -} __packed; - -#define SYMLINK_FLAG_RELATIVE 0x00000001 - -struct reparse_symlink_data_buffer { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __le16 SubstituteNameOffset; - __le16 SubstituteNameLength; - __le16 PrintNameOffset; - __le16 PrintNameLength; - __le32 Flags; - __u8 PathBuffer[]; /* Variable Length */ -} __packed; - -/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ - - /* See MS-DFSC 2.2.2 */ struct fsctl_get_dfs_referral_req { __le16 MaxReferralLevel; @@ -413,22 +297,6 @@ struct network_resiliency_req { } __packed; /* There is no buffer for the response ie no struct network_resiliency_rsp */ - -struct validate_negotiate_info_req { - __le32 Capabilities; - __u8 Guid[SMB2_CLIENT_GUID_SIZE]; - __le16 SecurityMode; - __le16 DialectCount; - __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ -} __packed; - -struct validate_negotiate_info_rsp { - __le32 Capabilities; - __u8 Guid[SMB2_CLIENT_GUID_SIZE]; - __le16 SecurityMode; - __le16 Dialect; /* Dialect in use for the connection */ -} __packed; - #define RSS_CAPABLE cpu_to_le32(0x00000001) #define RDMA_CAPABLE cpu_to_le32(0x00000002) @@ -464,14 +332,6 @@ struct compress_ioctl { __le16 CompressionState; /* See cifspdu.h for possible flag values */ } __packed; -struct duplicate_extents_to_file { - __u64 PersistentFileHandle; /* source file handle, opaque endianness */ - __u64 VolatileFileHandle; - __le64 SourceFileOffset; - __le64 TargetFileOffset; - __le64 
ByteCount; /* Bytes to be copied */ -} __packed; - /* * Maximum number of iovs we need for an ioctl request. * [0] : struct smb2_ioctl_req @@ -479,370 +339,11 @@ struct duplicate_extents_to_file { */ #define SMB2_IOCTL_IOV_SIZE 2 -struct smb2_ioctl_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 57 */ - __u16 Reserved; - __le32 CtlCode; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - __le32 InputOffset; - __le32 InputCount; - __le32 MaxInputResponse; - __le32 OutputOffset; - __le32 OutputCount; - __le32 MaxOutputResponse; - __le32 Flags; - __u32 Reserved2; - __u8 Buffer[]; -} __packed; - -struct smb2_ioctl_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 57 */ - __u16 Reserved; - __le32 CtlCode; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - __le32 InputOffset; - __le32 InputCount; - __le32 OutputOffset; - __le32 OutputCount; - __le32 Flags; - __u32 Reserved2; - /* char * buffer[] */ -} __packed; - -#define SMB2_LOCKFLAG_SHARED_LOCK 0x0001 -#define SMB2_LOCKFLAG_EXCLUSIVE_LOCK 0x0002 -#define SMB2_LOCKFLAG_UNLOCK 0x0004 -#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010 - -struct smb2_lock_element { - __le64 Offset; - __le64 Length; - __le32 Flags; - __le32 Reserved; -} __packed; - -struct smb2_lock_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 48 */ - __le16 LockCount; - /* - * The least significant four bits are the index, the other 28 bits are - * the lock sequence number (0 to 64). See MS-SMB2 2.2.26 - */ - __le32 LockSequenceNumber; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - /* Followed by at least one */ - struct smb2_lock_element locks[1]; -} __packed; - -struct smb2_lock_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __le16 Reserved; -} __packed; - -struct smb2_echo_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __u16 Reserved; -} __packed; - -struct smb2_echo_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __u16 Reserved; -} __packed; - -/* search (query_directory) Flags field */ -#define SMB2_RESTART_SCANS 0x01 -#define SMB2_RETURN_SINGLE_ENTRY 0x02 -#define SMB2_INDEX_SPECIFIED 0x04 -#define SMB2_REOPEN 0x10 - -#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2 - -/* - * Valid FileInformation classes. 
- * - * Note that these are a subset of the (file) QUERY_INFO levels defined - * later in this file (but since QUERY_DIRECTORY uses equivalent numbers - * we do not redefine them here) - * - * FileDirectoryInfomation 0x01 - * FileFullDirectoryInformation 0x02 - * FileIdFullDirectoryInformation 0x26 - * FileBothDirectoryInformation 0x03 - * FileIdBothDirectoryInformation 0x25 - * FileNamesInformation 0x0C - * FileIdExtdDirectoryInformation 0x3C - */ - -struct smb2_query_directory_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 33 */ - __u8 FileInformationClass; - __u8 Flags; - __le32 FileIndex; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - __le16 FileNameOffset; - __le16 FileNameLength; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -struct smb2_query_directory_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 9 */ - __le16 OutputBufferOffset; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -/* Possible InfoType values */ -#define SMB2_O_INFO_FILE 0x01 -#define SMB2_O_INFO_FILESYSTEM 0x02 -#define SMB2_O_INFO_SECURITY 0x03 -#define SMB2_O_INFO_QUOTA 0x04 - -/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */ -#define OWNER_SECINFO 0x00000001 -#define GROUP_SECINFO 0x00000002 -#define DACL_SECINFO 0x00000004 -#define SACL_SECINFO 0x00000008 -#define LABEL_SECINFO 0x00000010 -#define ATTRIBUTE_SECINFO 0x00000020 -#define SCOPE_SECINFO 0x00000040 -#define BACKUP_SECINFO 0x00010000 -#define UNPROTECTED_SACL_SECINFO 0x10000000 -#define UNPROTECTED_DACL_SECINFO 0x20000000 -#define PROTECTED_SACL_SECINFO 0x40000000 -#define PROTECTED_DACL_SECINFO 0x80000000 - -/* Flags used for FileFullEAinfo */ -#define SL_RESTART_SCAN 0x00000001 -#define SL_RETURN_SINGLE_ENTRY 0x00000002 -#define SL_INDEX_SPECIFIED 0x00000004 - -struct smb2_query_info_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 41 */ - __u8 InfoType; - __u8 FileInfoClass; - __le32 OutputBufferLength; - __le16 InputBufferOffset; - __u16 Reserved; - __le32 InputBufferLength; - __le32 AdditionalInformation; - __le32 Flags; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - __u8 Buffer[1]; -} __packed; - -struct smb2_query_info_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 9 */ - __le16 OutputBufferOffset; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -/* - * Maximum number of iovs we need for a set-info request. 
- * The largest one is rename/hardlink - * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info - * [1] : path - * [2] : compound padding - */ -#define SMB2_SET_INFO_IOV_SIZE 3 - -struct smb2_set_info_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 33 */ - __u8 InfoType; - __u8 FileInfoClass; - __le32 BufferLength; - __le16 BufferOffset; - __u16 Reserved; - __le32 AdditionalInformation; - __u64 PersistentFileId; /* opaque endianness */ - __u64 VolatileFileId; /* opaque endianness */ - __u8 Buffer[1]; -} __packed; - -struct smb2_set_info_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 2 */ -} __packed; - -struct smb2_oplock_break { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 24 */ - __u8 OplockLevel; - __u8 Reserved; - __le32 Reserved2; - __u64 PersistentFid; - __u64 VolatileFid; -} __packed; - -#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01) - -struct smb2_lease_break { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 44 */ - __le16 Epoch; - __le32 Flags; - __u8 LeaseKey[16]; - __le32 CurrentLeaseState; - __le32 NewLeaseState; - __le32 BreakReason; - __le32 AccessMaskHint; - __le32 ShareMaskHint; -} __packed; - -struct smb2_lease_ack { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 36 */ - __le16 Reserved; - __le32 Flags; - __u8 LeaseKey[16]; - __le32 LeaseState; - __le64 LeaseDuration; -} __packed; - /* - * PDU infolevel structure definitions + * PDU query infolevel structure definitions * BB consider moving to a different header */ -/* File System Information Classes */ -#define FS_VOLUME_INFORMATION 1 /* Query */ -#define FS_LABEL_INFORMATION 2 /* Local only */ -#define FS_SIZE_INFORMATION 3 /* Query */ -#define FS_DEVICE_INFORMATION 4 /* Query */ -#define FS_ATTRIBUTE_INFORMATION 5 /* Query */ -#define FS_CONTROL_INFORMATION 6 /* Query, Set */ -#define FS_FULL_SIZE_INFORMATION 7 /* Query */ -#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */ -#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */ -#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */ -#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */ -#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. 
Query */ - -struct smb2_fs_full_size_info { - __le64 TotalAllocationUnits; - __le64 CallerAvailableAllocationUnits; - __le64 ActualAvailableAllocationUnits; - __le32 SectorsPerAllocationUnit; - __le32 BytesPerSector; -} __packed; - -#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001 -#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002 -#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004 -#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008 - -/* sector size info struct */ -struct smb3_fs_ss_info { - __le32 LogicalBytesPerSector; - __le32 PhysicalBytesPerSectorForAtomicity; - __le32 PhysicalBytesPerSectorForPerf; - __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity; - __le32 Flags; - __le32 ByteOffsetForSectorAlignment; - __le32 ByteOffsetForPartitionAlignment; -} __packed; - -/* volume info struct - see MS-FSCC 2.5.9 */ -#define MAX_VOL_LABEL_LEN 32 -struct smb3_fs_vol_info { - __le64 VolumeCreationTime; - __u32 VolumeSerialNumber; - __le32 VolumeLabelLength; /* includes trailing null */ - __u8 SupportsObjects; /* True if eg like NTFS, supports objects */ - __u8 Reserved; - __u8 VolumeLabel[]; /* variable len */ -} __packed; - -/* partial list of QUERY INFO levels */ -#define FILE_DIRECTORY_INFORMATION 1 -#define FILE_FULL_DIRECTORY_INFORMATION 2 -#define FILE_BOTH_DIRECTORY_INFORMATION 3 -#define FILE_BASIC_INFORMATION 4 -#define FILE_STANDARD_INFORMATION 5 -#define FILE_INTERNAL_INFORMATION 6 -#define FILE_EA_INFORMATION 7 -#define FILE_ACCESS_INFORMATION 8 -#define FILE_NAME_INFORMATION 9 -#define FILE_RENAME_INFORMATION 10 -#define FILE_LINK_INFORMATION 11 -#define FILE_NAMES_INFORMATION 12 -#define FILE_DISPOSITION_INFORMATION 13 -#define FILE_POSITION_INFORMATION 14 -#define FILE_FULL_EA_INFORMATION 15 -#define FILE_MODE_INFORMATION 16 -#define FILE_ALIGNMENT_INFORMATION 17 -#define FILE_ALL_INFORMATION 18 -#define FILE_ALLOCATION_INFORMATION 19 -#define FILE_END_OF_FILE_INFORMATION 20 -#define FILE_ALTERNATE_NAME_INFORMATION 21 -#define FILE_STREAM_INFORMATION 22 -#define FILE_PIPE_INFORMATION 23 -#define FILE_PIPE_LOCAL_INFORMATION 24 -#define FILE_PIPE_REMOTE_INFORMATION 25 -#define FILE_MAILSLOT_QUERY_INFORMATION 26 -#define FILE_MAILSLOT_SET_INFORMATION 27 -#define FILE_COMPRESSION_INFORMATION 28 -#define FILE_OBJECT_ID_INFORMATION 29 -/* Number 30 not defined in documents */ -#define FILE_MOVE_CLUSTER_INFORMATION 31 -#define FILE_QUOTA_INFORMATION 32 -#define FILE_REPARSE_POINT_INFORMATION 33 -#define FILE_NETWORK_OPEN_INFORMATION 34 -#define FILE_ATTRIBUTE_TAG_INFORMATION 35 -#define FILE_TRACKING_INFORMATION 36 -#define FILEID_BOTH_DIRECTORY_INFORMATION 37 -#define FILEID_FULL_DIRECTORY_INFORMATION 38 -#define FILE_VALID_DATA_LENGTH_INFORMATION 39 -#define FILE_SHORT_NAME_INFORMATION 40 -#define FILE_SFIO_RESERVE_INFORMATION 44 -#define FILE_SFIO_VOLUME_INFORMATION 45 -#define FILE_HARD_LINK_INFORMATION 46 -#define FILE_NORMALIZED_NAME_INFORMATION 48 -#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50 -#define FILE_STANDARD_LINK_INFORMATION 54 -#define FILE_ID_INFORMATION 59 -#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60 - -struct smb2_file_internal_info { - __le64 IndexNumber; -} __packed; /* level 6 Query */ - -struct smb2_file_rename_info { /* encoding of request for level 10 */ - __u8 ReplaceIfExists; /* 1 = replace existing target with new */ - /* 0 = fail if target already exists */ - __u8 Reserved[7]; - __u64 RootDirectory; /* MBZ for network operations (why says spec?) 
*/ - __le32 FileNameLength; - char FileName[]; /* New name to be assigned */ - /* padding - overall struct size must be >= 24 so filename + pad >= 6 */ -} __packed; /* level 10 Set */ - -struct smb2_file_link_info { /* encoding of request for level 11 */ - __u8 ReplaceIfExists; /* 1 = replace existing link with new */ - /* 0 = fail if link already exists */ - __u8 Reserved[7]; - __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ - __le32 FileNameLength; - char FileName[]; /* Name to be assigned to new link */ -} __packed; /* level 11 Set */ - struct smb2_file_full_ea_info { /* encoding of response for level 15 */ __le32 next_entry_offset; __u8 flags; @@ -851,38 +352,6 @@ struct smb2_file_full_ea_info { /* encoding of response for level 15 */ char ea_data[]; /* \0 terminated name plus value */ } __packed; /* level 15 Set */ -/* - * This level 18, although with struct with same name is different from cifs - * level 0x107. Level 0x107 has an extra u64 between AccessFlags and - * CurrentByteOffset. - */ -struct smb2_file_all_info { /* data block encoding of response to level 18 */ - __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */ - __le64 LastAccessTime; - __le64 LastWriteTime; - __le64 ChangeTime; - __le32 Attributes; - __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */ - __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */ - __le64 EndOfFile; /* size ie offset to first free byte in file */ - __le32 NumberOfLinks; /* hard links */ - __u8 DeletePending; - __u8 Directory; - __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */ - __le64 IndexNumber; - __le32 EASize; - __le32 AccessFlags; - __le64 CurrentByteOffset; - __le32 Mode; - __le32 AlignmentRequirement; - __le32 FileNameLength; - char FileName[1]; -} __packed; /* level 18 Query */ - -struct smb2_file_eof_info { /* encoding of request for level 10 */ - __le64 EndOfFile; /* new end of file value */ -} __packed; /* level 20 Set */ - struct smb2_file_reparse_point_info { __le64 IndexNumber; __le32 Tag; @@ -935,6 +404,8 @@ struct create_posix_rsp { struct cifs_sid group; /* var-sized on the wire */ } __packed; +#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2 + /* * SMB2-only POSIX info level for query dir * @@ -966,31 +437,6 @@ struct smb2_posix_info { */ } __packed; -/* Level 100 query info */ -struct smb311_posix_qinfo { - __le64 CreationTime; - __le64 LastAccessTime; - __le64 LastWriteTime; - __le64 ChangeTime; - __le64 EndOfFile; - __le64 AllocationSize; - __le32 DosAttributes; - __le64 Inode; - __le32 DeviceId; - __le32 Zero; - /* beginning of POSIX Create Context Response */ - __le32 HardLinks; - __le32 ReparseTag; - __le32 Mode; - u8 Sids[]; - /* - * var sized owner SID - * var sized group SID - * le32 filenamelength - * u8 filename[] - */ -} __packed; - /* * Parsed version of the above struct. 
Allows direct access to the * variable length fields diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 4a7062fd1c26..a69f1eed1cfe 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -283,7 +283,7 @@ extern int smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec); extern int smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, - __le16 *utf16_path, u32 desired_access, + const char *path, u32 desired_access, u32 class, u32 type, u32 output_len, struct kvec *rsp, int *buftype, struct cifs_sb_info *cifs_sb); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 4fcca79f39ae..526a4c1bed99 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); * which must still be locked and not uptodate. Normally, blocksize == * PAGE_SIZE and the whole page is decrypted at once. * - * This is for use by the filesystem's ->readpages() method. + * This is for use by the filesystem's ->readahead() method. * * Return: 0 on success; -errno on failure */ diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 619e5b4bed10..c6800b880920 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -203,7 +203,8 @@ struct exfat_mount_options { /* on error: continue, panic, remount-ro */ enum exfat_error_mode errors; unsigned utf8:1, /* Use of UTF-8 character set */ - discard:1; /* Issue discard requests on deletions */ + discard:1, /* Issue discard requests on deletions */ + keep_last_dots:1; /* Keep trailing periods in paths */ int time_offset; /* Offset of timestamps from UTC (in minutes) */ }; diff --git a/fs/exfat/file.c b/fs/exfat/file.c index d890fd34bb2d..2f5130059236 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -218,8 +218,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) if (exfat_free_cluster(inode, &clu)) return -EIO; - exfat_clear_volume_dirty(sb); - return 0; } diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index af4eb39cc0c3..a02a04a993bf 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -65,11 +65,14 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags) return ret; } -/* returns the length of a struct qstr, ignoring trailing dots */ -static unsigned int exfat_striptail_len(unsigned int len, const char *name) +/* returns the length of a struct qstr, ignoring trailing dots if necessary */ +static unsigned int exfat_striptail_len(unsigned int len, const char *name, + bool keep_last_dots) { - while (len && name[len - 1] == '.') - len--; + if (!keep_last_dots) { + while (len && name[len - 1] == '.') + len--; + } return len; } @@ -83,7 +86,8 @@ static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr) struct super_block *sb = dentry->d_sb; struct nls_table *t = EXFAT_SB(sb)->nls_io; const unsigned char *name = qstr->name; - unsigned int len = exfat_striptail_len(qstr->len, qstr->name); + unsigned int len = exfat_striptail_len(qstr->len, qstr->name, + EXFAT_SB(sb)->options.keep_last_dots); unsigned long hash = init_name_hash(dentry); int i, charlen; wchar_t c; @@ -104,8 +108,10 @@ static int exfat_d_cmp(const struct dentry *dentry, unsigned int len, { struct super_block *sb = dentry->d_sb; struct nls_table *t = EXFAT_SB(sb)->nls_io; - unsigned int alen = exfat_striptail_len(name->len, name->name); - unsigned int blen = exfat_striptail_len(len, str); + unsigned int alen = exfat_striptail_len(name->len, name->name, + EXFAT_SB(sb)->options.keep_last_dots); + unsigned int blen = exfat_striptail_len(len, str, 
+ EXFAT_SB(sb)->options.keep_last_dots); wchar_t c1, c2; int charlen, i; @@ -136,7 +142,8 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr) { struct super_block *sb = dentry->d_sb; const unsigned char *name = qstr->name; - unsigned int len = exfat_striptail_len(qstr->len, qstr->name); + unsigned int len = exfat_striptail_len(qstr->len, qstr->name, + EXFAT_SB(sb)->options.keep_last_dots); unsigned long hash = init_name_hash(dentry); int i, charlen; unicode_t u; @@ -161,8 +168,11 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct super_block *sb = dentry->d_sb; - unsigned int alen = exfat_striptail_len(name->len, name->name); - unsigned int blen = exfat_striptail_len(len, str); + unsigned int alen = exfat_striptail_len(name->len, name->name, + EXFAT_SB(sb)->options.keep_last_dots); + unsigned int blen = exfat_striptail_len(len, str, + EXFAT_SB(sb)->options.keep_last_dots); + unicode_t u_a, u_b; int charlen, i; @@ -416,13 +426,25 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path, struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); + int pathlen = strlen(path); - /* strip all trailing periods */ - namelen = exfat_striptail_len(strlen(path), path); + /* + * get the length of the pathname excluding + * trailing periods, if any. + */ + namelen = exfat_striptail_len(pathlen, path, false); + if (EXFAT_SB(sb)->options.keep_last_dots) { + /* + * Do not allow the creation of files with names + * ending with period(s). + */ + if (!lookup && (namelen < pathlen)) + return -EINVAL; + namelen = pathlen; + } if (!namelen) return -ENOENT; - - if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE)) + if (pathlen > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE)) return -ENAMETOOLONG; /* @@ -554,7 +576,6 @@ static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir, exfat_set_volume_dirty(sb); err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE, &info); - exfat_clear_volume_dirty(sb); if (err) goto unlock; @@ -812,7 +833,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry) /* This doesn't modify ei */ ei->dir.dir = DIR_DELETED; - exfat_clear_volume_dirty(sb); inode_inc_iversion(dir); dir->i_mtime = dir->i_atime = current_time(dir); @@ -846,7 +866,6 @@ static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir, exfat_set_volume_dirty(sb); err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR, &info); - exfat_clear_volume_dirty(sb); if (err) goto unlock; @@ -976,7 +995,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) goto unlock; } ei->dir.dir = DIR_DELETED; - exfat_clear_volume_dirty(sb); inode_inc_iversion(dir); dir->i_mtime = dir->i_atime = current_time(dir); @@ -1311,7 +1329,6 @@ del_out: */ new_ei->dir.dir = DIR_DELETED; } - exfat_clear_volume_dirty(sb); out: return ret; } diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 9f892903419a..8ca21e7917d1 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -100,7 +100,6 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags) { struct exfat_sb_info *sbi = EXFAT_SB(sb); struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; - bool sync; /* retain persistent-flags */ new_flags |= sbi->vol_flags_persistent; @@ -119,16 +118,11 @@ static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags) 
p_boot->vol_flags = cpu_to_le16(new_flags); - if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh)) - sync = true; - else - sync = false; - set_buffer_uptodate(sbi->boot_bh); mark_buffer_dirty(sbi->boot_bh); - if (sync) - sync_dirty_buffer(sbi->boot_bh); + __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH); + return 0; } @@ -174,6 +168,8 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",errors=remount-ro"); if (opts->discard) seq_puts(m, ",discard"); + if (opts->keep_last_dots) + seq_puts(m, ",keep_last_dots"); if (opts->time_offset) seq_printf(m, ",time_offset=%d", opts->time_offset); return 0; @@ -217,6 +213,7 @@ enum { Opt_charset, Opt_errors, Opt_discard, + Opt_keep_last_dots, Opt_time_offset, /* Deprecated options */ @@ -243,6 +240,7 @@ static const struct fs_parameter_spec exfat_parameters[] = { fsparam_string("iocharset", Opt_charset), fsparam_enum("errors", Opt_errors, exfat_param_enums), fsparam_flag("discard", Opt_discard), + fsparam_flag("keep_last_dots", Opt_keep_last_dots), fsparam_s32("time_offset", Opt_time_offset), __fsparam(NULL, "utf8", Opt_utf8, fs_param_deprecated, NULL), @@ -297,6 +295,9 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param) case Opt_discard: opts->discard = 1; break; + case Opt_keep_last_dots: + opts->keep_last_dots = 1; + break; case Opt_time_offset: /* * Make the limit 24 just in case someone invents something diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8bd66cdc41be..6feb07e3e1eb 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, goto out; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; out: diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 1ce13f69fbec..13740f2d0e61 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3589,7 +3589,7 @@ const struct iomap_ops ext4_iomap_report_ops = { static bool ext4_journalled_dirty_folio(struct address_space *mapping, struct folio *folio) { - WARN_ON_ONCE(!page_has_buffers(&folio->page)); + WARN_ON_ONCE(!folio_buffers(folio)); folio_set_checked(folio); return filemap_dirty_folio(mapping, folio); } diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 1aa26d6634fc..af491e170c4a 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work) struct bio *bio = ctx->bio; /* - * fsverity_verify_bio() may call readpages() again, and although verity + * fsverity_verify_bio() may call readahead() again, and although verity * will be disabled for that, decryption may still be needed, causing * another bio_post_read_ctx to be allocated. So to guarantee that * mempool_alloc() never deadlocks we must free the current ctx first. 
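The fsverity comments touched above (ext4's verity_work(), and f2fs's f2fs_verify_bio() in the next file) both hinge on the same mempool rule: a task must return its element to the pool before re-entering any path that can allocate from that same pool, otherwise the second allocation can sleep forever waiting for the element the task itself is still holding. The stand-alone C sketch below illustrates that ordering with a toy one-element pool in place of the kernel's mempool API; the pool and function names (toy_pool_alloc, work_step, etc.) are invented for illustration and are not taken from the patch.

/*
 * Minimal user-space sketch (not kernel code) of the ordering rule the
 * ext4/f2fs verity comments describe: a fixed-size pool only guarantees
 * forward progress if the caller returns its element *before* re-entering
 * a path that may allocate from the same pool.
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 1
static void *toy_pool[POOL_SIZE];
static int toy_pool_free = POOL_SIZE;

static void *toy_pool_alloc(void)
{
	if (toy_pool_free == 0)
		return NULL;	/* the kernel mempool would sleep here instead */
	return toy_pool[--toy_pool_free];
}

static void toy_pool_put(void *elem)
{
	toy_pool[toy_pool_free++] = elem;
}

static void work_step(int depth)
{
	void *ctx = toy_pool_alloc();

	if (!ctx) {
		printf("depth %d: pool exhausted - would deadlock in-kernel\n", depth);
		return;
	}
	/* ... use ctx for this step ... */

	/*
	 * Return the context before recursing, just as verity_work() frees
	 * the current bio_post_read_ctx before fsverity_verify_bio() can
	 * trigger another readahead()/decryption allocation.
	 */
	toy_pool_put(ctx);

	if (depth < 2)
		work_step(depth + 1);	/* stands in for the re-entered read path */
}

int main(void)
{
	for (int i = 0; i < POOL_SIZE; i++)
		toy_pool[i] = malloc(16);
	work_step(0);
	return 0;
}

Moving toy_pool_put() after the recursive call makes the nested step fail immediately on the empty pool, which is the user-space analogue of the in-kernel deadlock the comments warn about.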
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index a8fc4fa511a8..f5366feea82d 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -456,7 +456,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping, folio_mark_uptodate(folio); if (!folio_test_dirty(folio)) { filemap_dirty_folio(mapping, folio); - inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META); + inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META); set_page_private_reference(&folio->page); return true; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index f8fcbe91059b..8e0c2e773c8d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work) bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS); /* - * fsverity_verify_bio() may call readpages() again, and while verity + * fsverity_verify_bio() may call readahead() again, and while verity * will be disabled for this, decryption and/or decompression may still * be needed, resulting in another bio_post_read_ctx being allocated. * So to prevent deadlocks we need to release the current ctx to the @@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac) if (!f2fs_is_compress_backend_ready(inode)) return; - /* If the file has inline data, skip readpages */ + /* If the file has inline data, skip readahead */ if (f2fs_has_inline_data(inode)) return; @@ -3571,7 +3571,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping, f2fs_update_dirty_folio(inode, folio); return true; } - return true; + return false; } diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d3f39a704b8b..5b89af0f27f0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, return -EOPNOTSUPP; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(file, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; if (ret > 0) { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 0b6e741e94a0..c45d341dcf6e 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2146,11 +2146,11 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping, folio_mark_uptodate(folio); #ifdef CONFIG_F2FS_CHECK_FS if (IS_INODE(&folio->page)) - f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page); + f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page); #endif if (!folio_test_dirty(folio)) { filemap_dirty_folio(mapping, folio); - inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES); + inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); set_page_private_reference(&folio->page); return true; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index eac4984cc753..488b460e046f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -627,7 +627,7 @@ struct fuse_conn { /** Connection successful. Only set in INIT */ unsigned conn_init:1; - /** Do readpages asynchronously? Only set in INIT */ + /** Do readahead asynchronously? Only set in INIT */ unsigned async_read:1; /** Return an unique read error after abort. 
Only set in INIT */ diff --git a/fs/internal.h b/fs/internal.h index fb2c2ea807d7..08503dc68d2b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -74,7 +74,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd, * namespace.c */ extern struct vfsmount *lookup_mnt(const struct path *); -extern int finish_automount(struct vfsmount *, struct path *); +extern int finish_automount(struct vfsmount *, const struct path *); extern int sb_prepare_remount_readonly(struct super_block *); diff --git a/fs/io_uring.c b/fs/io_uring.c index b94d57c1b0e5..a8413f006417 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -611,6 +611,7 @@ struct io_sr_msg { int msg_flags; int bgid; size_t len; + size_t done_io; }; struct io_open { @@ -781,6 +782,7 @@ enum { REQ_F_SKIP_LINK_CQES_BIT, REQ_F_SINGLE_POLL_BIT, REQ_F_DOUBLE_POLL_BIT, + REQ_F_PARTIAL_IO_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, @@ -843,6 +845,8 @@ enum { REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), /* double poll may active */ REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), + /* request has already done partial IO */ + REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), }; struct async_poll { @@ -923,7 +927,6 @@ struct io_kiocb { struct io_wq_work_node comp_list; atomic_t refs; atomic_t poll_refs; - struct io_kiocb *link; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; @@ -931,9 +934,11 @@ struct io_kiocb { struct async_poll *apoll; /* opcode allocated if it needs to store data for async defer */ void *async_data; - /* custom credentials, valid IFF REQ_F_CREDS is set */ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; + /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */ + struct io_kiocb *link; + /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; struct io_wq_work work; }; @@ -962,6 +967,7 @@ struct io_op_def { /* set if opcode supports polled "wait" */ unsigned pollin : 1; unsigned pollout : 1; + unsigned poll_exclusive : 1; /* op supports buffer selection */ unsigned buffer_select : 1; /* do prep async if is going to be punted */ @@ -1056,6 +1062,7 @@ static const struct io_op_def io_op_defs[] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, + .poll_exclusive = 1, }, [IORING_OP_ASYNC_CANCEL] = { .audit_skip = 1, @@ -1330,6 +1337,8 @@ static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list) static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req) { + lockdep_assert_held(&req->ctx->completion_lock); + if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) return 0; return __io_put_kbuf(req, &req->ctx->io_buffers_comp); @@ -1362,6 +1371,8 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, cflags = __io_put_kbuf(req, &ctx->io_buffers_comp); spin_unlock(&ctx->completion_lock); } else { + lockdep_assert_held(&req->ctx->uring_lock); + cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache); } @@ -1382,7 +1393,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx, return NULL; } -static void io_kbuf_recycle(struct io_kiocb *req) +static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; @@ -1390,6 +1401,12 @@ static void io_kbuf_recycle(struct io_kiocb *req) if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) return; + /* don't recycle if we already did IO to this 
buffer */ + if (req->flags & REQ_F_PARTIAL_IO) + return; + + if (issue_flags & IO_URING_F_UNLOCKED) + mutex_lock(&ctx->uring_lock); lockdep_assert_held(&ctx->uring_lock); @@ -1398,6 +1415,9 @@ static void io_kbuf_recycle(struct io_kiocb *req) list_add(&buf->list, &bl->buf_list); req->flags &= ~REQ_F_BUFFER_SELECTED; req->kbuf = NULL; + + if (issue_flags & IO_URING_F_UNLOCKED) + mutex_unlock(&ctx->uring_lock); } static bool io_match_task(struct io_kiocb *head, struct task_struct *task, @@ -2104,6 +2124,12 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res, } } io_req_put_rsrc(req, ctx); + /* + * Selected buffer deallocation in io_clean_op() assumes that + * we don't hold ->completion_lock. Clean them here to avoid + * deadlocks. + */ + io_put_kbuf_comp(req); io_dismantle_req(req); io_put_task(req->task, 1); wq_list_add_head(&req->comp_list, &ctx->locked_free_list); @@ -2148,7 +2174,7 @@ static inline void io_req_complete(struct io_kiocb *req, s32 res) static void io_req_complete_failed(struct io_kiocb *req, s32 res) { req_set_fail(req); - io_req_complete_post(req, res, io_put_kbuf(req, 0)); + io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED)); } static void io_req_complete_fail_submit(struct io_kiocb *req) @@ -2437,6 +2463,8 @@ static void handle_prev_tw_list(struct io_wq_work_node *node, struct io_kiocb *req = container_of(node, struct io_kiocb, io_task_work.node); + prefetch(container_of(next, struct io_kiocb, io_task_work.node)); + if (req->ctx != *ctx) { if (unlikely(!*uring_locked && *ctx)) ctx_commit_and_unlock(*ctx); @@ -2469,6 +2497,8 @@ static void handle_tw_list(struct io_wq_work_node *node, struct io_kiocb *req = container_of(node, struct io_kiocb, io_task_work.node); + prefetch(container_of(next, struct io_kiocb, io_task_work.node)); + if (req->ctx != *ctx) { ctx_flush_and_put(*ctx, locked); *ctx = req->ctx; @@ -2974,8 +3004,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req) static bool __io_complete_rw_common(struct io_kiocb *req, long res) { - if (req->rw.kiocb.ki_flags & IOCB_WRITE) + if (req->rw.kiocb.ki_flags & IOCB_WRITE) { kiocb_end_write(req); + fsnotify_modify(req->file); + } else { + fsnotify_access(req->file); + } if (unlikely(res != req->result)) { if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { @@ -4439,9 +4473,6 @@ static int io_msg_ring_prep(struct io_kiocb *req, sqe->splice_fd_in || sqe->buf_index || sqe->personality)) return -EINVAL; - if (req->file->f_op != &io_uring_fops) - return -EBADFD; - req->msg.user_data = READ_ONCE(sqe->off); req->msg.len = READ_ONCE(sqe->len); return 0; @@ -4451,14 +4482,18 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx; struct io_msg *msg = &req->msg; - int ret = -EOVERFLOW; bool filled; + int ret; + + ret = -EBADFD; + if (req->file->f_op != &io_uring_fops) + goto done; + ret = -EOVERFLOW; target_ctx = req->file->private_data; spin_lock(&target_ctx->completion_lock); - filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, - IORING_CQE_F_MSG); + filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0); io_commit_cqring(target_ctx); spin_unlock(&target_ctx->completion_lock); @@ -4467,6 +4502,9 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) ret = 0; } +done: + if (ret < 0) + req_set_fail(req); __io_req_complete(req, issue_flags, ret, 0); return 0; } @@ -4537,6 +4575,8 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) req->sync.len); if 
(ret < 0) req_set_fail(req); + else + fsnotify_modify(req->file); io_req_complete(req, ret); return 0; } @@ -5419,12 +5459,21 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif + sr->done_io = 0; return 0; } +static bool io_net_retry(struct socket *sock, int flags) +{ + if (!(flags & MSG_WAITALL)) + return false; + return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET; +} + static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_msghdr iomsg, *kmsg; + struct io_sr_msg *sr = &req->sr_msg; struct socket *sock; struct io_buffer *kbuf; unsigned flags; @@ -5467,6 +5516,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) return io_setup_async_msg(req, kmsg); if (ret == -ERESTARTSYS) ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return io_setup_async_msg(req, kmsg); + } req_set_fail(req); } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { req_set_fail(req); @@ -5476,6 +5530,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) if (kmsg->free_iov) kfree(kmsg->free_iov); req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret >= 0) + ret += sr->done_io; + else if (sr->done_io) + ret = sr->done_io; __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags)); return 0; } @@ -5526,12 +5584,23 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) return -EAGAIN; if (ret == -ERESTARTSYS) ret = -EINTR; + if (ret > 0 && io_net_retry(sock, flags)) { + sr->len -= ret; + sr->buf += ret; + sr->done_io += ret; + req->flags |= REQ_F_PARTIAL_IO; + return -EAGAIN; + } req_set_fail(req); } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { out_free: req_set_fail(req); } + if (ret >= 0) + ret += sr->done_io; + else if (sr->done_io) + ret = sr->done_io; __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags)); return 0; } @@ -5569,9 +5638,6 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags) struct file *file; int ret, fd; - if (req->file->f_flags & O_NONBLOCK) - req->flags |= REQ_F_NOWAIT; - if (!fixed) { fd = __get_unused_fd_flags(accept->flags, accept->nofile); if (unlikely(fd < 0)) @@ -5801,7 +5867,7 @@ struct io_poll_table { }; #define IO_POLL_CANCEL_FLAG BIT(31) -#define IO_POLL_REF_MASK ((1u << 20)-1) +#define IO_POLL_REF_MASK GENMASK(30, 0) /* * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. 
We can @@ -6035,10 +6101,13 @@ static void io_poll_cancel_req(struct io_kiocb *req) io_poll_execute(req, 0, 0); } +#define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1)) +#define wqe_is_double(wait) ((unsigned long) (wait)->private & 1) + static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { - struct io_kiocb *req = wait->private; + struct io_kiocb *req = wqe_to_req(wait); struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, wait); __poll_t mask = key_to_poll(key); @@ -6076,7 +6145,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, if (mask && poll->events & EPOLLONESHOT) { list_del_init(&poll->wait.entry); poll->head = NULL; - req->flags &= ~REQ_F_SINGLE_POLL; + if (wqe_is_double(wait)) + req->flags &= ~REQ_F_DOUBLE_POLL; + else + req->flags &= ~REQ_F_SINGLE_POLL; } __io_poll_execute(req, mask, poll->events); } @@ -6088,6 +6160,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, struct io_poll_iocb **poll_ptr) { struct io_kiocb *req = pt->req; + unsigned long wqe_private = (unsigned long) req; /* * The file being polled uses multiple waitqueues for poll handling @@ -6113,6 +6186,8 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, pt->error = -ENOMEM; return; } + /* mark as double wq entry */ + wqe_private |= 1; req->flags |= REQ_F_DOUBLE_POLL; io_init_poll_iocb(poll, first->events, first->wait.func); *poll_ptr = poll; @@ -6123,7 +6198,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, req->flags |= REQ_F_SINGLE_POLL; pt->nr_entries++; poll->head = head; - poll->wait.private = req; + poll->wait.private = (void *) wqe_private; if (poll->events & EPOLLEXCLUSIVE) add_wait_queue_exclusive(head, &poll->wait); @@ -6150,7 +6225,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req, INIT_HLIST_NODE(&req->hash_node); io_init_poll_iocb(poll, mask, io_poll_wake); poll->file = req->file; - poll->wait.private = req; ipt->pt._key = mask; ipt->req = req; @@ -6238,7 +6312,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) } else { mask |= POLLOUT | POLLWRNORM; } - + if (def->poll_exclusive) + mask |= EPOLLEXCLUSIVE; if (!(issue_flags & IO_URING_F_UNLOCKED) && !list_empty(&ctx->apoll_cache)) { apoll = list_first_entry(&ctx->apoll_cache, struct async_poll, @@ -6254,6 +6329,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) req->flags |= REQ_F_POLLED; ipt.pt._qproc = io_async_queue_proc; + io_kbuf_recycle(req, issue_flags); + ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); if (ret || ipt.error) return ret ? 
IO_APOLL_READY : IO_APOLL_ABORTED; @@ -6281,6 +6358,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { if (io_match_task_safe(req, tsk, cancel_all)) { + hlist_del_init(&req->hash_node); io_poll_cancel_req(req); found = true; } @@ -7075,8 +7153,11 @@ fail: static void io_clean_op(struct io_kiocb *req) { - if (req->flags & REQ_F_BUFFER_SELECTED) + if (req->flags & REQ_F_BUFFER_SELECTED) { + spin_lock(&req->ctx->completion_lock); io_put_kbuf_comp(req); + spin_unlock(&req->ctx->completion_lock); + } if (req->flags & REQ_F_NEED_CLEANUP) { switch (req->opcode) { @@ -7505,11 +7586,9 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req) * Queued up for async execution, worker will release * submit reference when the iocb is actually submitted. */ - io_kbuf_recycle(req); io_queue_async_work(req, NULL); break; case IO_APOLL_OK: - io_kbuf_recycle(req); break; } @@ -8053,6 +8132,13 @@ static int io_sq_thread(void *data) needs_sched = false; break; } + + /* + * Ensure the store of the wakeup flag is not + * reordered with the load of the SQ tail + */ + smp_mb(); + if (io_sqring_entries(ctx)) { needs_sched = false; break; @@ -8782,6 +8868,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) fput(fpl->fp[i]); } else { kfree_skb(skb); + free_uid(fpl->user); kfree(fpl); } diff --git a/fs/ioctl.c b/fs/ioctl.c index 090bf47606ab..80ac36aea913 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, if (*len == 0) return -EINVAL; - if (start > maxbytes) + if (start >= maxbytes) return -EFBIG; /* diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 49dccd9050f1..8ce8720093b9 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -435,18 +435,17 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { struct iomap_page *iop = to_iomap_page(folio); struct inode *inode = folio->mapping->host; - size_t len; unsigned first, last, i; if (!iop) return false; - /* Limit range to this folio */ - len = min(folio_size(folio) - from, count); + /* Caller's range may extend past the end of this folio */ + count = min(folio_size(folio) - from, count); - /* First and last blocks in range within page */ + /* First and last blocks in range within folio */ first = from >> inode->i_blkbits; - last = (from + len - 1) >> inode->i_blkbits; + last = (from + count - 1) >> inode->i_blkbits; for (i = first; i <= last; i++) if (!test_bit(i, iop->uptodate)) diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 74067a73ff78..88423069407c 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -120,13 +120,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos) if (next == ERR_PTR(-ENODEV)) kernfs_seq_stop_active(sf, next); return next; - } else { - /* - * The same behavior and code as single_open(). Returns - * !NULL if pos is at the beginning; otherwise, NULL. 
- */ - return NULL + !*ppos; } + return single_start(sf, ppos); } static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos) diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c index 077b8761d099..23871b18a429 100644 --- a/fs/ksmbd/oplock.c +++ b/fs/ksmbd/oplock.c @@ -656,8 +656,8 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE; rsp->Reserved = 0; rsp->Reserved2 = 0; - rsp->PersistentFid = cpu_to_le64(fp->persistent_id); - rsp->VolatileFid = cpu_to_le64(fp->volatile_id); + rsp->PersistentFid = fp->persistent_id; + rsp->VolatileFid = fp->volatile_id; inc_rfc1001_len(work->response_buf, 24); diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c index 2e12f6d8483b..4cd03d661df0 100644 --- a/fs/ksmbd/server.c +++ b/fs/ksmbd/server.c @@ -585,7 +585,7 @@ static int __init ksmbd_server_init(void) if (ret) goto err_crypto_destroy; - pr_warn_once("The ksmbd server is experimental, use at your own risk.\n"); + pr_warn_once("The ksmbd server is experimental\n"); return 0; diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 67e8e28e3fc3..3bf6c56c654c 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -377,12 +377,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work) * command in the compound request */ if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) { - work->compound_fid = - le64_to_cpu(((struct smb2_create_rsp *)rsp)-> - VolatileFileId); - work->compound_pfid = - le64_to_cpu(((struct smb2_create_rsp *)rsp)-> - PersistentFileId); + work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId; + work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId; work->compound_sid = le64_to_cpu(rsp->SessionId); } @@ -2129,7 +2125,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work) rsp->EndofFile = cpu_to_le64(0); rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE; rsp->Reserved2 = 0; - rsp->VolatileFileId = cpu_to_le64(id); + rsp->VolatileFileId = id; rsp->PersistentFileId = 0; rsp->CreateContextsOffset = 0; rsp->CreateContextsLength = 0; @@ -3157,8 +3153,8 @@ int smb2_open(struct ksmbd_work *work) rsp->Reserved2 = 0; - rsp->PersistentFileId = cpu_to_le64(fp->persistent_id); - rsp->VolatileFileId = cpu_to_le64(fp->volatile_id); + rsp->PersistentFileId = fp->persistent_id; + rsp->VolatileFileId = fp->volatile_id; rsp->CreateContextsOffset = 0; rsp->CreateContextsLength = 0; @@ -3865,9 +3861,7 @@ int smb2_query_dir(struct ksmbd_work *work) goto err_out2; } - dir_fp = ksmbd_lookup_fd_slow(work, - le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!dir_fp) { rc = -EBADF; goto err_out2; @@ -4088,12 +4082,12 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess, * Windows can sometime send query file info request on * pipe without opening it, checking error condition here */ - id = le64_to_cpu(req->VolatileFileId); + id = req->VolatileFileId; if (!ksmbd_session_rpc_method(sess, id)) return -ENOENT; ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n", - req->FileInfoClass, le64_to_cpu(req->VolatileFileId)); + req->FileInfoClass, req->VolatileFileId); switch (req->FileInfoClass) { case FILE_STANDARD_INFORMATION: @@ -4738,7 +4732,7 @@ static int smb2_get_info_file(struct ksmbd_work *work, } if (work->next_smb2_rcv_hdr_off) { - if (!has_file_id(le64_to_cpu(req->VolatileFileId))) { + if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request 
set FID = %llu\n", work->compound_fid); id = work->compound_fid; @@ -4747,8 +4741,8 @@ static int smb2_get_info_file(struct ksmbd_work *work, } if (!has_file_id(id)) { - id = le64_to_cpu(req->VolatileFileId); - pid = le64_to_cpu(req->PersistentFileId); + id = req->VolatileFileId; + pid = req->PersistentFileId; } fp = ksmbd_lookup_fd_slow(work, id, pid); @@ -5113,7 +5107,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work, } if (work->next_smb2_rcv_hdr_off) { - if (!has_file_id(le64_to_cpu(req->VolatileFileId))) { + if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; @@ -5122,8 +5116,8 @@ static int smb2_get_info_sec(struct ksmbd_work *work, } if (!has_file_id(id)) { - id = le64_to_cpu(req->VolatileFileId); - pid = le64_to_cpu(req->PersistentFileId); + id = req->VolatileFileId; + pid = req->PersistentFileId; } fp = ksmbd_lookup_fd_slow(work, id, pid); @@ -5221,7 +5215,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work) struct smb2_close_req *req = smb2_get_msg(work->request_buf); struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf); - id = le64_to_cpu(req->VolatileFileId); + id = req->VolatileFileId; ksmbd_session_rpc_close(work->sess, id); rsp->StructureSize = cpu_to_le16(60); @@ -5280,7 +5274,7 @@ int smb2_close(struct ksmbd_work *work) } if (work->next_smb2_rcv_hdr_off && - !has_file_id(le64_to_cpu(req->VolatileFileId))) { + !has_file_id(req->VolatileFileId)) { if (!has_file_id(work->compound_fid)) { /* file already closed, return FILE_CLOSED */ ksmbd_debug(SMB, "file already closed\n"); @@ -5299,7 +5293,7 @@ int smb2_close(struct ksmbd_work *work) work->compound_pfid = KSMBD_NO_FID; } } else { - volatile_id = le64_to_cpu(req->VolatileFileId); + volatile_id = req->VolatileFileId; } ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id); @@ -5988,7 +5982,7 @@ int smb2_set_info(struct ksmbd_work *work) if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); rsp = ksmbd_resp_buf_next(work); - if (!has_file_id(le64_to_cpu(req->VolatileFileId))) { + if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; @@ -6000,8 +5994,8 @@ int smb2_set_info(struct ksmbd_work *work) } if (!has_file_id(id)) { - id = le64_to_cpu(req->VolatileFileId); - pid = le64_to_cpu(req->PersistentFileId); + id = req->VolatileFileId; + pid = req->PersistentFileId; } fp = ksmbd_lookup_fd_slow(work, id, pid); @@ -6079,7 +6073,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work) struct smb2_read_req *req = smb2_get_msg(work->request_buf); struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf); - id = le64_to_cpu(req->VolatileFileId); + id = req->VolatileFileId; inc_rfc1001_len(work->response_buf, 16); rpc_resp = ksmbd_rpc_read(work->sess, id); @@ -6215,8 +6209,7 @@ int smb2_read(struct ksmbd_work *work) goto out; } - fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!fp) { err = -ENOENT; goto out; @@ -6335,7 +6328,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work) size_t length; length = le32_to_cpu(req->Length); - id = le64_to_cpu(req->VolatileFileId); + id = req->VolatileFileId; if (le16_to_cpu(req->DataOffset) == offsetof(struct smb2_write_req, Buffer)) { @@ -6471,8 +6464,7 @@ int smb2_write(struct ksmbd_work *work) goto out; } - fp = 
ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!fp) { err = -ENOENT; goto out; @@ -6584,12 +6576,9 @@ int smb2_flush(struct ksmbd_work *work) WORK_BUFFERS(work, req, rsp); - ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", - le64_to_cpu(req->VolatileFileId)); + ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId); - err = ksmbd_vfs_fsync(work, - le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId); if (err) goto out; @@ -6618,8 +6607,7 @@ int smb2_cancel(struct ksmbd_work *work) struct ksmbd_conn *conn = work->conn; struct smb2_hdr *hdr = smb2_get_msg(work->request_buf); struct smb2_hdr *chdr; - struct ksmbd_work *cancel_work = NULL; - int canceled = 0; + struct ksmbd_work *cancel_work = NULL, *iter; struct list_head *command_list; ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n", @@ -6629,11 +6617,11 @@ int smb2_cancel(struct ksmbd_work *work) command_list = &conn->async_requests; spin_lock(&conn->request_lock); - list_for_each_entry(cancel_work, command_list, + list_for_each_entry(iter, command_list, async_request_entry) { - chdr = smb2_get_msg(cancel_work->request_buf); + chdr = smb2_get_msg(iter->request_buf); - if (cancel_work->async_id != + if (iter->async_id != le64_to_cpu(hdr->Id.AsyncId)) continue; @@ -6641,7 +6629,7 @@ int smb2_cancel(struct ksmbd_work *work) "smb2 with AsyncId %llu cancelled command = 0x%x\n", le64_to_cpu(hdr->Id.AsyncId), le16_to_cpu(chdr->Command)); - canceled = 1; + cancel_work = iter; break; } spin_unlock(&conn->request_lock); @@ -6649,24 +6637,24 @@ int smb2_cancel(struct ksmbd_work *work) command_list = &conn->requests; spin_lock(&conn->request_lock); - list_for_each_entry(cancel_work, command_list, request_entry) { - chdr = smb2_get_msg(cancel_work->request_buf); + list_for_each_entry(iter, command_list, request_entry) { + chdr = smb2_get_msg(iter->request_buf); if (chdr->MessageId != hdr->MessageId || - cancel_work == work) + iter == work) continue; ksmbd_debug(SMB, "smb2 with mid %llu cancelled command = 0x%x\n", le64_to_cpu(hdr->MessageId), le16_to_cpu(chdr->Command)); - canceled = 1; + cancel_work = iter; break; } spin_unlock(&conn->request_lock); } - if (canceled) { + if (cancel_work) { cancel_work->state = KSMBD_WORK_CANCELLED; if (cancel_work->cancel_fn) cancel_work->cancel_fn(cancel_work->cancel_argv); @@ -6804,12 +6792,9 @@ int smb2_lock(struct ksmbd_work *work) int prior_lock = 0; ksmbd_debug(SMB, "Received lock request\n"); - fp = ksmbd_lookup_fd_slow(work, - le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!fp) { - ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", - le64_to_cpu(req->VolatileFileId)); + ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId); err = -ENOENT; goto out2; } @@ -7164,8 +7149,8 @@ static int fsctl_copychunk(struct ksmbd_work *work, ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0]; - rsp->VolatileFileId = cpu_to_le64(volatile_id); - rsp->PersistentFileId = cpu_to_le64(persistent_id); + rsp->VolatileFileId = volatile_id; + rsp->PersistentFileId = persistent_id; ci_rsp->ChunksWritten = cpu_to_le32(ksmbd_server_side_copy_max_chunk_count()); ci_rsp->ChunkBytesWritten = @@ -7379,8 +7364,8 @@ ipv6_retry: if (nii_rsp) 
nii_rsp->Next = 0; - rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID); - rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID); + rsp->PersistentFileId = SMB2_NO_FID; + rsp->VolatileFileId = SMB2_NO_FID; return nbytes; } @@ -7547,9 +7532,7 @@ static int fsctl_request_resume_key(struct ksmbd_work *work, { struct ksmbd_file *fp; - fp = ksmbd_lookup_fd_slow(work, - le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId)); + fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId); if (!fp) return -ENOENT; @@ -7579,7 +7562,7 @@ int smb2_ioctl(struct ksmbd_work *work) if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); rsp = ksmbd_resp_buf_next(work); - if (!has_file_id(le64_to_cpu(req->VolatileFileId))) { + if (!has_file_id(req->VolatileFileId)) { ksmbd_debug(SMB, "Compound request set FID = %llu\n", work->compound_fid); id = work->compound_fid; @@ -7590,14 +7573,14 @@ int smb2_ioctl(struct ksmbd_work *work) } if (!has_file_id(id)) - id = le64_to_cpu(req->VolatileFileId); + id = req->VolatileFileId; if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) { rsp->hdr.Status = STATUS_NOT_SUPPORTED; goto out; } - cnt_code = le32_to_cpu(req->CntCode); + cnt_code = le32_to_cpu(req->CtlCode); ret = smb2_calc_max_out_buf_len(work, 48, le32_to_cpu(req->MaxOutputResponse)); if (ret < 0) { @@ -7656,8 +7639,8 @@ int smb2_ioctl(struct ksmbd_work *work) goto out; nbytes = sizeof(struct validate_negotiate_info_rsp); - rsp->PersistentFileId = cpu_to_le64(SMB2_NO_FID); - rsp->VolatileFileId = cpu_to_le64(SMB2_NO_FID); + rsp->PersistentFileId = SMB2_NO_FID; + rsp->VolatileFileId = SMB2_NO_FID; break; case FSCTL_QUERY_NETWORK_INTERFACE_INFO: ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len); @@ -7703,10 +7686,10 @@ int smb2_ioctl(struct ksmbd_work *work) rsp->PersistentFileId = req->PersistentFileId; fsctl_copychunk(work, (struct copychunk_ioctl_req *)&req->Buffer[0], - le32_to_cpu(req->CntCode), + le32_to_cpu(req->CtlCode), le32_to_cpu(req->InputCount), - le64_to_cpu(req->VolatileFileId), - le64_to_cpu(req->PersistentFileId), + req->VolatileFileId, + req->PersistentFileId, rsp); break; case FSCTL_SET_SPARSE: @@ -7857,7 +7840,7 @@ dup_ext_out: goto out; } - rsp->CntCode = cpu_to_le32(cnt_code); + rsp->CtlCode = cpu_to_le32(cnt_code); rsp->InputCount = cpu_to_le32(0); rsp->InputOffset = cpu_to_le32(112); rsp->OutputOffset = cpu_to_le32(112); @@ -7903,8 +7886,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) char req_oplevel = 0, rsp_oplevel = 0; unsigned int oplock_change_type; - volatile_id = le64_to_cpu(req->VolatileFid); - persistent_id = le64_to_cpu(req->PersistentFid); + volatile_id = req->VolatileFid; + persistent_id = req->PersistentFid; req_oplevel = req->OplockLevel; ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n", volatile_id, persistent_id, req_oplevel); @@ -7999,8 +7982,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) rsp->OplockLevel = rsp_oplevel; rsp->Reserved = 0; rsp->Reserved2 = 0; - rsp->VolatileFid = cpu_to_le64(volatile_id); - rsp->PersistentFid = cpu_to_le64(persistent_id); + rsp->VolatileFid = volatile_id; + rsp->PersistentFid = persistent_id; inc_rfc1001_len(work->response_buf, 24); return; @@ -8500,7 +8483,7 @@ static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type) struct smb2_hdr *hdr = smb2_get_msg(old_buf); unsigned int orig_len = get_rfc1002_len(old_buf); - memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4); + /* tr_buf must be cleared by the caller */ 
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM; tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len); tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED); diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h index d49468426576..af455278d005 100644 --- a/fs/ksmbd/smb2pdu.h +++ b/fs/ksmbd/smb2pdu.h @@ -16,42 +16,13 @@ #define FILE_CREATED 0x00000002 #define FILE_OVERWRITTEN 0x00000003 -/* - * Size of the session key (crypto key encrypted with the password - */ -#define SMB2_NTLMV2_SESSKEY_SIZE 16 -#define SMB2_SIGNATURE_SIZE 16 -#define SMB2_HMACSHA256_SIZE 32 -#define SMB2_CMACAES_SIZE 16 -#define SMB3_GCM128_CRYPTKEY_SIZE 16 -#define SMB3_GCM256_CRYPTKEY_SIZE 32 - -/* - * Size of the smb3 encryption/decryption keys - */ -#define SMB3_ENC_DEC_KEY_SIZE 32 - -/* - * Size of the smb3 signing key - */ -#define SMB3_SIGN_KEY_SIZE 16 - -#define CIFS_CLIENT_CHALLENGE_SIZE 8 -#define SMB_SERVER_CHALLENGE_SIZE 8 - /* SMB2 Max Credits */ #define SMB2_MAX_CREDITS 8192 -/* Maximum buffer size value we can send with 1 credit */ -#define SMB2_MAX_BUFFER_SIZE 65536 - -#define NUMBER_OF_SMB2_COMMANDS 0x0013 - /* BB FIXME - analyze following length BB */ #define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */ #define SMB21_DEFAULT_IOSIZE (1024 * 1024) -#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024) #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024) #define SMB3_MIN_IOSIZE (64 * 1024) #define SMB3_MAX_IOSIZE (8 * 1024 * 1024) @@ -65,18 +36,6 @@ * */ -#define SMB2_ERROR_STRUCTURE_SIZE2 9 -#define SMB2_ERROR_STRUCTURE_SIZE2_LE cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2) - -struct smb2_err_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; - __u8 ErrorContextCount; - __u8 Reserved; - __le32 ByteCount; /* even if zero, at least one byte follows */ - __u8 ErrorData[1]; /* variable length */ -} __packed; - struct preauth_integrity_info { /* PreAuth integrity Hash ID */ __le16 Preauth_HashId; @@ -116,8 +75,8 @@ struct create_durable_reconn_req { union { __u8 Reserved[16]; struct { - __le64 PersistentFileId; - __le64 VolatileFileId; + __u64 PersistentFileId; + __u64 VolatileFileId; } Fid; } Data; } __packed; @@ -126,8 +85,8 @@ struct create_durable_reconn_v2_req { struct create_context ccontext; __u8 Name[8]; struct { - __le64 PersistentFileId; - __le64 VolatileFileId; + __u64 PersistentFileId; + __u64 VolatileFileId; } Fid; __u8 CreateGuid[16]; __le32 Flags; @@ -161,13 +120,6 @@ struct create_alloc_size_req { __le64 AllocationSize; } __packed; -struct create_posix { - struct create_context ccontext; - __u8 Name[16]; - __le32 Mode; - __u32 Reserved; -} __packed; - struct create_durable_rsp { struct create_context ccontext; __u8 Name[8]; @@ -209,45 +161,6 @@ struct create_posix_rsp { u8 SidBuffer[40]; } __packed; -#define SMB2_LEASE_NONE_LE cpu_to_le32(0x00) -#define SMB2_LEASE_READ_CACHING_LE cpu_to_le32(0x01) -#define SMB2_LEASE_HANDLE_CACHING_LE cpu_to_le32(0x02) -#define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04) - -#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02) - -#define SMB2_LEASE_KEY_SIZE 16 - -struct lease_context { - __u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; - __le32 LeaseState; - __le32 LeaseFlags; - __le64 LeaseDuration; -} __packed; - -struct lease_context_v2 { - __u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; - __le32 LeaseState; - __le32 LeaseFlags; - __le64 LeaseDuration; - __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE]; - __le16 Epoch; - __le16 Reserved; -} __packed; - -struct create_lease { - struct create_context ccontext; - __u8 Name[8]; - struct lease_context lcontext; -} __packed; - 
-struct create_lease_v2 { - struct create_context ccontext; - __u8 Name[8]; - struct lease_context_v2 lcontext; - __u8 Pad[4]; -} __packed; - struct smb2_buffer_desc_v1 { __le64 offset; __le32 token; @@ -256,63 +169,6 @@ struct smb2_buffer_desc_v1 { #define SMB2_0_IOCTL_IS_FSCTL 0x00000001 -struct duplicate_extents_to_file { - __u64 PersistentFileHandle; /* source file handle, opaque endianness */ - __u64 VolatileFileHandle; - __le64 SourceFileOffset; - __le64 TargetFileOffset; - __le64 ByteCount; /* Bytes to be copied */ -} __packed; - -struct smb2_ioctl_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 57 */ - __le16 Reserved; /* offset from start of SMB2 header to write data */ - __le32 CntCode; - __le64 PersistentFileId; - __le64 VolatileFileId; - __le32 InputOffset; /* Reserved MBZ */ - __le32 InputCount; - __le32 MaxInputResponse; - __le32 OutputOffset; - __le32 OutputCount; - __le32 MaxOutputResponse; - __le32 Flags; - __le32 Reserved2; - __u8 Buffer[1]; -} __packed; - -struct smb2_ioctl_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 49 */ - __le16 Reserved; /* offset from start of SMB2 header to write data */ - __le32 CntCode; - __le64 PersistentFileId; - __le64 VolatileFileId; - __le32 InputOffset; /* Reserved MBZ */ - __le32 InputCount; - __le32 OutputOffset; - __le32 OutputCount; - __le32 Flags; - __le32 Reserved2; - __u8 Buffer[1]; -} __packed; - -struct validate_negotiate_info_req { - __le32 Capabilities; - __u8 Guid[SMB2_CLIENT_GUID_SIZE]; - __le16 SecurityMode; - __le16 DialectCount; - __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */ -} __packed; - -struct validate_negotiate_info_rsp { - __le32 Capabilities; - __u8 Guid[SMB2_CLIENT_GUID_SIZE]; - __le16 SecurityMode; - __le16 Dialect; /* Dialect in use for the connection */ -} __packed; - struct smb_sockaddr_in { __be16 Port; __be32 IPv4address; @@ -357,7 +213,7 @@ struct file_object_buf_type1_ioctl_rsp { } __packed; struct resume_key_ioctl_rsp { - __le64 ResumeKey[3]; + __u64 ResumeKey[3]; __le32 ContextLength; __u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */ } __packed; @@ -386,167 +242,6 @@ struct file_sparse { __u8 SetSparse; } __packed; -struct file_zero_data_information { - __le64 FileOffset; - __le64 BeyondFinalZero; -} __packed; - -struct file_allocated_range_buffer { - __le64 file_offset; - __le64 length; -} __packed; - -struct reparse_data_buffer { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __u8 DataBuffer[]; /* Variable Length */ -} __packed; - -/* SMB2 Notify Action Flags */ -#define FILE_ACTION_ADDED 0x00000001 -#define FILE_ACTION_REMOVED 0x00000002 -#define FILE_ACTION_MODIFIED 0x00000003 -#define FILE_ACTION_RENAMED_OLD_NAME 0x00000004 -#define FILE_ACTION_RENAMED_NEW_NAME 0x00000005 -#define FILE_ACTION_ADDED_STREAM 0x00000006 -#define FILE_ACTION_REMOVED_STREAM 0x00000007 -#define FILE_ACTION_MODIFIED_STREAM 0x00000008 -#define FILE_ACTION_REMOVED_BY_DELETE 0x00000009 - -#define SMB2_LOCKFLAG_SHARED 0x0001 -#define SMB2_LOCKFLAG_EXCLUSIVE 0x0002 -#define SMB2_LOCKFLAG_UNLOCK 0x0004 -#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010 -#define SMB2_LOCKFLAG_MASK 0x0007 - -struct smb2_lock_element { - __le64 Offset; - __le64 Length; - __le32 Flags; - __le32 Reserved; -} __packed; - -struct smb2_lock_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 48 */ - __le16 LockCount; - __le32 Reserved; - __le64 PersistentFileId; - __le64 VolatileFileId; - /* Followed by at least one */ - struct smb2_lock_element 
locks[1]; -} __packed; - -struct smb2_lock_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __le16 Reserved; -} __packed; - -struct smb2_echo_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __u16 Reserved; -} __packed; - -struct smb2_echo_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 4 */ - __u16 Reserved; -} __packed; - -/* search (query_directory) Flags field */ -#define SMB2_RESTART_SCANS 0x01 -#define SMB2_RETURN_SINGLE_ENTRY 0x02 -#define SMB2_INDEX_SPECIFIED 0x04 -#define SMB2_REOPEN 0x10 - -struct smb2_query_directory_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 33 */ - __u8 FileInformationClass; - __u8 Flags; - __le32 FileIndex; - __le64 PersistentFileId; - __le64 VolatileFileId; - __le16 FileNameOffset; - __le16 FileNameLength; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -struct smb2_query_directory_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 9 */ - __le16 OutputBufferOffset; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -/* Possible InfoType values */ -#define SMB2_O_INFO_FILE 0x01 -#define SMB2_O_INFO_FILESYSTEM 0x02 -#define SMB2_O_INFO_SECURITY 0x03 -#define SMB2_O_INFO_QUOTA 0x04 - -/* Security info type additionalinfo flags. See MS-SMB2 (2.2.37) or MS-DTYP */ -#define OWNER_SECINFO 0x00000001 -#define GROUP_SECINFO 0x00000002 -#define DACL_SECINFO 0x00000004 -#define SACL_SECINFO 0x00000008 -#define LABEL_SECINFO 0x00000010 -#define ATTRIBUTE_SECINFO 0x00000020 -#define SCOPE_SECINFO 0x00000040 -#define BACKUP_SECINFO 0x00010000 -#define UNPROTECTED_SACL_SECINFO 0x10000000 -#define UNPROTECTED_DACL_SECINFO 0x20000000 -#define PROTECTED_SACL_SECINFO 0x40000000 -#define PROTECTED_DACL_SECINFO 0x80000000 - -struct smb2_query_info_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 41 */ - __u8 InfoType; - __u8 FileInfoClass; - __le32 OutputBufferLength; - __le16 InputBufferOffset; - __u16 Reserved; - __le32 InputBufferLength; - __le32 AdditionalInformation; - __le32 Flags; - __le64 PersistentFileId; - __le64 VolatileFileId; - __u8 Buffer[1]; -} __packed; - -struct smb2_query_info_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 9 */ - __le16 OutputBufferOffset; - __le32 OutputBufferLength; - __u8 Buffer[1]; -} __packed; - -struct smb2_set_info_req { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 33 */ - __u8 InfoType; - __u8 FileInfoClass; - __le32 BufferLength; - __le16 BufferOffset; - __u16 Reserved; - __le32 AdditionalInformation; - __le64 PersistentFileId; - __le64 VolatileFileId; - __u8 Buffer[1]; -} __packed; - -struct smb2_set_info_rsp { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 2 */ -} __packed; - /* FILE Info response size */ #define FILE_DIRECTORY_INFORMATION_SIZE 1 #define FILE_FULL_DIRECTORY_INFORMATION_SIZE 2 @@ -602,145 +297,11 @@ struct fs_type_info { long magic_number; } __packed; -struct smb2_oplock_break { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 24 */ - __u8 OplockLevel; - __u8 Reserved; - __le32 Reserved2; - __le64 PersistentFid; - __le64 VolatileFid; -} __packed; - -#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01) - -struct smb2_lease_break { - struct smb2_hdr hdr; - __le16 StructureSize; /* Must be 44 */ - __le16 Epoch; - __le32 Flags; - __u8 LeaseKey[16]; - __le32 CurrentLeaseState; - __le32 NewLeaseState; - __le32 BreakReason; - __le32 AccessMaskHint; - __le32 ShareMaskHint; -} __packed; - -struct smb2_lease_ack { - struct smb2_hdr 
hdr; - __le16 StructureSize; /* Must be 36 */ - __le16 Reserved; - __le32 Flags; - __u8 LeaseKey[16]; - __le32 LeaseState; - __le64 LeaseDuration; -} __packed; - /* - * PDU infolevel structure definitions + * PDU query infolevel structure definitions * BB consider moving to a different header */ -/* File System Information Classes */ -#define FS_VOLUME_INFORMATION 1 /* Query */ -#define FS_LABEL_INFORMATION 2 /* Set */ -#define FS_SIZE_INFORMATION 3 /* Query */ -#define FS_DEVICE_INFORMATION 4 /* Query */ -#define FS_ATTRIBUTE_INFORMATION 5 /* Query */ -#define FS_CONTROL_INFORMATION 6 /* Query, Set */ -#define FS_FULL_SIZE_INFORMATION 7 /* Query */ -#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */ -#define FS_DRIVER_PATH_INFORMATION 9 /* Query */ -#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */ -#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */ - -struct smb2_fs_full_size_info { - __le64 TotalAllocationUnits; - __le64 CallerAvailableAllocationUnits; - __le64 ActualAvailableAllocationUnits; - __le32 SectorsPerAllocationUnit; - __le32 BytesPerSector; -} __packed; - -#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001 -#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002 -#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004 -#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008 - -/* sector size info struct */ -struct smb3_fs_ss_info { - __le32 LogicalBytesPerSector; - __le32 PhysicalBytesPerSectorForAtomicity; - __le32 PhysicalBytesPerSectorForPerf; - __le32 FSEffPhysicalBytesPerSectorForAtomicity; - __le32 Flags; - __le32 ByteOffsetForSectorAlignment; - __le32 ByteOffsetForPartitionAlignment; -} __packed; - -/* File System Control Information */ -struct smb2_fs_control_info { - __le64 FreeSpaceStartFiltering; - __le64 FreeSpaceThreshold; - __le64 FreeSpaceStopFiltering; - __le64 DefaultQuotaThreshold; - __le64 DefaultQuotaLimit; - __le32 FileSystemControlFlags; - __le32 Padding; -} __packed; - -/* partial list of QUERY INFO levels */ -#define FILE_DIRECTORY_INFORMATION 1 -#define FILE_FULL_DIRECTORY_INFORMATION 2 -#define FILE_BOTH_DIRECTORY_INFORMATION 3 -#define FILE_BASIC_INFORMATION 4 -#define FILE_STANDARD_INFORMATION 5 -#define FILE_INTERNAL_INFORMATION 6 -#define FILE_EA_INFORMATION 7 -#define FILE_ACCESS_INFORMATION 8 -#define FILE_NAME_INFORMATION 9 -#define FILE_RENAME_INFORMATION 10 -#define FILE_LINK_INFORMATION 11 -#define FILE_NAMES_INFORMATION 12 -#define FILE_DISPOSITION_INFORMATION 13 -#define FILE_POSITION_INFORMATION 14 -#define FILE_FULL_EA_INFORMATION 15 -#define FILE_MODE_INFORMATION 16 -#define FILE_ALIGNMENT_INFORMATION 17 -#define FILE_ALL_INFORMATION 18 -#define FILE_ALLOCATION_INFORMATION 19 -#define FILE_END_OF_FILE_INFORMATION 20 -#define FILE_ALTERNATE_NAME_INFORMATION 21 -#define FILE_STREAM_INFORMATION 22 -#define FILE_PIPE_INFORMATION 23 -#define FILE_PIPE_LOCAL_INFORMATION 24 -#define FILE_PIPE_REMOTE_INFORMATION 25 -#define FILE_MAILSLOT_QUERY_INFORMATION 26 -#define FILE_MAILSLOT_SET_INFORMATION 27 -#define FILE_COMPRESSION_INFORMATION 28 -#define FILE_OBJECT_ID_INFORMATION 29 -/* Number 30 not defined in documents */ -#define FILE_MOVE_CLUSTER_INFORMATION 31 -#define FILE_QUOTA_INFORMATION 32 -#define FILE_REPARSE_POINT_INFORMATION 33 -#define FILE_NETWORK_OPEN_INFORMATION 34 -#define FILE_ATTRIBUTE_TAG_INFORMATION 35 -#define FILE_TRACKING_INFORMATION 36 -#define FILEID_BOTH_DIRECTORY_INFORMATION 37 -#define FILEID_FULL_DIRECTORY_INFORMATION 38 -#define FILE_VALID_DATA_LENGTH_INFORMATION 39 -#define FILE_SHORT_NAME_INFORMATION 40 
-#define FILE_SFIO_RESERVE_INFORMATION 44 -#define FILE_SFIO_VOLUME_INFORMATION 45 -#define FILE_HARD_LINK_INFORMATION 46 -#define FILE_NORMALIZED_NAME_INFORMATION 48 -#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50 -#define FILE_STANDARD_LINK_INFORMATION 54 - -#define OP_BREAK_STRUCT_SIZE_20 24 -#define OP_BREAK_STRUCT_SIZE_21 36 - struct smb2_file_access_info { __le32 AccessFlags; } __packed; @@ -749,56 +310,6 @@ struct smb2_file_alignment_info { __le32 AlignmentRequirement; } __packed; -struct smb2_file_internal_info { - __le64 IndexNumber; -} __packed; /* level 6 Query */ - -struct smb2_file_rename_info { /* encoding of request for level 10 */ - __u8 ReplaceIfExists; /* 1 = replace existing target with new */ - /* 0 = fail if target already exists */ - __u8 Reserved[7]; - __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ - __le32 FileNameLength; - char FileName[]; /* New name to be assigned */ -} __packed; /* level 10 Set */ - -struct smb2_file_link_info { /* encoding of request for level 11 */ - __u8 ReplaceIfExists; /* 1 = replace existing link with new */ - /* 0 = fail if link already exists */ - __u8 Reserved[7]; - __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ - __le32 FileNameLength; - char FileName[]; /* Name to be assigned to new link */ -} __packed; /* level 11 Set */ - -/* - * This level 18, although with struct with same name is different from cifs - * level 0x107. Level 0x107 has an extra u64 between AccessFlags and - * CurrentByteOffset. - */ -struct smb2_file_all_info { /* data block encoding of response to level 18 */ - __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */ - __le64 LastAccessTime; - __le64 LastWriteTime; - __le64 ChangeTime; - __le32 Attributes; - __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */ - __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */ - __le64 EndOfFile; /* size ie offset to first free byte in file */ - __le32 NumberOfLinks; /* hard links */ - __u8 DeletePending; - __u8 Directory; - __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */ - __le64 IndexNumber; - __le32 EASize; - __le32 AccessFlags; - __le64 CurrentByteOffset; - __le32 Mode; - __le32 AlignmentRequirement; - __le32 FileNameLength; - char FileName[1]; -} __packed; /* level 18 Query */ - struct smb2_file_basic_info { /* data block encoding of response to level 18 */ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */ __le64 LastAccessTime; @@ -821,10 +332,6 @@ struct smb2_file_stream_info { char StreamName[]; } __packed; -struct smb2_file_eof_info { /* encoding of request for level 10 */ - __le64 EndOfFile; /* new end of file value */ -} __packed; /* level 20 Set */ - struct smb2_file_ntwrk_info { __le64 CreationTime; __le64 LastAccessTime; @@ -915,34 +422,6 @@ struct create_sd_buf_req { struct smb_ntsd ntsd; } __packed; -/* Find File infolevels */ -#define SMB_FIND_FILE_POSIX_INFO 0x064 - -/* Level 100 query info */ -struct smb311_posix_qinfo { - __le64 CreationTime; - __le64 LastAccessTime; - __le64 LastWriteTime; - __le64 ChangeTime; - __le64 EndOfFile; - __le64 AllocationSize; - __le32 DosAttributes; - __le64 Inode; - __le32 DeviceId; - __le32 Zero; - /* beginning of POSIX Create Context Response */ - __le32 HardLinks; - __le32 ReparseTag; - __le32 Mode; - u8 Sids[]; - /* - * var sized owner SID - * var sized group SID - * le32 filenamelength - * u8 filename[] - */ -} __packed; - struct smb2_posix_info { __le32 NextEntryOffset; __u32 Ignored; diff --git 
a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c index 82a1429bbe12..8fef9de787d3 100644 --- a/fs/ksmbd/transport_tcp.c +++ b/fs/ksmbd/transport_tcp.c @@ -476,7 +476,7 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, switch (event) { case NETDEV_UP: - if (netdev->priv_flags & IFF_BRIDGE_PORT) + if (netif_is_bridge_port(netdev)) return NOTIFY_OK; list_for_each_entry(iface, &iface_list, entry) { @@ -585,7 +585,7 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) rtnl_lock(); for_each_netdev(&init_net, netdev) { - if (netdev->priv_flags & IFF_BRIDGE_PORT) + if (netif_is_bridge_port(netdev)) continue; if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) return -ENOMEM; diff --git a/fs/namespace.c b/fs/namespace.c index 6e9844b8c6fb..a0a36bfa3aa0 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2112,22 +2112,23 @@ static int invent_group_ids(struct mount *mnt, bool recurse) int count_mounts(struct mnt_namespace *ns, struct mount *mnt) { unsigned int max = READ_ONCE(sysctl_mount_max); - unsigned int mounts = 0, old, pending, sum; + unsigned int mounts = 0; struct mount *p; + if (ns->mounts >= max) + return -ENOSPC; + max -= ns->mounts; + if (ns->pending_mounts >= max) + return -ENOSPC; + max -= ns->pending_mounts; + for (p = mnt; p; p = next_mnt(p, mnt)) mounts++; - old = ns->mounts; - pending = ns->pending_mounts; - sum = old + pending; - if ((old > sum) || - (pending > sum) || - (max < sum) || - (mounts > (max - sum))) + if (mounts > max) return -ENOSPC; - ns->pending_mounts = pending + mounts; + ns->pending_mounts += mounts; return 0; } @@ -2921,7 +2922,7 @@ static int do_move_mount_old(struct path *path, const char *old_name) * add a mount into a namespace's mount tree */ static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, - struct path *path, int mnt_flags) + const struct path *path, int mnt_flags) { struct mount *parent = real_mount(path->mnt); @@ -3044,7 +3045,7 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags, return err; } -int finish_automount(struct vfsmount *m, struct path *path) +int finish_automount(struct vfsmount *m, const struct path *path) { struct dentry *dentry = path->dentry; struct mountpoint *mp; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index b0ca244c50d0..150b7fa8f0a7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) result = generic_write_checks(iocb, from); if (result > 0) { current->backing_dev_info = inode_to_bdi(inode); - result = generic_perform_write(file, from, iocb->ki_pos); + result = generic_perform_write(iocb, from); current->backing_dev_info = NULL; } nfs_end_io_write(inode); diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index d154dcfe06af..90e3dad8ee45 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { set_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); spin_unlock(&mapping->private_lock); - block_dirty_folio(mapping, page_folio(page)); + filemap_dirty_folio(mapping, page_folio(page)); if (unlikely(buffers_to_free)) { do { bh = buffers_to_free->b_this_page; diff --git a/fs/read_write.c b/fs/read_write.c index dc5000173b80..e643aec2b0ef 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1630,7 +1630,6 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count) if (!*count) return 0; - /* FIXME: this is for backwards compatibility with 2.4 */ if (iocb->ki_flags 
& IOCB_APPEND) iocb->ki_pos = i_size_read(inode); diff --git a/fs/seq_file.c b/fs/seq_file.c index f8e1f4ee87ff..7ab8a58c29b6 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -554,9 +554,9 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc) } EXPORT_SYMBOL(seq_dentry); -static void *single_start(struct seq_file *p, loff_t *pos) +void *single_start(struct seq_file *p, loff_t *pos) { - return NULL + (*pos == 0); + return *pos ? NULL : SEQ_START_TOKEN; } static void *single_next(struct seq_file *p, void *v, loff_t *pos) diff --git a/fs/smbfs_common/smb2pdu.h b/fs/smbfs_common/smb2pdu.h index 38b8fc514860..0507aecfc669 100644 --- a/fs/smbfs_common/smb2pdu.h +++ b/fs/smbfs_common/smb2pdu.h @@ -61,6 +61,40 @@ #define NUMBER_OF_SMB2_COMMANDS 0x0013 /* + * Size of the session key (crypto key encrypted with the password + */ +#define SMB2_NTLMV2_SESSKEY_SIZE 16 +#define SMB2_SIGNATURE_SIZE 16 +#define SMB2_HMACSHA256_SIZE 32 +#define SMB2_CMACAES_SIZE 16 +#define SMB3_GCM128_CRYPTKEY_SIZE 16 +#define SMB3_GCM256_CRYPTKEY_SIZE 32 + +/* + * Size of the smb3 encryption/decryption keys + * This size is big enough to store any cipher key types. + */ +#define SMB3_ENC_DEC_KEY_SIZE 32 + +/* + * Size of the smb3 signing key + */ +#define SMB3_SIGN_KEY_SIZE 16 + +#define CIFS_CLIENT_CHALLENGE_SIZE 8 + +/* Maximum buffer size value we can send with 1 credit */ +#define SMB2_MAX_BUFFER_SIZE 65536 + +/* + * The default wsize is 1M for SMB2 (and for some CIFS cases). + * find_get_pages seems to return a maximum of 256 + * pages in a single call. With PAGE_SIZE == 4k, this means we can + * fill a single wsize request with a single call. + */ +#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024) + +/* * SMB2 Header Definition * * "MBZ" : Must be Zero @@ -88,6 +122,15 @@ #define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000) #define SMB2_FLAGS_REPLAY_OPERATION cpu_to_le32(0x20000000) /* SMB3 & up */ +/* + * Definitions for SMB2 Protocol Data Units (network frames) + * + * See MS-SMB2.PDF specification for protocol details. + * The Naming convention is the lower case version of the SMB2 + * command code name for the struct. Note that structures must be packed. 
+ * + */ + /* See MS-SMB2 section 2.2.1 */ struct smb2_hdr { __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */ @@ -115,6 +158,18 @@ struct smb2_pdu { __le16 StructureSize2; /* size of wct area (varies, request specific) */ } __packed; +#define SMB2_ERROR_STRUCTURE_SIZE2 9 +#define SMB2_ERROR_STRUCTURE_SIZE2_LE cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2) + +struct smb2_err_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; + __u8 ErrorContextCount; + __u8 Reserved; + __le32 ByteCount; /* even if zero, at least one byte follows */ + __u8 ErrorData[1]; /* variable length */ +} __packed; + #define SMB3_AES_CCM_NONCE 11 #define SMB3_AES_GCM_NONCE 12 @@ -608,8 +663,8 @@ struct smb2_close_req { __le16 StructureSize; /* Must be 24 */ __le16 Flags; __le32 Reserved; - __le64 PersistentFileId; /* opaque endianness */ - __le64 VolatileFileId; /* opaque endianness */ + __u64 PersistentFileId; /* opaque endianness */ + __u64 VolatileFileId; /* opaque endianness */ } __packed; /* @@ -653,8 +708,8 @@ struct smb2_read_req { __u8 Flags; /* MBZ unless SMB3.02 or later */ __le32 Length; __le64 Offset; - __le64 PersistentFileId; - __le64 VolatileFileId; + __u64 PersistentFileId; + __u64 VolatileFileId; __le32 MinimumCount; __le32 Channel; /* MBZ except for SMB3 or later */ __le32 RemainingBytes; @@ -692,8 +747,8 @@ struct smb2_write_req { __le16 DataOffset; /* offset from start of SMB2 header to write data */ __le32 Length; __le64 Offset; - __le64 PersistentFileId; /* opaque endianness */ - __le64 VolatileFileId; /* opaque endianness */ + __u64 PersistentFileId; /* opaque endianness */ + __u64 VolatileFileId; /* opaque endianness */ __le32 Channel; /* MBZ unless SMB3.02 or later */ __le32 RemainingBytes; __le16 WriteChannelInfoOffset; @@ -722,8 +777,8 @@ struct smb2_flush_req { __le16 StructureSize; /* Must be 24 */ __le16 Reserved1; __le32 Reserved2; - __le64 PersistentFileId; - __le64 VolatileFileId; + __u64 PersistentFileId; + __u64 VolatileFileId; } __packed; struct smb2_flush_rsp { @@ -732,6 +787,123 @@ struct smb2_flush_rsp { __le16 Reserved; } __packed; +#define SMB2_LOCKFLAG_SHARED 0x0001 +#define SMB2_LOCKFLAG_EXCLUSIVE 0x0002 +#define SMB2_LOCKFLAG_UNLOCK 0x0004 +#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY 0x0010 +#define SMB2_LOCKFLAG_MASK 0x0007 + +struct smb2_lock_element { + __le64 Offset; + __le64 Length; + __le32 Flags; + __le32 Reserved; +} __packed; + +struct smb2_lock_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 48 */ + __le16 LockCount; + /* + * The least significant four bits are the index, the other 28 bits are + * the lock sequence number (0 to 64). 
See MS-SMB2 2.2.26 + */ + __le32 LockSequenceNumber; + __u64 PersistentFileId; + __u64 VolatileFileId; + /* Followed by at least one */ + struct smb2_lock_element locks[1]; +} __packed; + +struct smb2_lock_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 4 */ + __le16 Reserved; +} __packed; + +struct smb2_echo_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 4 */ + __u16 Reserved; +} __packed; + +struct smb2_echo_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 4 */ + __u16 Reserved; +} __packed; + +/* + * Valid FileInformation classes for query directory + * + * Note that these are a subset of the (file) QUERY_INFO levels defined + * later in this file (but since QUERY_DIRECTORY uses equivalent numbers + * we do not redefine them here) + * + * FileDirectoryInfomation 0x01 + * FileFullDirectoryInformation 0x02 + * FileIdFullDirectoryInformation 0x26 + * FileBothDirectoryInformation 0x03 + * FileIdBothDirectoryInformation 0x25 + * FileNamesInformation 0x0C + * FileIdExtdDirectoryInformation 0x3C + */ + +/* search (query_directory) Flags field */ +#define SMB2_RESTART_SCANS 0x01 +#define SMB2_RETURN_SINGLE_ENTRY 0x02 +#define SMB2_INDEX_SPECIFIED 0x04 +#define SMB2_REOPEN 0x10 + +struct smb2_query_directory_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 33 */ + __u8 FileInformationClass; + __u8 Flags; + __le32 FileIndex; + __u64 PersistentFileId; + __u64 VolatileFileId; + __le16 FileNameOffset; + __le16 FileNameLength; + __le32 OutputBufferLength; + __u8 Buffer[1]; +} __packed; + +struct smb2_query_directory_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 9 */ + __le16 OutputBufferOffset; + __le32 OutputBufferLength; + __u8 Buffer[1]; +} __packed; + +/* + * Maximum number of iovs we need for a set-info request. 
+ * The largest one is rename/hardlink + * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info + * [1] : path + * [2] : compound padding + */ +#define SMB2_SET_INFO_IOV_SIZE 3 + +struct smb2_set_info_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 33 */ + __u8 InfoType; + __u8 FileInfoClass; + __le32 BufferLength; + __le16 BufferOffset; + __u16 Reserved; + __le32 AdditionalInformation; + __u64 PersistentFileId; + __u64 VolatileFileId; + __u8 Buffer[1]; +} __packed; + +struct smb2_set_info_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 2 */ +} __packed; /* * SMB2_NOTIFY See MS-SMB2 section 2.2.35 @@ -769,8 +941,8 @@ struct smb2_change_notify_req { __le16 StructureSize; __le16 Flags; __le32 OutputBufferLength; - __le64 PersistentFileId; /* opaque endianness */ - __le64 VolatileFileId; /* opaque endianness */ + __u64 PersistentFileId; /* opaque endianness */ + __u64 VolatileFileId; /* opaque endianness */ __le32 CompletionFilter; __u32 Reserved; } __packed; @@ -978,12 +1150,455 @@ struct smb2_create_rsp { __le64 EndofFile; __le32 FileAttributes; __le32 Reserved2; - __le64 PersistentFileId; - __le64 VolatileFileId; + __u64 PersistentFileId; + __u64 VolatileFileId; __le32 CreateContextsOffset; __le32 CreateContextsLength; __u8 Buffer[1]; } __packed; +struct create_posix { + struct create_context ccontext; + __u8 Name[16]; + __le32 Mode; + __u32 Reserved; +} __packed; + +#define SMB2_LEASE_NONE_LE cpu_to_le32(0x00) +#define SMB2_LEASE_READ_CACHING_LE cpu_to_le32(0x01) +#define SMB2_LEASE_HANDLE_CACHING_LE cpu_to_le32(0x02) +#define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04) + +#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02) + +#define SMB2_LEASE_KEY_SIZE 16 + +struct lease_context { + __u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; + __le32 LeaseState; + __le32 LeaseFlags; + __le64 LeaseDuration; +} __packed; + +struct lease_context_v2 { + __u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; + __le32 LeaseState; + __le32 LeaseFlags; + __le64 LeaseDuration; + __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE]; + __le16 Epoch; + __le16 Reserved; +} __packed; + +struct create_lease { + struct create_context ccontext; + __u8 Name[8]; + struct lease_context lcontext; +} __packed; + +struct create_lease_v2 { + struct create_context ccontext; + __u8 Name[8]; + struct lease_context_v2 lcontext; + __u8 Pad[4]; +} __packed; + +/* See MS-SMB2 2.2.31 and 2.2.32 */ +struct smb2_ioctl_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 57 */ + __le16 Reserved; /* offset from start of SMB2 header to write data */ + __le32 CtlCode; + __u64 PersistentFileId; + __u64 VolatileFileId; + __le32 InputOffset; /* Reserved MBZ */ + __le32 InputCount; + __le32 MaxInputResponse; + __le32 OutputOffset; + __le32 OutputCount; + __le32 MaxOutputResponse; + __le32 Flags; + __le32 Reserved2; + __u8 Buffer[]; +} __packed; + +struct smb2_ioctl_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 49 */ + __le16 Reserved; + __le32 CtlCode; + __u64 PersistentFileId; + __u64 VolatileFileId; + __le32 InputOffset; /* Reserved MBZ */ + __le32 InputCount; + __le32 OutputOffset; + __le32 OutputCount; + __le32 Flags; + __le32 Reserved2; + __u8 Buffer[]; +} __packed; + +/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */ +struct file_zero_data_information { + __le64 FileOffset; + __le64 BeyondFinalZero; +} __packed; + +/* Reparse structures - see MS-FSCC 2.1.2 */ + +/* struct fsctl_reparse_info_req is empty, only response structs (see below) */ +struct reparse_data_buffer { + 
__le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __u8 DataBuffer[]; /* Variable Length */ +} __packed; + +struct reparse_guid_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __u8 ReparseGuid[16]; + __u8 DataBuffer[]; /* Variable Length */ +} __packed; + +struct reparse_mount_point_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __le16 SubstituteNameOffset; + __le16 SubstituteNameLength; + __le16 PrintNameOffset; + __le16 PrintNameLength; + __u8 PathBuffer[]; /* Variable Length */ +} __packed; + +#define SYMLINK_FLAG_RELATIVE 0x00000001 + +struct reparse_symlink_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __le16 SubstituteNameOffset; + __le16 SubstituteNameLength; + __le16 PrintNameOffset; + __le16 PrintNameLength; + __le32 Flags; + __u8 PathBuffer[]; /* Variable Length */ +} __packed; + +/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ + +struct validate_negotiate_info_req { + __le32 Capabilities; + __u8 Guid[SMB2_CLIENT_GUID_SIZE]; + __le16 SecurityMode; + __le16 DialectCount; + __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ +} __packed; + +struct validate_negotiate_info_rsp { + __le32 Capabilities; + __u8 Guid[SMB2_CLIENT_GUID_SIZE]; + __le16 SecurityMode; + __le16 Dialect; /* Dialect in use for the connection */ +} __packed; + +struct duplicate_extents_to_file { + __u64 PersistentFileHandle; /* source file handle, opaque endianness */ + __u64 VolatileFileHandle; + __le64 SourceFileOffset; + __le64 TargetFileOffset; + __le64 ByteCount; /* Bytes to be copied */ +} __packed; + +/* Possible InfoType values */ +#define SMB2_O_INFO_FILE 0x01 +#define SMB2_O_INFO_FILESYSTEM 0x02 +#define SMB2_O_INFO_SECURITY 0x03 +#define SMB2_O_INFO_QUOTA 0x04 + +/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */ + +/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below */ +#define FILE_DIRECTORY_INFORMATION 1 /* also for QUERY_DIR */ +#define FILE_FULL_DIRECTORY_INFORMATION 2 /* also for QUERY_DIR */ +#define FILE_BOTH_DIRECTORY_INFORMATION 3 /* also for QUERY_DIR */ +#define FILE_BASIC_INFORMATION 4 +#define FILE_STANDARD_INFORMATION 5 +#define FILE_INTERNAL_INFORMATION 6 +#define FILE_EA_INFORMATION 7 +#define FILE_ACCESS_INFORMATION 8 +#define FILE_NAME_INFORMATION 9 +#define FILE_RENAME_INFORMATION 10 +#define FILE_LINK_INFORMATION 11 +#define FILE_NAMES_INFORMATION 12 /* also for QUERY_DIR */ +#define FILE_DISPOSITION_INFORMATION 13 +#define FILE_POSITION_INFORMATION 14 +#define FILE_FULL_EA_INFORMATION 15 +#define FILE_MODE_INFORMATION 16 +#define FILE_ALIGNMENT_INFORMATION 17 +#define FILE_ALL_INFORMATION 18 +#define FILE_ALLOCATION_INFORMATION 19 +#define FILE_END_OF_FILE_INFORMATION 20 +#define FILE_ALTERNATE_NAME_INFORMATION 21 +#define FILE_STREAM_INFORMATION 22 +#define FILE_PIPE_INFORMATION 23 +#define FILE_PIPE_LOCAL_INFORMATION 24 +#define FILE_PIPE_REMOTE_INFORMATION 25 +#define FILE_MAILSLOT_QUERY_INFORMATION 26 +#define FILE_MAILSLOT_SET_INFORMATION 27 +#define FILE_COMPRESSION_INFORMATION 28 +#define FILE_OBJECT_ID_INFORMATION 29 +/* Number 30 not defined in documents */ +#define FILE_MOVE_CLUSTER_INFORMATION 31 +#define FILE_QUOTA_INFORMATION 32 +#define FILE_REPARSE_POINT_INFORMATION 33 +#define FILE_NETWORK_OPEN_INFORMATION 34 +#define FILE_ATTRIBUTE_TAG_INFORMATION 35 +#define FILE_TRACKING_INFORMATION 36 +#define FILEID_BOTH_DIRECTORY_INFORMATION 37 /* also for QUERY_DIR */ +#define 
FILEID_FULL_DIRECTORY_INFORMATION 38 /* also for QUERY_DIR */ +#define FILE_VALID_DATA_LENGTH_INFORMATION 39 +#define FILE_SHORT_NAME_INFORMATION 40 +#define FILE_SFIO_RESERVE_INFORMATION 44 +#define FILE_SFIO_VOLUME_INFORMATION 45 +#define FILE_HARD_LINK_INFORMATION 46 +#define FILE_NORMALIZED_NAME_INFORMATION 48 +#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50 +#define FILE_STANDARD_LINK_INFORMATION 54 +#define FILE_ID_INFORMATION 59 +#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60 /* also for QUERY_DIR */ +/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */ +#define SMB_FIND_FILE_POSIX_INFO 0x064 + +/* Security info type additionalinfo flags. */ +#define OWNER_SECINFO 0x00000001 +#define GROUP_SECINFO 0x00000002 +#define DACL_SECINFO 0x00000004 +#define SACL_SECINFO 0x00000008 +#define LABEL_SECINFO 0x00000010 +#define ATTRIBUTE_SECINFO 0x00000020 +#define SCOPE_SECINFO 0x00000040 +#define BACKUP_SECINFO 0x00010000 +#define UNPROTECTED_SACL_SECINFO 0x10000000 +#define UNPROTECTED_DACL_SECINFO 0x20000000 +#define PROTECTED_SACL_SECINFO 0x40000000 +#define PROTECTED_DACL_SECINFO 0x80000000 + +/* Flags used for FileFullEAinfo */ +#define SL_RESTART_SCAN 0x00000001 +#define SL_RETURN_SINGLE_ENTRY 0x00000002 +#define SL_INDEX_SPECIFIED 0x00000004 + +struct smb2_query_info_req { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 41 */ + __u8 InfoType; + __u8 FileInfoClass; + __le32 OutputBufferLength; + __le16 InputBufferOffset; + __u16 Reserved; + __le32 InputBufferLength; + __le32 AdditionalInformation; + __le32 Flags; + __u64 PersistentFileId; + __u64 VolatileFileId; + __u8 Buffer[1]; +} __packed; + +struct smb2_query_info_rsp { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 9 */ + __le16 OutputBufferOffset; + __le32 OutputBufferLength; + __u8 Buffer[1]; +} __packed; + +/* + * PDU query infolevel structure definitions + */ + +struct file_allocated_range_buffer { + __le64 file_offset; + __le64 length; +} __packed; + +struct smb2_file_internal_info { + __le64 IndexNumber; +} __packed; /* level 6 Query */ + +struct smb2_file_rename_info { /* encoding of request for level 10 */ + __u8 ReplaceIfExists; /* 1 = replace existing target with new */ + /* 0 = fail if target already exists */ + __u8 Reserved[7]; + __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ + __le32 FileNameLength; + char FileName[]; /* New name to be assigned */ + /* padding - overall struct size must be >= 24 so filename + pad >= 6 */ +} __packed; /* level 10 Set */ + +struct smb2_file_link_info { /* encoding of request for level 11 */ + __u8 ReplaceIfExists; /* 1 = replace existing link with new */ + /* 0 = fail if link already exists */ + __u8 Reserved[7]; + __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ + __le32 FileNameLength; + char FileName[]; /* Name to be assigned to new link */ +} __packed; /* level 11 Set */ + +/* + * This level 18, although with struct with same name is different from cifs + * level 0x107. Level 0x107 has an extra u64 between AccessFlags and + * CurrentByteOffset. 
+ */ +struct smb2_file_all_info { /* data block encoding of response to level 18 */ + __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */ + __le64 LastAccessTime; + __le64 LastWriteTime; + __le64 ChangeTime; + __le32 Attributes; + __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */ + __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */ + __le64 EndOfFile; /* size ie offset to first free byte in file */ + __le32 NumberOfLinks; /* hard links */ + __u8 DeletePending; + __u8 Directory; + __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */ + __le64 IndexNumber; + __le32 EASize; + __le32 AccessFlags; + __le64 CurrentByteOffset; + __le32 Mode; + __le32 AlignmentRequirement; + __le32 FileNameLength; + char FileName[1]; +} __packed; /* level 18 Query */ + +struct smb2_file_eof_info { /* encoding of request for level 10 */ + __le64 EndOfFile; /* new end of file value */ +} __packed; /* level 20 Set */ + +/* Level 100 query info */ +struct smb311_posix_qinfo { + __le64 CreationTime; + __le64 LastAccessTime; + __le64 LastWriteTime; + __le64 ChangeTime; + __le64 EndOfFile; + __le64 AllocationSize; + __le32 DosAttributes; + __le64 Inode; + __le32 DeviceId; + __le32 Zero; + /* beginning of POSIX Create Context Response */ + __le32 HardLinks; + __le32 ReparseTag; + __le32 Mode; + u8 Sids[]; + /* + * var sized owner SID + * var sized group SID + * le32 filenamelength + * u8 filename[] + */ +} __packed; + +/* File System Information Classes */ +#define FS_VOLUME_INFORMATION 1 /* Query */ +#define FS_LABEL_INFORMATION 2 /* Set */ +#define FS_SIZE_INFORMATION 3 /* Query */ +#define FS_DEVICE_INFORMATION 4 /* Query */ +#define FS_ATTRIBUTE_INFORMATION 5 /* Query */ +#define FS_CONTROL_INFORMATION 6 /* Query, Set */ +#define FS_FULL_SIZE_INFORMATION 7 /* Query */ +#define FS_OBJECT_ID_INFORMATION 8 /* Query, Set */ +#define FS_DRIVER_PATH_INFORMATION 9 /* Query */ +#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */ +#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. 
Query */ + +struct smb2_fs_full_size_info { + __le64 TotalAllocationUnits; + __le64 CallerAvailableAllocationUnits; + __le64 ActualAvailableAllocationUnits; + __le32 SectorsPerAllocationUnit; + __le32 BytesPerSector; +} __packed; + +#define SSINFO_FLAGS_ALIGNED_DEVICE 0x00000001 +#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002 +#define SSINFO_FLAGS_NO_SEEK_PENALTY 0x00000004 +#define SSINFO_FLAGS_TRIM_ENABLED 0x00000008 + +/* sector size info struct */ +struct smb3_fs_ss_info { + __le32 LogicalBytesPerSector; + __le32 PhysicalBytesPerSectorForAtomicity; + __le32 PhysicalBytesPerSectorForPerf; + __le32 FSEffPhysicalBytesPerSectorForAtomicity; + __le32 Flags; + __le32 ByteOffsetForSectorAlignment; + __le32 ByteOffsetForPartitionAlignment; +} __packed; + +/* File System Control Information */ +struct smb2_fs_control_info { + __le64 FreeSpaceStartFiltering; + __le64 FreeSpaceThreshold; + __le64 FreeSpaceStopFiltering; + __le64 DefaultQuotaThreshold; + __le64 DefaultQuotaLimit; + __le32 FileSystemControlFlags; + __le32 Padding; +} __packed; + +/* volume info struct - see MS-FSCC 2.5.9 */ +#define MAX_VOL_LABEL_LEN 32 +struct smb3_fs_vol_info { + __le64 VolumeCreationTime; + __u32 VolumeSerialNumber; + __le32 VolumeLabelLength; /* includes trailing null */ + __u8 SupportsObjects; /* True if eg like NTFS, supports objects */ + __u8 Reserved; + __u8 VolumeLabel[]; /* variable len */ +} __packed; + +/* See MS-SMB2 2.2.23 through 2.2.25 */ +struct smb2_oplock_break { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 24 */ + __u8 OplockLevel; + __u8 Reserved; + __le32 Reserved2; + __u64 PersistentFid; + __u64 VolatileFid; +} __packed; + +#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01) + +struct smb2_lease_break { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 44 */ + __le16 Epoch; + __le32 Flags; + __u8 LeaseKey[16]; + __le32 CurrentLeaseState; + __le32 NewLeaseState; + __le32 BreakReason; + __le32 AccessMaskHint; + __le32 ShareMaskHint; +} __packed; + +struct smb2_lease_ack { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 36 */ + __le16 Reserved; + __le32 Flags; + __u8 LeaseKey[16]; + __le32 LeaseState; + __le64 LeaseDuration; +} __packed; +#define OP_BREAK_STRUCT_SIZE_20 24 +#define OP_BREAK_STRUCT_SIZE_21 36 #endif /* _COMMON_SMB2PDU_H */ diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 0adb970f4e73..14e2fb49cff5 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Data verification functions, i.e. hooks for ->readpages() + * Data verification functions, i.e. hooks for ->readahead() * * Copyright 2019 Google LLC */ @@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); * that fail verification are set to the Error state. Verification is skipped * for pages already in the Error state, e.g. due to fscrypt decryption failure. * - * This is a helper function for use by the ->readpages() method of filesystems + * This is a helper function for use by the ->readahead() method of filesystems * that issue bios to read data directly into the page cache. Filesystems that * populate the page cache without issuing bios (e.g. non block-based * filesystems) must instead call fsverity_verify_page() directly on each page. 
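The fs/verity comment just above distinguishes two ways filesystems hook data verification into their ->readahead() path: bio-based filesystems call fsverity_verify_bio() when a read bio completes, while filesystems that fill the page cache without issuing bios call fsverity_verify_page() on each page. A minimal sketch of the page-based variant follows; it is illustrative only and assumes a hypothetical my_fs_read_page_data() helper that populates a single page.

static int my_fs_readahead_one(struct inode *inode, struct page *page)
{
	/* my_fs_read_page_data() is a placeholder for the filesystem's own read path */
	int err = my_fs_read_page_data(inode, page);

	if (err)
		return err;

	/* Only verify once fs-verity is set up on this inode (fsverity_active()). */
	if (fsverity_active(inode) && !fsverity_verify_page(page))
		return -EIO;

	SetPageUptodate(page);
	return 0;
}

A bio-based filesystem would instead leave verification out of this path and call fsverity_verify_bio() from its read-completion handler, possibly via a workqueue using fsverity_enqueue_verify_work().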
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 353e53b892e6..b52ed339727f 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -82,6 +82,24 @@ xfs_prealloc_blocks( } /* + * The number of blocks per AG that we withhold from xfs_mod_fdblocks to + * guarantee that we can refill the AGFL prior to allocating space in a nearly + * full AG. Although the space described by the free space btrees, the + * blocks used by the freesp btrees themselves, and the blocks owned by the + * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk + * free space in the AG drop so low that the free space btrees cannot refill an + * empty AGFL up to the minimum level. Rather than grind through empty AGs + * until the fs goes down, we subtract this many AG blocks from the incore + * fdblocks to ensure user allocation does not overcommit the space the + * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to + * withhold space from xfs_mod_fdblocks, so we do not account for that here. + */ +#define XFS_ALLOCBT_AGFL_RESERVE 4 + +/* + * Compute the number of blocks that we set aside to guarantee the ability to + * refill the AGFL and handle a full bmap btree split. + * * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of * AGF buffer (PV 947395), we place constraints on the relationship among * actual allocations for data blocks, freelist blocks, and potential file data @@ -93,14 +111,14 @@ xfs_prealloc_blocks( * extents need to be actually allocated. To get around this, we explicitly set * aside a few blocks which will not be reserved in delayed allocation. * - * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a - * potential split of the file's bmap btree. + * For each AG, we need to reserve enough blocks to replenish a totally empty + * AGFL and 4 more to handle a potential split of the file's bmap btree. */ unsigned int xfs_alloc_set_aside( struct xfs_mount *mp) { - return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4); + return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4); } /* @@ -124,12 +142,12 @@ xfs_alloc_ag_max_usable( unsigned int blocks; blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */ - blocks += XFS_ALLOC_AGFL_RESERVE; + blocks += XFS_ALLOCBT_AGFL_RESERVE; blocks += 3; /* AGF, AGI btree root blocks */ if (xfs_has_finobt(mp)) blocks++; /* finobt root block */ if (xfs_has_rmapbt(mp)) - blocks++; /* rmap root block */ + blocks++; /* rmap root block */ if (xfs_has_reflink(mp)) blocks++; /* refcount root block */ diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index 1c14a0b1abea..d4c057b764f9 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -88,7 +88,6 @@ typedef struct xfs_alloc_arg { #define XFS_ALLOC_NOBUSY (1 << 2)/* Busy extents not allowed */ /* freespace limit calculations */ -#define XFS_ALLOC_AGFL_RESERVE 4 unsigned int xfs_alloc_set_aside(struct xfs_mount *mp); unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c index 32fa02945f73..ae4345b37621 100644 --- a/fs/xfs/xfs_bio_io.c +++ b/fs/xfs/xfs_bio_io.c @@ -9,39 +9,6 @@ static inline unsigned int bio_max_vecs(unsigned int count) return bio_max_segs(howmany(count, PAGE_SIZE)); } -static void -xfs_flush_bdev_async_endio( - struct bio *bio) -{ - complete(bio->bi_private); -} - -/* - * Submit a request for an async cache flush to run. 
If the request queue does - * not require flush operations, just skip it altogether. If the caller needs - * to wait for the flush completion at a later point in time, they must supply a - * valid completion. This will be signalled when the flush completes. The - * caller never sees the bio that is issued here. - */ -void -xfs_flush_bdev_async( - struct bio *bio, - struct block_device *bdev, - struct completion *done) -{ - struct request_queue *q = bdev->bd_disk->queue; - - if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { - complete(done); - return; - } - - bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); - bio->bi_private = done; - bio->bi_end_io = xfs_flush_bdev_async_endio; - - submit_bio(bio); -} int xfs_rw_bdev( struct block_device *bdev, diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 33e26690a8c4..68f74549fa22 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -17,6 +17,7 @@ #include "xfs_fsops.h" #include "xfs_trans_space.h" #include "xfs_log.h" +#include "xfs_log_priv.h" #include "xfs_ag.h" #include "xfs_ag_resv.h" #include "xfs_trace.h" @@ -347,7 +348,7 @@ xfs_fs_counts( cnt->allocino = percpu_counter_read_positive(&mp->m_icount); cnt->freeino = percpu_counter_read_positive(&mp->m_ifree); cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) - - mp->m_alloc_set_aside; + xfs_fdblocks_unavailable(mp); spin_lock(&mp->m_sb_lock); cnt->freertx = mp->m_sb.sb_frextents; @@ -430,46 +431,36 @@ xfs_reserve_blocks( * If the request is larger than the current reservation, reserve the * blocks before we update the reserve counters. Sample m_fdblocks and * perform a partial reservation if the request exceeds free space. + * + * The code below estimates how many blocks it can request from + * fdblocks to stash in the reserve pool. This is a classic TOCTOU + * race since fdblocks updates are not always coordinated via + * m_sb_lock. Set the reserve size even if there's not enough free + * space to fill it because mod_fdblocks will refill an undersized + * reserve when it can. */ - error = -ENOSPC; - do { - free = percpu_counter_sum(&mp->m_fdblocks) - - mp->m_alloc_set_aside; - if (free <= 0) - break; - - delta = request - mp->m_resblks; - lcounter = free - delta; - if (lcounter < 0) - /* We can't satisfy the request, just get what we can */ - fdblks_delta = free; - else - fdblks_delta = delta; - + free = percpu_counter_sum(&mp->m_fdblocks) - + xfs_fdblocks_unavailable(mp); + delta = request - mp->m_resblks; + mp->m_resblks = request; + if (delta > 0 && free > 0) { /* * We'll either succeed in getting space from the free block - * count or we'll get an ENOSPC. If we get a ENOSPC, it means - * things changed while we were calculating fdblks_delta and so - * we should try again to see if there is anything left to - * reserve. + * count or we'll get an ENOSPC. Don't set the reserved flag + * here - we don't want to reserve the extra reserve blocks + * from the reserve. * - * Don't set the reserved flag here - we don't want to reserve - * the extra reserve blocks from the reserve..... + * The desired reserve size can change after we drop the lock. + * Use mod_fdblocks to put the space into the reserve or into + * fdblocks as appropriate. */ + fdblks_delta = min(free, delta); spin_unlock(&mp->m_sb_lock); error = xfs_mod_fdblocks(mp, -fdblks_delta, 0); + if (!error) + xfs_mod_fdblocks(mp, fdblks_delta, 0); spin_lock(&mp->m_sb_lock); - } while (error == -ENOSPC); - - /* - * Update the reserve counters if blocks have been successfully - * allocated. 
- */ - if (!error && fdblks_delta) { - mp->m_resblks += fdblks_delta; - mp->m_resblks_avail += fdblks_delta; } - out: if (outval) { outval->resblks = mp->m_resblks; @@ -528,8 +519,11 @@ xfs_do_force_shutdown( int tag; const char *why; - if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) + + if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) { + xlog_shutdown_wait(mp->m_log); return; + } if (mp->m_sb_bp) mp->m_sb_bp->b_flags |= XBF_DONE; diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 20186c584c7d..bffd6eb0b298 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -883,7 +883,7 @@ xfs_reclaim_inode( */ if (xlog_is_shutdown(ip->i_mount->m_log)) { xfs_iunpin_wait(ip); - xfs_iflush_abort(ip); + xfs_iflush_shutdown_abort(ip); goto reclaim; } if (xfs_ipincount(ip)) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 26227d26f274..9de6205fe134 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3631,7 +3631,7 @@ xfs_iflush_cluster( /* * We must use the safe variant here as on shutdown xfs_iflush_abort() - * can remove itself from the list. + * will remove itself from the list. */ list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { iip = (struct xfs_inode_log_item *)lip; diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 11158fa81a09..9e6ef55cf29e 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -544,10 +544,17 @@ xfs_inode_item_push( uint rval = XFS_ITEM_SUCCESS; int error; - ASSERT(iip->ili_item.li_buf); + if (!bp || (ip->i_flags & XFS_ISTALE)) { + /* + * Inode item/buffer is being aborted due to cluster + * buffer deletion. Trigger a log force to have that operation + * completed and items removed from the AIL before the next push + * attempt. + */ + return XFS_ITEM_PINNED; + } - if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) || - (ip->i_flags & XFS_ISTALE)) + if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp)) return XFS_ITEM_PINNED; if (xfs_iflags_test(ip, XFS_IFLUSHING)) @@ -834,46 +841,143 @@ xfs_buf_inode_io_fail( } /* - * This is the inode flushing abort routine. It is called when - * the filesystem is shutting down to clean up the inode state. It is - * responsible for removing the inode item from the AIL if it has not been - * re-logged and clearing the inode's flush state. + * Clear the inode logging fields so no more flushes are attempted. If we are + * on a buffer list, it is now safe to remove it because the buffer is + * guaranteed to be locked. The caller will drop the reference to the buffer + * the log item held. + */ +static void +xfs_iflush_abort_clean( + struct xfs_inode_log_item *iip) +{ + iip->ili_last_fields = 0; + iip->ili_fields = 0; + iip->ili_fsync_fields = 0; + iip->ili_flush_lsn = 0; + iip->ili_item.li_buf = NULL; + list_del_init(&iip->ili_item.li_bio_list); +} + +/* + * Abort flushing the inode from a context holding the cluster buffer locked. + * + * This is the normal runtime method of aborting writeback of an inode that is + * attached to a cluster buffer. It occurs when the inode and the backing + * cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster + * flushing or buffer IO completion encounters a log shutdown situation. + * + * If we need to abort inode writeback and we don't already hold the buffer + * locked, call xfs_iflush_shutdown_abort() instead as this should only ever be + * necessary in a shutdown situation. 
*/ void xfs_iflush_abort( struct xfs_inode *ip) { struct xfs_inode_log_item *iip = ip->i_itemp; - struct xfs_buf *bp = NULL; + struct xfs_buf *bp; - if (iip) { - /* - * Clear the failed bit before removing the item from the AIL so - * xfs_trans_ail_delete() doesn't try to clear and release the - * buffer attached to the log item before we are done with it. - */ - clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags); - xfs_trans_ail_delete(&iip->ili_item, 0); + if (!iip) { + /* clean inode, nothing to do */ + xfs_iflags_clear(ip, XFS_IFLUSHING); + return; + } + + /* + * Remove the inode item from the AIL before we clear its internal + * state. Whilst the inode is in the AIL, it should have a valid buffer + * pointer for push operations to access - it is only safe to remove the + * inode from the buffer once it has been removed from the AIL. + * + * We also clear the failed bit before removing the item from the AIL + * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer + * references the inode item owns and needs to hold until we've fully + * aborted the inode log item and detached it from the buffer. + */ + clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags); + xfs_trans_ail_delete(&iip->ili_item, 0); + + /* + * Grab the inode buffer so can we release the reference the inode log + * item holds on it. + */ + spin_lock(&iip->ili_lock); + bp = iip->ili_item.li_buf; + xfs_iflush_abort_clean(iip); + spin_unlock(&iip->ili_lock); + xfs_iflags_clear(ip, XFS_IFLUSHING); + if (bp) + xfs_buf_rele(bp); +} + +/* + * Abort an inode flush in the case of a shutdown filesystem. This can be called + * from anywhere with just an inode reference and does not require holding the + * inode cluster buffer locked. If the inode is attached to a cluster buffer, + * it will grab and lock it safely, then abort the inode flush. + */ +void +xfs_iflush_shutdown_abort( + struct xfs_inode *ip) +{ + struct xfs_inode_log_item *iip = ip->i_itemp; + struct xfs_buf *bp; + + if (!iip) { + /* clean inode, nothing to do */ + xfs_iflags_clear(ip, XFS_IFLUSHING); + return; + } + + spin_lock(&iip->ili_lock); + bp = iip->ili_item.li_buf; + if (!bp) { + spin_unlock(&iip->ili_lock); + xfs_iflush_abort(ip); + return; + } + + /* + * We have to take a reference to the buffer so that it doesn't get + * freed when we drop the ili_lock and then wait to lock the buffer. + * We'll clean up the extra reference after we pick up the ili_lock + * again. + */ + xfs_buf_hold(bp); + spin_unlock(&iip->ili_lock); + xfs_buf_lock(bp); + + spin_lock(&iip->ili_lock); + if (!iip->ili_item.li_buf) { /* - * Clear the inode logging fields so no more flushes are - * attempted. + * Raced with another removal, hold the only reference + * to bp now. Inode should not be in the AIL now, so just clean + * up and return; */ - spin_lock(&iip->ili_lock); - iip->ili_last_fields = 0; - iip->ili_fields = 0; - iip->ili_fsync_fields = 0; - iip->ili_flush_lsn = 0; - bp = iip->ili_item.li_buf; - iip->ili_item.li_buf = NULL; - list_del_init(&iip->ili_item.li_bio_list); + ASSERT(list_empty(&iip->ili_item.li_bio_list)); + ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags)); + xfs_iflush_abort_clean(iip); spin_unlock(&iip->ili_lock); + xfs_iflags_clear(ip, XFS_IFLUSHING); + xfs_buf_relse(bp); + return; } - xfs_iflags_clear(ip, XFS_IFLUSHING); - if (bp) - xfs_buf_rele(bp); + + /* + * Got two references to bp. 
The first will get dropped by + * xfs_iflush_abort() when the item is removed from the buffer list, but + * we can't drop our reference until _abort() returns because we have to + * unlock the buffer as well. Hence we abort and then unlock and release + * our reference to the buffer. + */ + ASSERT(iip->ili_item.li_buf == bp); + spin_unlock(&iip->ili_lock); + xfs_iflush_abort(ip); + xfs_buf_relse(bp); } + /* * convert an xfs_inode_log_format struct from the old 32 bit version * (which can have different field alignments) to the native 64 bit version diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 1a302000d604..bbd836a44ff0 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip) extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_iflush_abort(struct xfs_inode *); +extern void xfs_iflush_shutdown_abort(struct xfs_inode *); extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, struct xfs_inode_log_format *); diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 09a8fba84ff9..cb9105d667db 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -197,8 +197,6 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y) int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count, char *data, unsigned int op); -void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev, - struct completion *done); #define ASSERT_ALWAYS(expr) \ (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__)) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a8034c0afdf2..499e15b24215 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -487,7 +487,10 @@ out_error: * Run all the pending iclog callbacks and wake log force waiters and iclog * space waiters so they can process the newly set shutdown state. We really * don't care what order we process callbacks here because the log is shut down - * and so state cannot change on disk anymore. + * and so state cannot change on disk anymore. However, we cannot wake waiters + * until the callbacks have been processed because we may be in unmount and + * we must ensure that all AIL operations the callbacks perform have completed + * before we tear down the AIL. * * We avoid processing actively referenced iclogs so that we don't run callbacks * while the iclog owner might still be preparing the iclog for IO submssion. @@ -501,7 +504,6 @@ xlog_state_shutdown_callbacks( struct xlog_in_core *iclog; LIST_HEAD(cb_list); - spin_lock(&log->l_icloglock); iclog = log->l_iclog; do { if (atomic_read(&iclog->ic_refcnt)) { @@ -509,26 +511,22 @@ xlog_state_shutdown_callbacks( continue; } list_splice_init(&iclog->ic_callbacks, &cb_list); + spin_unlock(&log->l_icloglock); + + xlog_cil_process_committed(&cb_list); + + spin_lock(&log->l_icloglock); wake_up_all(&iclog->ic_write_wait); wake_up_all(&iclog->ic_force_wait); } while ((iclog = iclog->ic_next) != log->l_iclog); wake_up_all(&log->l_flush_wait); - spin_unlock(&log->l_icloglock); - - xlog_cil_process_committed(&cb_list); } /* * Flush iclog to disk if this is the last reference to the given iclog and the * it is in the WANT_SYNC state. * - * If the caller passes in a non-zero @old_tail_lsn and the current log tail - * does not match, there may be metadata on disk that must be persisted before - * this iclog is written. 
To satisfy that requirement, set the - * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new - * log tail value. - * * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the * log tail is updated correctly. NEED_FUA indicates that the iclog will be * written to stable storage, and implies that a commit record is contained @@ -545,12 +543,10 @@ xlog_state_shutdown_callbacks( * always capture the tail lsn on the iclog on the first NEED_FUA release * regardless of the number of active reference counts on this iclog. */ - int xlog_state_release_iclog( struct xlog *log, - struct xlog_in_core *iclog, - xfs_lsn_t old_tail_lsn) + struct xlog_in_core *iclog) { xfs_lsn_t tail_lsn; bool last_ref; @@ -561,18 +557,14 @@ xlog_state_release_iclog( /* * Grabbing the current log tail needs to be atomic w.r.t. the writing * of the tail LSN into the iclog so we guarantee that the log tail does - * not move between deciding if a cache flush is required and writing - * the LSN into the iclog below. + * not move between the first time we know that the iclog needs to be + * made stable and when we eventually submit it. */ - if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) { + if ((iclog->ic_state == XLOG_STATE_WANT_SYNC || + (iclog->ic_flags & XLOG_ICL_NEED_FUA)) && + !iclog->ic_header.h_tail_lsn) { tail_lsn = xlog_assign_tail_lsn(log->l_mp); - - if (old_tail_lsn && tail_lsn != old_tail_lsn) - iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; - - if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) && - !iclog->ic_header.h_tail_lsn) - iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); } last_ref = atomic_dec_and_test(&iclog->ic_refcnt); @@ -583,11 +575,8 @@ xlog_state_release_iclog( * pending iclog callbacks that were waiting on the release of * this iclog. 
*/ - if (last_ref) { - spin_unlock(&log->l_icloglock); + if (last_ref) xlog_state_shutdown_callbacks(log); - spin_lock(&log->l_icloglock); - } return -EIO; } @@ -600,8 +589,6 @@ xlog_state_release_iclog( } iclog->ic_state = XLOG_STATE_SYNCING; - if (!iclog->ic_header.h_tail_lsn) - iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); xlog_verify_tail_lsn(log, iclog); trace_xlog_iclog_syncing(iclog, _RET_IP_); @@ -873,7 +860,7 @@ xlog_force_iclog( iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; if (iclog->ic_state == XLOG_STATE_ACTIVE) xlog_state_switch_iclogs(iclog->ic_log, iclog, 0); - return xlog_state_release_iclog(iclog->ic_log, iclog, 0); + return xlog_state_release_iclog(iclog->ic_log, iclog); } /* @@ -1373,7 +1360,7 @@ xlog_ioend_work( */ if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { xfs_alert(log->l_mp, "log I/O error %d", error); - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); } xlog_state_done_syncing(iclog); @@ -1912,7 +1899,7 @@ xlog_write_iclog( iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); return; } if (is_vmalloc_addr(iclog->ic_data)) @@ -2411,7 +2398,7 @@ xlog_write_copy_finish( ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || xlog_is_shutdown(log)); release_iclog: - error = xlog_state_release_iclog(log, iclog, 0); + error = xlog_state_release_iclog(log, iclog); spin_unlock(&log->l_icloglock); return error; } @@ -2487,7 +2474,7 @@ xlog_write( xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, "ctx ticket reservation ran out. Need to up reservation"); xlog_print_tic_res(log->l_mp, ticket); - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); } len = xlog_write_calc_vec_length(ticket, log_vector, optype); @@ -2628,7 +2615,7 @@ next_lv: spin_lock(&log->l_icloglock); xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); - error = xlog_state_release_iclog(log, iclog, 0); + error = xlog_state_release_iclog(log, iclog); spin_unlock(&log->l_icloglock); return error; @@ -3052,7 +3039,7 @@ restart: * reference to the iclog. */ if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) - error = xlog_state_release_iclog(log, iclog, 0); + error = xlog_state_release_iclog(log, iclog); spin_unlock(&log->l_icloglock); if (error) return error; @@ -3821,9 +3808,10 @@ xlog_verify_iclog( #endif /* - * Perform a forced shutdown on the log. This should be called once and once - * only by the high level filesystem shutdown code to shut the log subsystem - * down cleanly. + * Perform a forced shutdown on the log. + * + * This can be called from low level log code to trigger a shutdown, or from the + * high level mount shutdown code when the mount shuts down. * * Our main objectives here are to make sure that: * a. if the shutdown was not due to a log IO error, flush the logs to @@ -3832,6 +3820,8 @@ xlog_verify_iclog( * parties to find out. Nothing new gets queued after this is done. * c. Tasks sleeping on log reservations, pinned objects and * other resources get woken up. + * d. The mount is also marked as shut down so that log triggered shutdowns + * still behave the same as if they called xfs_forced_shutdown(). * * Return true if the shutdown cause was a log IO error and we actually shut the * log down. 
@@ -3843,25 +3833,25 @@ xlog_force_shutdown( { bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR); - /* - * If this happens during log recovery then we aren't using the runtime - * log mechanisms yet so there's nothing to shut down. - */ - if (!log || xlog_in_recovery(log)) + if (!log) return false; - ASSERT(!xlog_is_shutdown(log)); - /* * Flush all the completed transactions to disk before marking the log * being shut down. We need to do this first as shutting down the log * before the force will prevent the log force from flushing the iclogs * to disk. * - * Re-entry due to a log IO error shutdown during the log force is - * prevented by the atomicity of higher level shutdown code. + * When we are in recovery, there are no transactions to flush, and + * we don't want to touch the log because we don't want to perturb the + * current head/tail for future recovery attempts. Hence we need to + * avoid a log force in this case. + * + * If we are shutting down due to a log IO error, then we must avoid + * trying to write the log as that may just result in more IO errors and + * an endless shutdown/force loop. */ - if (!log_error) + if (!log_error && !xlog_in_recovery(log)) xfs_log_force(log->l_mp, XFS_LOG_SYNC); /* @@ -3878,12 +3868,25 @@ xlog_force_shutdown( spin_lock(&log->l_icloglock); if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { spin_unlock(&log->l_icloglock); - ASSERT(0); return false; } spin_unlock(&log->l_icloglock); /* + * If this log shutdown also sets the mount shutdown state, issue a + * shutdown warning message. + */ + if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) { + xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR, +"Filesystem has been shut down due to log error (0x%x).", + shutdown_flags); + xfs_alert(log->l_mp, +"Please unmount the filesystem and rectify the problem(s)."); + if (xfs_error_level >= XFS_ERRLEVEL_HIGH) + xfs_stack_trace(); + } + + /* * We don't want anybody waiting for log reservations after this. That * means we have to wake up everybody queued up on reserveq as well as * writeq. In addition, we make sure in xlog_{re}grant_log_space that @@ -3903,8 +3906,12 @@ xlog_force_shutdown( wake_up_all(&log->l_cilp->xc_start_wait); wake_up_all(&log->l_cilp->xc_commit_wait); spin_unlock(&log->l_cilp->xc_push_lock); + + spin_lock(&log->l_icloglock); xlog_state_shutdown_callbacks(log); + spin_unlock(&log->l_icloglock); + wake_up_var(&log->l_opstate); return log_error; } diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 796e4464f809..ba57323bfdce 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -540,7 +540,7 @@ xlog_cil_insert_items( spin_unlock(&cil->xc_cil_lock); if (tp->t_ticket->t_curr_res < 0) - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); } static void @@ -705,11 +705,21 @@ xlog_cil_set_ctx_write_state( * The LSN we need to pass to the log items on transaction * commit is the LSN reported by the first log vector write, not * the commit lsn. If we use the commit record lsn then we can - * move the tail beyond the grant write head. + * move the grant write head beyond the tail LSN and overwrite + * it. */ ctx->start_lsn = lsn; wake_up_all(&cil->xc_start_wait); spin_unlock(&cil->xc_push_lock); + + /* + * Make sure the metadata we are about to overwrite in the log + * has been flushed to stable storage before this iclog is + * issued. 
+ */ + spin_lock(&cil->xc_log->l_icloglock); + iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; + spin_unlock(&cil->xc_log->l_icloglock); return; } @@ -854,7 +864,7 @@ xlog_cil_write_commit_record( error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS); if (error) - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); return error; } @@ -888,10 +898,7 @@ xlog_cil_push_work( struct xfs_trans_header thdr; struct xfs_log_iovec lhdr; struct xfs_log_vec lvhdr = { NULL }; - xfs_lsn_t preflush_tail_lsn; xfs_csn_t push_seq; - struct bio bio; - DECLARE_COMPLETION_ONSTACK(bdev_flush); bool push_commit_stable; new_ctx = xlog_cil_ctx_alloc(); @@ -962,23 +969,6 @@ xlog_cil_push_work( spin_unlock(&cil->xc_push_lock); /* - * The CIL is stable at this point - nothing new will be added to it - * because we hold the flush lock exclusively. Hence we can now issue - * a cache flush to ensure all the completed metadata in the journal we - * are about to overwrite is on stable storage. - * - * Because we are issuing this cache flush before we've written the - * tail lsn to the iclog, we can have metadata IO completions move the - * tail forwards between the completion of this flush and the iclog - * being written. In this case, we need to re-issue the cache flush - * before the iclog write. To detect whether the log tail moves, sample - * the tail LSN *before* we issue the flush. - */ - preflush_tail_lsn = atomic64_read(&log->l_tail_lsn); - xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev, - &bdev_flush); - - /* * Pull all the log vectors off the items in the CIL, and remove the * items from the CIL. We don't need the CIL lock here because it's only * needed on the transaction commit side which is currently locked out @@ -1054,12 +1044,6 @@ xlog_cil_push_work( lvhdr.lv_iovecp = &lhdr; lvhdr.lv_next = ctx->lv_chain; - /* - * Before we format and submit the first iclog, we have to ensure that - * the metadata writeback ordering cache flush is complete. - */ - wait_for_completion(&bdev_flush); - error = xlog_cil_write_chain(ctx, &lvhdr); if (error) goto out_abort_free_ticket; @@ -1118,7 +1102,7 @@ xlog_cil_push_work( if (push_commit_stable && ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); - xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn); + xlog_state_release_iclog(log, ctx->commit_iclog); /* Not safe to reference ctx now! */ @@ -1139,7 +1123,7 @@ out_abort_free_ticket: return; } spin_lock(&log->l_icloglock); - xlog_state_release_iclog(log, ctx->commit_iclog, 0); + xlog_state_release_iclog(log, ctx->commit_iclog); /* Not safe to reference ctx now! */ spin_unlock(&log->l_icloglock); } diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 23103d68423c..401cdc400980 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -484,6 +484,17 @@ xlog_is_shutdown(struct xlog *log) return test_bit(XLOG_IO_ERROR, &log->l_opstate); } +/* + * Wait until the xlog_force_shutdown() has marked the log as shut down + * so xlog_is_shutdown() will always return true. 
+ */ +static inline void +xlog_shutdown_wait( + struct xlog *log) +{ + wait_var_event(&log->l_opstate, xlog_is_shutdown(log)); +} + /* common routines */ extern int xlog_recover( @@ -524,8 +535,7 @@ void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog, int eventual_size); -int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog, - xfs_lsn_t log_tail_lsn); +int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog); /* * When we crack an atomic LSN, we sample it first so that the value will not diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 96c997ed2ec8..c4ad4296c540 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2485,7 +2485,7 @@ xlog_finish_defer_ops( error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres, dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp); if (error) { - xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR); return error; } @@ -2519,21 +2519,22 @@ xlog_abort_defer_ops( xfs_defer_ops_capture_free(mp, dfc); } } + /* * When this is called, all of the log intent items which did not have - * corresponding log done items should be in the AIL. What we do now - * is update the data structures associated with each one. + * corresponding log done items should be in the AIL. What we do now is update + * the data structures associated with each one. * - * Since we process the log intent items in normal transactions, they - * will be removed at some point after the commit. This prevents us - * from just walking down the list processing each one. We'll use a - * flag in the intent item to skip those that we've already processed - * and use the AIL iteration mechanism's generation count to try to - * speed this up at least a bit. + * Since we process the log intent items in normal transactions, they will be + * removed at some point after the commit. This prevents us from just walking + * down the list processing each one. We'll use a flag in the intent item to + * skip those that we've already processed and use the AIL iteration mechanism's + * generation count to try to speed this up at least a bit. * - * When we start, we know that the intents are the only things in the - * AIL. As we process them, however, other items are added to the - * AIL. + * When we start, we know that the intents are the only things in the AIL. As we + * process them, however, other items are added to the AIL. Hence we know we + * have started recovery on all the pending intents when we find a non-intent + * item in the AIL. */ STATIC int xlog_recover_process_intents( @@ -2556,17 +2557,8 @@ xlog_recover_process_intents( for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL; lip = xfs_trans_ail_cursor_next(ailp, &cur)) { - /* - * We're done when we see something other than an intent. - * There should be no intents left in the AIL now. - */ - if (!xlog_item_is_intent(lip)) { -#ifdef DEBUG - for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) - ASSERT(!xlog_item_is_intent(lip)); -#endif + if (!xlog_item_is_intent(lip)) break; - } /* * We should never see a redo item with a LSN higher than @@ -2607,8 +2599,9 @@ err: } /* - * A cancel occurs when the mount has failed and we're bailing out. 
Release all + * pending log intent items that we haven't started recovery on so they don't + * pin the AIL. */ STATIC void xlog_recover_cancel_intents( @@ -2622,17 +2615,8 @@ xlog_recover_cancel_intents( spin_lock(&ailp->ail_lock); lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); while (lip != NULL) { - /* - * We're done when we see something other than an intent. - * There should be no intents left in the AIL now. - */ - if (!xlog_item_is_intent(lip)) { -#ifdef DEBUG - for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) - ASSERT(!xlog_item_is_intent(lip)); -#endif + if (!xlog_item_is_intent(lip)) break; - } spin_unlock(&ailp->ail_lock); lip->li_ops->iop_release(lip); @@ -3470,7 +3454,7 @@ xlog_recover_finish( */ xlog_recover_cancel_intents(log); xfs_alert(log->l_mp, "Failed to recover intents"); - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); return error; } @@ -3517,7 +3501,7 @@ xlog_recover_finish( * end of intents processing can be pushed through the CIL * and AIL. */ - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); } return 0; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index bed73e8002a5..c5f153c3693f 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -21,6 +21,7 @@ #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_log.h" +#include "xfs_log_priv.h" #include "xfs_error.h" #include "xfs_quota.h" #include "xfs_fsops.h" @@ -1146,7 +1147,7 @@ xfs_mod_fdblocks( * problems (i.e. transaction abort, pagecache discards, etc.) than * slightly premature -ENOSPC. */ - set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks); + set_aside = xfs_fdblocks_unavailable(mp); percpu_counter_add_batch(&mp->m_fdblocks, delta, batch); if (__percpu_counter_compare(&mp->m_fdblocks, set_aside, XFS_FDBLOCKS_BATCH) >= 0) { diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 00720a02e761..f6dc19de8322 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -479,6 +479,21 @@ extern void xfs_unmountfs(xfs_mount_t *); */ #define XFS_FDBLOCKS_BATCH 1024 +/* + * Estimate the amount of free space that is not available to userspace and is + * not explicitly reserved from the incore fdblocks. 
This includes: + * + * - The minimum number of blocks needed to support splitting a bmap btree + * - The blocks currently in use by the freespace btrees because they record + * the actual blocks that will fill per-AG metadata space reservations + */ +static inline uint64_t +xfs_fdblocks_unavailable( + struct xfs_mount *mp) +{ + return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks); +} + extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved); extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index d84714e4e46a..54be9d64093e 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -815,7 +815,8 @@ xfs_fs_statfs( spin_unlock(&mp->m_sb_lock); /* make sure statp->f_bfree does not underflow */ - statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0); + statp->f_bfree = max_t(int64_t, 0, + fdblocks - xfs_fdblocks_unavailable(mp)); statp->f_bavail = statp->f_bfree; fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 917a69f0a6ff..0ac717aad380 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -836,6 +836,7 @@ __xfs_trans_commit( bool regrant) { struct xfs_mount *mp = tp->t_mountp; + struct xlog *log = mp->m_log; xfs_csn_t commit_seq = 0; int error = 0; int sync = tp->t_flags & XFS_TRANS_SYNC; @@ -864,7 +865,13 @@ __xfs_trans_commit( if (!(tp->t_flags & XFS_TRANS_DIRTY)) goto out_unreserve; - if (xfs_is_shutdown(mp)) { + /* + * We must check against log shutdown here because we cannot abort log + * items and leave them dirty, inconsistent and unpinned in memory while + * the log is active. This leaves them open to being written back to + * disk, and that will lead to on-disk corruption. + */ + if (xlog_is_shutdown(log)) { error = -EIO; goto out_unreserve; } @@ -878,7 +885,7 @@ __xfs_trans_commit( xfs_trans_apply_sb_deltas(tp); xfs_trans_apply_dquot_deltas(tp); - xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant); + xlog_cil_commit(log, tp, &commit_seq, regrant); xfs_trans_free(tp); @@ -905,10 +912,10 @@ out_unreserve: */ xfs_trans_unreserve_and_mod_dquots(tp); if (tp->t_ticket) { - if (regrant && !xlog_is_shutdown(mp->m_log)) - xfs_log_ticket_regrant(mp->m_log, tp->t_ticket); + if (regrant && !xlog_is_shutdown(log)) + xfs_log_ticket_regrant(log, tp->t_ticket); else - xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); + xfs_log_ticket_ungrant(log, tp->t_ticket); tp->t_ticket = NULL; } xfs_trans_free_items(tp, !!error); @@ -926,18 +933,27 @@ xfs_trans_commit( } /* - * Unlock all of the transaction's items and free the transaction. - * The transaction must not have modified any of its items, because - * there is no way to restore them to their previous state. + * Unlock all of the transaction's items and free the transaction. If the + * transaction is dirty, we must shut down the filesystem because there is no + * way to restore them to their previous state. * - * If the transaction has made a log reservation, make sure to release - * it as well. + * If the transaction has made a log reservation, make sure to release it as + * well. + * + * This is a high level function (equivalent to xfs_trans_commit()) and so can + * be called after the transaction has effectively been aborted due to the mount + * being shut down. However, if the mount has not been shut down and the + * transaction is dirty we will shut the mount down and, in doing so, that + * guarantees that the log is shut down, too. 
Hence we don't need to be as + * careful with shutdown state and dirty items here as we need to be in + * xfs_trans_commit(). */ void xfs_trans_cancel( struct xfs_trans *tp) { struct xfs_mount *mp = tp->t_mountp; + struct xlog *log = mp->m_log; bool dirty = (tp->t_flags & XFS_TRANS_DIRTY); trace_xfs_trans_cancel(tp, _RET_IP_); @@ -955,16 +971,18 @@ xfs_trans_cancel( } /* - * See if the caller is relying on us to shut down the - * filesystem. This happens in paths where we detect - * corruption and decide to give up. + * See if the caller is relying on us to shut down the filesystem. We + * only want an error report if there isn't already a shutdown in + * progress, so we only need to check against the mount shutdown state + * here. */ if (dirty && !xfs_is_shutdown(mp)) { XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); } #ifdef DEBUG - if (!dirty && !xfs_is_shutdown(mp)) { + /* Log items need to be consistent until the log is shut down. */ + if (!dirty && !xlog_is_shutdown(log)) { struct xfs_log_item *lip; list_for_each_entry(lip, &tp->t_items, li_trans) @@ -975,7 +993,7 @@ xfs_trans_cancel( xfs_trans_unreserve_and_mod_dquots(tp); if (tp->t_ticket) { - xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); + xfs_log_ticket_ungrant(log, tp->t_ticket); tp->t_ticket = NULL; } diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index c2ccb98c7bcd..d3a97a028560 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -873,17 +873,17 @@ xfs_trans_ail_delete( int shutdown_type) { struct xfs_ail *ailp = lip->li_ailp; - struct xfs_mount *mp = ailp->ail_log->l_mp; + struct xlog *log = ailp->ail_log; xfs_lsn_t tail_lsn; spin_lock(&ailp->ail_lock); if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) { spin_unlock(&ailp->ail_lock); - if (shutdown_type && !xlog_is_shutdown(ailp->ail_log)) { - xfs_alert_tag(mp, XFS_PTAG_AILDELETE, + if (shutdown_type && !xlog_is_shutdown(log)) { + xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE, "%s: attempting to delete a log item that is not in the AIL", __func__); - xfs_force_shutdown(mp, shutdown_type); + xlog_force_shutdown(log, shutdown_type); } return; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index f2ad8ed8f777..652cd05b0924 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -95,7 +95,10 @@ struct blkcg_gq { spinlock_t async_bio_lock; struct bio_list async_bios; - struct work_struct async_bio_work; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; atomic_t use_delay; atomic64_t delay_nsec; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dd0763a1c674..1973ef9bd40f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -85,8 +85,10 @@ struct block_device { */ #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) typedef u32 __bitwise blk_status_t; +typedef u32 blk_short_t; #else typedef u8 __bitwise blk_status_t; +typedef u16 blk_short_t; #endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) diff --git a/include/linux/fs.h b/include/linux/fs.h index 183160872133..bbde95387a23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -275,7 +275,6 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ #define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ @@ -338,28 +337,6 @@ static inline bool 
is_sync_kiocb(struct kiocb *kiocb) { return kiocb->ki_complete == NULL; } -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user *buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, - unsigned long, unsigned long); - struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); @@ -370,12 +347,6 @@ struct address_space_operations { /* Mark a folio dirty. Return true if this dirtied it */ bool (*dirty_folio)(struct address_space *, struct folio *); - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. - */ - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, @@ -3027,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); -extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index b568b3c7d095..a7afc800bd8d 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * * This checks whether ->i_verity_info has been set. * - * Filesystems call this from ->readpages() to check whether the pages need to + * Filesystems call this from ->readahead() to check whether the pages need to * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9536ffa0473b..3f9b22c4983a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) +#define KVM_REQUEST_NO_ACTION BIT(10) /* * Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. @@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 -#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 +/* + * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to + * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick" + * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing + * on. 
A kick only guarantees that the vCPU is on its way out, e.g. a previous + * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no + * guarantee the vCPU received an IPI and has actually exited guest mode. + */ +#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) + #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ @@ -1221,27 +1231,27 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * @gpc: struct gfn_to_pfn_cache object. * @vcpu: vCPU to be used for marking pages dirty and to be woken on * invalidation. - * @guest_uses_pa: indicates that the resulting host physical PFN is used while - * @vcpu is IN_GUEST_MODE so invalidations should wake it. - * @kernel_map: requests a kernel virtual mapping (kmap / memremap). + * @usage: indicates if the resulting host physical PFN is used while + * the @vcpu is IN_GUEST_MODE (in which case invalidation of + * the cache from MMU notifiers---but not for KVM memslot + * changes!---will also force @vcpu to exit the guest and + * refresh the cache); and/or if the PFN used directly + * by KVM (and thus needs a kernel virtual mapping). * @gpa: guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. * -EFAULT for an untranslatable guest physical address. * * This primes a gfn_to_pfn_cache and links it into the @kvm's list for - * invalidations to be processed. Invalidation callbacks to @vcpu using - * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM - * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check() - * to ensure that the cache is valid before accessing the target page. + * invalidations to be processed. Callers are required to use + * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before + * accessing the target page. */ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, bool guest_uses_pa, - bool kernel_map, gpa_t gpa, unsigned long len, - bool dirty); + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. @@ -1250,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: current guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: %true if the cache is still valid and the address matches. * %false if the cache is not valid. @@ -1272,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: updated guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. @@ -1285,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * with the lock still held to permit access. 
*/ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len, bool dirty); + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. @@ -1293,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. * - * This unmaps the referenced page and marks it dirty, if appropriate. The - * cache is left in the invalid state but at least the mapping from GPA to - * userspace HVA will remain cached and can be reused on a subsequent - * refresh. + * This unmaps the referenced page. The cache is left in the invalid state + * but at least the mapping from GPA to userspace HVA will remain cached + * and can be reused on a subsequent refresh. */ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); @@ -1984,7 +1991,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) void kvm_arch_irq_routing_update(struct kvm *kvm); -static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* * Ensure the rest of the request is published to kvm_check_request's @@ -1994,6 +2001,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } +static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + /* + * Requests that don't require vCPU action should never be logged in + * vcpu->requests. The vCPU won't clear the request, so it will stay + * logged indefinitely and prevent the vCPU from entering the guest. + */ + BUILD_BUG_ON(!__builtin_constant_p(req) || + (req & KVM_REQUEST_NO_ACTION)); + + __kvm_make_request(req, vcpu); +} + static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) { return READ_ONCE(vcpu->requests); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index dceac12c1ce5..ac1ebb37a0ff 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -18,6 +18,7 @@ struct kvm_memslots; enum kvm_mr_change; +#include <linux/bits.h> #include <linux/types.h> #include <linux/spinlock_types.h> @@ -46,6 +47,12 @@ typedef u64 hfn_t; typedef hfn_t kvm_pfn_t; +enum pfn_cache_usage { + KVM_GUEST_USES_PFN = BIT(0), + KVM_HOST_USES_PFN = BIT(1), + KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN, +}; + struct gfn_to_hva_cache { u64 generation; gpa_t gpa; @@ -64,11 +71,9 @@ struct gfn_to_pfn_cache { rwlock_t lock; void *khva; kvm_pfn_t pfn; + enum pfn_cache_usage usage; bool active; bool valid; - bool dirty; - bool kernel_map; - bool guest_uses_pa; }; #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE diff --git a/include/linux/net.h b/include/linux/net.h index ba736b457a06..12093f4db50c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -125,6 +125,25 @@ struct socket { struct socket_wq wq; }; +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode.
+ */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + struct vm_area_struct; struct page; struct sockaddr; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 4f44f83817a9..f626a445d1a8 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -346,6 +346,7 @@ enum { NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, + NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a8d0b327b066..993994cd943a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, void *data); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); -extern int read_cache_pages(struct address_space *mapping, - struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index dffeb8281c2d..8f5a86e210b9 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index) static inline void sbitmap_free(struct sbitmap *sb) { free_percpu(sb->alloc_hint); - kfree(sb->map); + kvfree(sb->map); sb->map = NULL; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 88cc16444b43..60820ab511d2 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *); int seq_path_root(struct seq_file *m, const struct path *path, const struct path *root, const char *esc); +void *single_start(struct seq_file *, loff_t *); int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); int single_release(struct inode *, struct file *); diff --git a/include/uapi/linux/user_events.h b/include/linux/user_events.h index e570840571e1..e570840571e1 100644 --- a/include/uapi/linux/user_events.h +++ b/include/linux/user_events.h diff --git a/include/linux/xarray.h b/include/linux/xarray.h index bb52b786be1b..72feab5ea8d4 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -9,6 +9,7 @@ * See Documentation/core-api/xarray.rst for how to use the XArray. 
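read_descriptor_t moves out of fs.h because the VFS no longer uses it, but the networking read-actor interface still does, which is why it lands in linux/net.h. As a reminder of what the "descriptor" carries, here is a hedged sketch of a socket consumer driving tcp_read_sock() with a private context; the demo_* names and the byte-accounting policy are illustrative assumptions, only the read_descriptor_t fields and the tcp_read_sock()/sk_read_actor_t signatures come from the existing networking API:

#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>

/* Hypothetical per-read context, the "multiple different users" case above. */
struct demo_rx {
	size_t bytes;
};

/* sk_read_actor_t callback: invoked for each chunk tcp_read_sock() walks. */
static int demo_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			   unsigned int offset, size_t len)
{
	struct demo_rx *rx = desc->arg.data;

	if (len > desc->count)
		len = desc->count;

	rx->bytes += len;	/* a real actor would copy or consume the data */
	desc->count -= len;	/* remaining budget; reaching 0 stops the walk */
	return len;		/* bytes consumed from this skb */
}

static void demo_tcp_read(struct sock *sk, struct demo_rx *rx, size_t budget)
{
	read_descriptor_t desc = {
		.arg.data = rx,
		.count = budget,
	};

	tcp_read_sock(sk, &desc, demo_recv_actor);
}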
*/ +#include <linux/bitmap.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/gfp.h> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index d2be4eb22008..784adc6f6ed2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -201,11 +201,9 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries - * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) -#define IORING_CQE_F_MSG (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 24a1c45bd1ae..98e60801195e 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -45,7 +45,7 @@ struct loop_info { unsigned long lo_inode; /* ioctl r/o */ __kernel_old_dev_t lo_rdevice; /* ioctl r/o */ int lo_offset; - int lo_encrypt_type; + int lo_encrypt_type; /* obsolete, ignored */ int lo_encrypt_key_size; /* ioctl w/o */ int lo_flags; char lo_name[LO_NAME_SIZE]; @@ -61,7 +61,7 @@ struct loop_info64 { __u64 lo_offset; __u64 lo_sizelimit;/* bytes, 0 == max available */ __u32 lo_number; /* ioctl r/o */ - __u32 lo_encrypt_type; + __u32 lo_encrypt_type; /* obsolete, ignored */ __u32 lo_encrypt_key_size; /* ioctl w/o */ __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 9bb54c0b3b2d..2c43e327a619 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -767,6 +767,7 @@ config USER_EVENTS bool "User trace events" select TRACING select DYNAMIC_EVENTS + depends on BROKEN || COMPILE_TEST # API needs to be straightened out help User trace events are user-defined trace events that can be used like an existing kernel trace event.
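Separately from the USER_EVENTS change, the io_uring.h hunk above trims the CQE flag list but keeps the convention that IORING_CQE_F_BUFFER stores the provided-buffer ID in the upper 16 bits of cqe->flags. A small userspace sketch of decoding that, assuming a liburing ring set up elsewhere with provided buffers registered; the liburing calls are the standard ones, the handling itself is illustrative:

#include <liburing.h>
#include <stdio.h>

/* Assumes "ring" was initialised elsewhere and buffers were provided. */
static void demo_handle_cqe(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	if (io_uring_wait_cqe(ring, &cqe))
		return;

	if (cqe->flags & IORING_CQE_F_BUFFER) {
		/* Upper 16 bits of cqe->flags carry the provided-buffer ID. */
		unsigned int buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

		printf("res=%d from buffer %u\n", cqe->res, buf_id);
	}
	if (cqe->flags & IORING_CQE_F_MORE)
		printf("more completions will follow for this SQE\n");

	io_uring_cqe_seen(ring, cqe);
}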
User trace diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 8b3d241a31c2..68d62bfac12f 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -18,7 +18,12 @@ #include <linux/tracefs.h> #include <linux/types.h> #include <linux/uaccess.h> +/* Reminder to move to uapi when everything works */ +#ifdef CONFIG_COMPILE_TEST +#include <linux/user_events.h> +#else #include <uapi/linux/user_events.h> +#endif #include "trace.h" #include "trace_dynevent.h" diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 3990e4df3d7b..230038d4f908 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref) for (i = 0; i < wqueue->nr_pages; i++) __free_page(wqueue->notes[i]); + kfree(wqueue->notes); bitmap_free(wqueue->notes_bitmap); wfilter = rcu_access_pointer(wqueue->filter); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2eb3de18ded3..ae4fd4de9ebe 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, sb->alloc_hint = NULL; } - sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node); + sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node); if (!sb->map) { free_percpu(sb->alloc_hint); return -ENOMEM; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 8b1c318189ce..e77d4856442c 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1463,6 +1463,25 @@ unlock: XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_create_range_5(struct xarray *xa, + unsigned long index, unsigned int order) +{ + XA_STATE_ORDER(xas, xa, index, order); + unsigned int i; + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + + for (i = 0; i < order + 10; i++) { + do { + xas_lock(&xas); + xas_create_range(&xas); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + } + + xa_destroy(xa); +} + static noinline void check_create_range(struct xarray *xa) { unsigned int order; @@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa) check_create_range_4(xa, (3U << order) + 1, order); check_create_range_4(xa, (3U << order) - 1, order); check_create_range_4(xa, (1U << 24) + 1, order); + + check_create_range_5(xa, 0, order); + check_create_range_5(xa, (1U << order), order); } check_create_range_3(); diff --git a/lib/xarray.c b/lib/xarray.c index b95e92598b9c..4acc88ea7c21 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas) for (;;) { struct xa_node *node = xas->xa_node; + if (node->shift >= shift) + break; xas->xa_node = xa_parent_locked(xas->xa, node); xas->xa_offset = node->offset - 1; if (node->offset != 0) @@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xa_mk_node(child)); if (xa_is_value(curr)) values--; + xas_update(xas, child); } else { unsigned int canon = offset - xas->xa_sibs; @@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } while (offset-- > xas->xa_offset); node->nr_values += values; + xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); #endif diff --git a/mm/filemap.c b/mm/filemap.c index 647d72bf23b6..3a5ffb5587cd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file, * the page cache as the locked folio would then be enough to * synchronize with hole punching. 
But there are code paths * such as filemap_update_page() filling in partially uptodate - * pages or ->readpages() that need to hold invalidate_lock + * pages or ->readahead() that need to hold invalidate_lock * while mapping blocks for IO so let's hold the lock here as * well to keep locking rules simple. */ @@ -3752,9 +3752,10 @@ out: } EXPORT_SYMBOL(generic_file_direct_write); -ssize_t generic_perform_write(struct file *file, - struct iov_iter *i, loff_t pos) +ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) { + struct file *file = iocb->ki_filp; + loff_t pos = iocb->ki_pos; struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; @@ -3884,7 +3885,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) goto out; - status = generic_perform_write(file, from, pos = iocb->ki_pos); + pos = iocb->ki_pos; + status = generic_perform_write(iocb, from); /* * If generic_perform_write() returned a synchronous error * then we want to return the number of bytes which were @@ -3916,7 +3918,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) */ } } else { - written = generic_perform_write(file, from, iocb->ki_pos); + written = generic_perform_write(iocb, from); if (likely(written > 0)) iocb->ki_pos += written; } diff --git a/mm/readahead.c b/mm/readahead.c index d3a47546d17d..8e3775829513 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -13,29 +13,29 @@ * * Readahead is used to read content into the page cache before it is * explicitly requested by the application. Readahead only ever - * attempts to read pages that are not yet in the page cache. If a - * page is present but not up-to-date, readahead will not try to read + * attempts to read folios that are not yet in the page cache. If a + * folio is present but not up-to-date, readahead will not try to read * it. In that case a simple ->readpage() will be requested. * * Readahead is triggered when an application read request (whether a - * systemcall or a page fault) finds that the requested page is not in + * system call or a page fault) finds that the requested folio is not in * the page cache, or that it is in the page cache and has the - * %PG_readahead flag set. This flag indicates that the page was loaded - * as part of a previous read-ahead request and now that it has been - * accessed, it is time for the next read-ahead. + * readahead flag set. This flag indicates that the folio was read + * as part of a previous readahead request and now that it has been + * accessed, it is time for the next readahead. * * Each readahead request is partly synchronous read, and partly async - * read-ahead. This is reflected in the struct file_ra_state which - * contains ->size being to total number of pages, and ->async_size - * which is the number of pages in the async section. The first page in - * this async section will have %PG_readahead set as a trigger for a - * subsequent read ahead. Once a series of sequential reads has been + * readahead. This is reflected in the struct file_ra_state which + * contains ->size being the total number of pages, and ->async_size + * which is the number of pages in the async section. The readahead + * flag will be set on the first folio in this async section to trigger + * a subsequent readahead. 
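For the generic_perform_write() change shown above, callers now pass only the kiocb and the iov_iter; the file and the starting offset are taken from iocb->ki_filp and iocb->ki_pos. A hedged sketch of how a simple filesystem's ->write_iter might look after the conversion; demo_* is hypothetical, and the helper calls follow the in-tree pattern used by __generic_file_write_iter():

#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical ->write_iter adapted to generic_perform_write(kiocb, iter). */
static ssize_t demo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = generic_perform_write(iocb, from); /* pos comes from iocb->ki_pos */
	inode_unlock(inode);

	if (ret > 0) {
		/* As in __generic_file_write_iter(), the caller advances ki_pos. */
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}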
Once a series of sequential reads has been * established, there should be no need for a synchronous component and - * all read ahead request will be fully asynchronous. + * all readahead request will be fully asynchronous. * - * When either of the triggers causes a readahead, three numbers need to - * be determined: the start of the region, the size of the region, and - * the size of the async tail. + * When either of the triggers causes a readahead, three numbers need + * to be determined: the start of the region to read, the size of the + * region, and the size of the async tail. * * The start of the region is simply the first page address at or after * the accessed address, which is not currently populated in the page @@ -45,14 +45,14 @@ * was explicitly requested from the determined request size, unless * this would be less than zero - then zero is used. NOTE THIS * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED - * PAGE. + * PAGE. ALSO THIS CALCULATION IS NOT USED CONSISTENTLY. * * The size of the region is normally determined from the size of the * previous readahead which loaded the preceding pages. This may be * discovered from the struct file_ra_state for simple sequential reads, * or from examining the state of the page cache when multiple * sequential reads are interleaved. Specifically: where the readahead - * was triggered by the %PG_readahead flag, the size of the previous + * was triggered by the readahead flag, the size of the previous * readahead is assumed to be the number of pages from the triggering * page to the start of the new readahead. In these cases, the size of * the previous readahead is scaled, often doubled, for the new @@ -65,52 +65,52 @@ * larger than the current request, and it is not scaled up, unless it * is at the start of file. * - * In general read ahead is accelerated at the start of the file, as + * In general readahead is accelerated at the start of the file, as * reads from there are often sequential. There are other minor - * adjustments to the read ahead size in various special cases and these + * adjustments to the readahead size in various special cases and these * are best discovered by reading the code. * - * The above calculation determines the readahead, to which any requested - * read size may be added. + * The above calculation, based on the previous readahead size, + * determines the size of the readahead, to which any requested read + * size may be added. * * Readahead requests are sent to the filesystem using the ->readahead() * address space operation, for which mpage_readahead() is a canonical * implementation. ->readahead() should normally initiate reads on all - * pages, but may fail to read any or all pages without causing an IO + * folios, but may fail to read any or all folios without causing an I/O * error. The page cache reading code will issue a ->readpage() request - * for any page which ->readahead() does not provided, and only an error + * for any folio which ->readahead() did not read, and only an error * from this will be final. * - * ->readahead() will generally call readahead_page() repeatedly to get - * each page from those prepared for read ahead. It may fail to read a - * page by: + * ->readahead() will generally call readahead_folio() repeatedly to get + * each folio from those prepared for readahead. 
It may fail to read a + * folio by: * - * * not calling readahead_page() sufficiently many times, effectively - * ignoring some pages, as might be appropriate if the path to + * * not calling readahead_folio() sufficiently many times, effectively + * ignoring some folios, as might be appropriate if the path to * storage is congested. * - * * failing to actually submit a read request for a given page, + * * failing to actually submit a read request for a given folio, * possibly due to insufficient resources, or * * * getting an error during subsequent processing of a request. * - * In the last two cases, the page should be unlocked to indicate that - * the read attempt has failed. In the first case the page will be - * unlocked by the caller. + * In the last two cases, the folio should be unlocked by the filesystem + * to indicate that the read attempt has failed. In the first case the + * folio will be unlocked by the VFS. * - * Those pages not in the final ``async_size`` of the request should be + * Those folios not in the final ``async_size`` of the request should be * considered to be important and ->readahead() should not fail them due * to congestion or temporary resource unavailability, but should wait * for necessary resources (e.g. memory or indexing information) to - * become available. Pages in the final ``async_size`` may be + * become available. Folios in the final ``async_size`` may be * considered less urgent and failure to read them is more acceptable. - * In this case it is best to use delete_from_page_cache() to remove the - * pages from the page cache as is automatically done for pages that - * were not fetched with readahead_page(). This will allow a - * subsequent synchronous read ahead request to try them again. If they + * In this case it is best to use filemap_remove_folio() to remove the + * folios from the page cache as is automatically done for folios that + * were not fetched with readahead_folio(). This will allow a + * subsequent synchronous readahead request to try them again. If they * are left in the page cache, then they will be read individually using - * ->readpage(). - * + * ->readpage() which may be less efficient. */ #include <linux/kernel.h> @@ -142,91 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) } EXPORT_SYMBOL_GPL(file_ra_state_init); -/* - * see if a page needs releasing upon read_cache_pages() failure - * - the caller of read_cache_pages() may have set PG_private or PG_fscache - * before calling, such as the NFS fs marking pages that are cached locally - * on disk, thus we need to give the fs a chance to clean up in the event of - * an error - */ -static void read_cache_pages_invalidate_page(struct address_space *mapping, - struct page *page) -{ - if (page_has_private(page)) { - if (!trylock_page(page)) - BUG(); - page->mapping = mapping; - folio_invalidate(page_folio(page), 0, PAGE_SIZE); - page->mapping = NULL; - unlock_page(page); - } - put_page(page); -} - -/* - * release a list of pages, invalidating them first if need be - */ -static void read_cache_pages_invalidate_pages(struct address_space *mapping, - struct list_head *pages) -{ - struct page *victim; - - while (!list_empty(pages)) { - victim = lru_to_page(pages); - list_del(&victim->lru); - read_cache_pages_invalidate_page(mapping, victim); - } -} - -/** - * read_cache_pages - populate an address space with some pages & start reads against them - * @mapping: the address_space - * @pages: The address of a list_head which contains the target pages. 
These - * pages have their ->index populated and are otherwise uninitialised. - * @filler: callback routine for filling a single page. - * @data: private data for the callback routine. - * - * Hides the details of the LRU cache etc from the filesystems. - * - * Returns: %0 on success, error return by @filler otherwise - */ -int read_cache_pages(struct address_space *mapping, struct list_head *pages, - int (*filler)(void *, struct page *), void *data) -{ - struct page *page; - int ret = 0; - - while (!list_empty(pages)) { - page = lru_to_page(pages); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, page->index, - readahead_gfp_mask(mapping))) { - read_cache_pages_invalidate_page(mapping, page); - continue; - } - put_page(page); - - ret = filler(data, page); - if (unlikely(ret)) { - read_cache_pages_invalidate_pages(mapping, pages); - break; - } - task_io_account_read(PAGE_SIZE); - } - return ret; -} - -EXPORT_SYMBOL(read_cache_pages); - -static void read_pages(struct readahead_control *rac, struct list_head *pages, - bool skip_page) +static void read_pages(struct readahead_control *rac) { const struct address_space_operations *aops = rac->mapping->a_ops; struct page *page; struct blk_plug plug; if (!readahead_count(rac)) - goto out; + return; blk_start_plug(&plug); @@ -234,7 +157,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, aops->readahead(rac); /* * Clean up the remaining pages. The sizes in ->ra - * maybe be used to size next read-ahead, so make sure + * may be used to size the next readahead, so make sure * they accurately reflect what happened. */ while ((page = readahead_page(rac))) { @@ -246,13 +169,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, unlock_page(page); put_page(page); } - } else if (aops->readpages) { - aops->readpages(rac->file, rac->mapping, pages, - readahead_count(rac)); - /* Clean up the remaining pages */ - put_pages_list(pages); - rac->_index += rac->_nr_pages; - rac->_nr_pages = 0; } else { while ((page = readahead_page(rac))) { aops->readpage(rac->file, page); @@ -262,12 +178,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, blk_finish_plug(&plug); - BUG_ON(pages && !list_empty(pages)); BUG_ON(readahead_count(rac)); - -out: - if (skip_page) - rac->_index++; } /** @@ -289,7 +200,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, { struct address_space *mapping = ractl->mapping; unsigned long index = readahead_index(ractl); - LIST_HEAD(page_pool); gfp_t gfp_mask = readahead_gfp_mask(mapping); unsigned long i; @@ -321,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * have a stable reference to this page, and it's * not worth getting one just for that. 
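The rewritten readahead documentation above spells out the ->readahead() contract: take folios with readahead_folio(), unlock each folio you took, and leave anything you skipped for the VFS to clean up, which read_pages() below now does without the ->readpages() fallback. A minimal, hypothetical implementation for a filesystem that can satisfy reads synchronously, for example by zero-filling holes; the demo_ name and the zero-fill behaviour are assumptions for illustration:

#include <linux/pagemap.h>
#include <linux/highmem.h>

/*
 * Hypothetical ->readahead(): every folio taken via readahead_folio() must
 * be unlocked by the filesystem; folios never taken are unlocked (and
 * dropped from the page cache) by the VFS read_pages() path shown above.
 */
static void demo_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl))) {
		/* Pretend the range is sparse: hand back zeroed, uptodate data. */
		folio_zero_range(folio, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
}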
*/ - read_pages(ractl, &page_pool, true); + read_pages(ractl); + ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -329,13 +240,11 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) break; - if (mapping->a_ops->readpages) { - folio->index = index + i; - list_add(&folio->lru, &page_pool); - } else if (filemap_add_folio(mapping, folio, index + i, + if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); - read_pages(ractl, &page_pool, true); + read_pages(ractl); + ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -349,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * uptodate then the caller will launch readpage again, and * will then handle the error. */ - read_pages(ractl, &page_pool, false); + read_pages(ractl); filemap_invalidate_unlock_shared(mapping); memalloc_nofs_restore(nofs); } @@ -394,8 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl, struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; - if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages && - !mapping->a_ops->readahead)) + if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead)) return; /* @@ -512,7 +420,7 @@ static pgoff_t count_history_pages(struct address_space *mapping, } /* - * page cache context based read-ahead + * page cache context based readahead */ static int try_context_readahead(struct address_space *mapping, struct file_ra_state *ra, @@ -624,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl, ra->async_size += index - limit - 1; } - read_pages(ractl, NULL, false); + read_pages(ractl); /* * If there were already pages in the page cache, then we may have @@ -763,9 +671,9 @@ void page_cache_sync_ra(struct readahead_control *ractl, bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM); /* - * Even if read-ahead is disabled, issue this request as read-ahead + * Even if readahead is disabled, issue this request as readahead * as we'll need it to satisfy the requested range. The forced - * read-ahead will do the right thing and limit the read to just the + * readahead will do the right thing and limit the read to just the * requested range, which we'll set to 1 page for this case. */ if (!ractl->ra->ra_pages || blk_cgroup_congested()) { @@ -781,7 +689,6 @@ void page_cache_sync_ra(struct readahead_control *ractl, return; } - /* do read-ahead */ ondemand_readahead(ractl, NULL, req_count); } EXPORT_SYMBOL_GPL(page_cache_sync_ra); @@ -789,7 +696,7 @@ EXPORT_SYMBOL_GPL(page_cache_sync_ra); void page_cache_async_ra(struct readahead_control *ractl, struct folio *folio, unsigned long req_count) { - /* no read-ahead */ + /* no readahead */ if (!ractl->ra->ra_pages) return; @@ -804,7 +711,6 @@ void page_cache_async_ra(struct readahead_control *ractl, if (blk_cgroup_congested()) return; - /* do read-ahead */ ondemand_readahead(ractl, folio, req_count); } EXPORT_SYMBOL_GPL(page_cache_async_ra); diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 901835a56e89..c4340c90e172 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -658,13 +658,6 @@ static char *escape_string_value(const char *in) return out; } -/* - * Kconfig configuration printer - * - * This printer is used when generating the resulting configuration after - * kconfig invocation and `defconfig' files. 
Unset symbol might be omitted by - * passing a non-NULL argument to the printer. - */ enum output_n { OUTPUT_N, OUTPUT_N_AS_UNSET, OUTPUT_N_NONE }; static void __print_symbol(FILE *fp, struct symbol *sym, enum output_n output_n, diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index f704034ebbe6..20f44504a644 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -50,7 +50,7 @@ gen_initcalls() { info GEN .tmp_initcalls.lds - ${PYTHON} ${srctree}/scripts/jobserver-exec \ + ${PYTHON3} ${srctree}/scripts/jobserver-exec \ ${PERL} ${srctree}/scripts/generate_initcall_order.pl \ ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS} \ > .tmp_initcalls.lds diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index d10f93aac1c8..ed9d056d2108 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -674,7 +674,7 @@ static void handle_modversion(const struct module *mod, unsigned int crc; if (sym->st_shndx == SHN_UNDEF) { - warn("EXPORT symbol \"%s\" [%s%s] version ...\n" + warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n" "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n", symname, mod->name, mod->is_vmlinux ? "" : ".ko", symname); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 69c318fdff61..70e05af5ebea 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir); static const struct file_operations stat_fops_per_vm; +static struct file_operations kvm_chardev_ops; + static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #ifdef CONFIG_KVM_COMPAT @@ -251,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, { int cpu; - kvm_make_request(req, vcpu); + if (likely(!(req & KVM_REQUEST_NO_ACTION))) + __kvm_make_request(req, vcpu); if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) return; @@ -1131,6 +1134,16 @@ static struct kvm *kvm_create_vm(unsigned long type) preempt_notifier_inc(); kvm_init_pm_notifier(kvm); + /* + * When the fd passed to this ioctl() is opened it pins the module, + * but try_module_get() also prevents getting a reference if the module + * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait"). 
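The kvm_main.c change above pins kvm.ko for the lifetime of each VM rather than relying on the VM/vCPU fops to carry an owner. The same pattern, taking a module reference when a long-lived object is created and dropping it when the object is destroyed, can be sketched generically as follows; demo_* is hypothetical, only try_module_get()/module_put() and the .owner convention are real kernel API:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

/* Hypothetical long-lived object that must not outlive its module's code. */
struct demo_obj {
	int dummy;
};

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Fails if the module is already being removed (MODULE_STATE_GOING). */
	if (!try_module_get(demo_fops.owner)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

static void demo_destroy(struct demo_obj *obj)
{
	kfree(obj);
	module_put(demo_fops.owner);
}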
+ */ + if (!try_module_get(kvm_chardev_ops.owner)) { + r = -ENODEV; + goto out_err; + } + return kvm; out_err: @@ -1220,6 +1233,7 @@ static void kvm_destroy_vm(struct kvm *kvm) preempt_notifier_dec(); hardware_disable_all(); mmdrop(mm); + module_put(kvm_chardev_ops.owner); } void kvm_get_kvm(struct kvm *kvm) @@ -3663,7 +3677,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) return 0; } -static struct file_operations kvm_vcpu_fops = { +static const struct file_operations kvm_vcpu_fops = { .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, .mmap = kvm_vcpu_mmap, @@ -4714,7 +4728,7 @@ static long kvm_vm_compat_ioctl(struct file *filp, } #endif -static struct file_operations kvm_vm_fops = { +static const struct file_operations kvm_vm_fops = { .release = kvm_vm_release, .unlocked_ioctl = kvm_vm_ioctl, .llseek = noop_llseek, @@ -5721,8 +5735,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, goto out_free_5; kvm_chardev_ops.owner = module; - kvm_vm_fops.owner = module; - kvm_vcpu_fops.owner = module; r = misc_register(&kvm_dev); if (r) { diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index ce878f4be4da..dd84676615f1 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, { DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); struct gfn_to_pfn_cache *gpc; - bool wake_vcpus = false; + bool evict_vcpus = false; spin_lock(&kvm->gpc_lock); list_for_each_entry(gpc, &kvm->gpc_list, list) { @@ -40,41 +40,32 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, /* * If a guest vCPU could be using the physical address, - * it needs to be woken. + * it needs to be forced out of guest mode. */ - if (gpc->guest_uses_pa) { - if (!wake_vcpus) { - wake_vcpus = true; + if (gpc->usage & KVM_GUEST_USES_PFN) { + if (!evict_vcpus) { + evict_vcpus = true; bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); } __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); } - - /* - * We cannot call mark_page_dirty() from here because - * this physical CPU might not have an active vCPU - * with which to do the KVM dirty tracking. - * - * Neither is there any point in telling the kernel MM - * that the underlying page is dirty. A vCPU in guest - * mode might still be writing to it up to the point - * where we wake them a few lines further down anyway. - * - * So all the dirty marking happens on the unmap. - */ } write_unlock_irq(&gpc->lock); } spin_unlock(&kvm->gpc_lock); - if (wake_vcpus) { - unsigned int req = KVM_REQ_GPC_INVALIDATE; + if (evict_vcpus) { + /* + * KVM needs to ensure the vCPU is fully out of guest context + * before allowing the invalidation to continue. + */ + unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE; bool called; /* * If the OOM reaper is active, then all vCPUs should have * been stopped already, so perform the request without - * KVM_REQUEST_WAIT and be sad if any needed to be woken. + * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd. 
*/ if (!may_block) req &= ~KVM_REQUEST_WAIT; @@ -104,8 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); -static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, - gpa_t gpa, bool dirty) +static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa) { /* Unmap the old page if it was mapped before, and release it */ if (!is_error_noslot_pfn(pfn)) { @@ -118,9 +108,7 @@ static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, #endif } - kvm_release_pfn(pfn, dirty); - if (dirty) - mark_page_dirty(kvm, gpa); + kvm_release_pfn(pfn, false); } } @@ -152,7 +140,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva) } int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len, bool dirty) + gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); unsigned long page_offset = gpa & ~PAGE_MASK; @@ -160,7 +148,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, unsigned long old_uhva; gpa_t old_gpa; void *old_khva; - bool old_valid, old_dirty; + bool old_valid; int ret = 0; /* @@ -177,20 +165,19 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, old_khva = gpc->khva - offset_in_page(gpc->khva); old_uhva = gpc->uhva; old_valid = gpc->valid; - old_dirty = gpc->dirty; /* If the userspace HVA is invalid, refresh that first */ if (gpc->gpa != gpa || gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) { gfn_t gfn = gpa_to_gfn(gpa); - gpc->dirty = false; gpc->gpa = gpa; gpc->generation = slots->generation; gpc->memslot = __gfn_to_memslot(slots, gfn); gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); if (kvm_is_error_hva(gpc->uhva)) { + gpc->pfn = KVM_PFN_ERR_FAULT; ret = -EFAULT; goto out; } @@ -219,7 +206,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, goto map_done; } - if (gpc->kernel_map) { + if (gpc->usage & KVM_HOST_USES_PFN) { if (new_pfn == old_pfn) { new_khva = old_khva; old_pfn = KVM_PFN_ERR_FAULT; @@ -255,14 +242,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } out: - if (ret) - gpc->dirty = false; - else - gpc->dirty = dirty; - write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + __release_gpc(kvm, old_pfn, old_khva, old_gpa); return ret; } @@ -272,7 +254,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) { void *old_khva; kvm_pfn_t old_pfn; - bool old_dirty; gpa_t old_gpa; write_lock_irq(&gpc->lock); @@ -280,7 +261,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) gpc->valid = false; old_khva = gpc->khva - offset_in_page(gpc->khva); - old_dirty = gpc->dirty; old_gpa = gpc->gpa; old_pfn = gpc->pfn; @@ -293,16 +273,17 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + __release_gpc(kvm, old_pfn, old_khva, old_gpa); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, bool guest_uses_pa, - bool kernel_map, gpa_t gpa, unsigned long len, - bool dirty) + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, + gpa_t gpa, unsigned long len) { + WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != 
usage); + if (!gpc->active) { rwlock_init(&gpc->lock); @@ -310,8 +291,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpc->pfn = KVM_PFN_ERR_FAULT; gpc->uhva = KVM_HVA_ERR_BAD; gpc->vcpu = vcpu; - gpc->kernel_map = kernel_map; - gpc->guest_uses_pa = guest_uses_pa; + gpc->usage = usage; gpc->valid = false; gpc->active = true; @@ -319,7 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, list_add(&gpc->list, &kvm->gpc_list); spin_unlock(&kvm->gpc_lock); } - return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty); + return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init); |
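To tie the pfncache changes together, this is roughly what a caller-side conversion looks like with the prototype exported above; the wrapper function and the choice of PAGE_SIZE are illustrative assumptions, while the argument lists follow the prototypes in this patch:

#include <linux/kvm_host.h>

/*
 * Hypothetical conversion of a caller that maps a guest page used both by
 * the guest (while IN_GUEST_MODE) and by KVM via a kernel mapping.  The old
 * call would have been:
 *
 *	kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, true, true, gpa, PAGE_SIZE, false);
 */
static int demo_setup_cache(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			    struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu,
					 KVM_GUEST_AND_HOST_USE_PFN,
					 gpa, PAGE_SIZE);
}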