2924 files changed, 62875 insertions, 32872 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-bq32k b/Documentation/ABI/testing/sysfs-bus-i2c-devices-bq32k new file mode 100644 index 000000000000..398b258fb770 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-bq32k @@ -0,0 +1,7 @@ +What: /sys/bus/i2c/devices/.../trickle_charge_bypass +Date: Jan 2017 +KernelVersion: 4.11 +Contact: Enric Balletbo i Serra <eballetbo@gmail.com> +Description: Attribute for enable/disable the trickle charge bypass + The trickle_charge_bypass attribute allows the userspace to + enable/disable the Trickle charge FET bypass. diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl index d7fcdc5a4379..0320910b866d 100644 --- a/Documentation/DocBook/libata.tmpl +++ b/Documentation/DocBook/libata.tmpl @@ -1020,7 +1020,7 @@ and other resources, etc. </itemizedlist> <para> - Of errors detected as above, the followings are not ATA/ATAPI + Of errors detected as above, the following are not ATA/ATAPI device errors but ATA bus errors and should be handled according to <xref linkend="excatATAbusErr"/>. </para> diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index 72292308d0f5..6962cab997ef 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt @@ -257,7 +257,7 @@ and tell you when they come and go. Creating the User -To user the message handler, you must first create a user using +To use the message handler, you must first create a user using ipmi_create_user. The interface number specifies which SMI you want to connect to, and you must supply callback functions to be called when data comes in. The callback function can run at interrupt level, diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt index 5f55373dd53b..a3f598e141f2 100644 --- a/Documentation/acpi/method-customizing.txt +++ b/Documentation/acpi/method-customizing.txt @@ -57,7 +57,7 @@ Note: To get the ACPI debug object output (Store (AAAA, Debug)), 3. undo your changes The "undo" operation is not supported for a new inserted method right now, i.e. we can not remove a method currently. - For an overrided method, in order to undo your changes, please + For an overridden method, in order to undo your changes, please save a copy of the method original ASL code in step c) section 1, and redo step c) ~ g) to override the method with the original one. diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt index c2505eefc878..0aba14c8f459 100644 --- a/Documentation/acpi/method-tracing.txt +++ b/Documentation/acpi/method-tracing.txt @@ -152,7 +152,7 @@ tracing facility. Users can enable/disable this debug tracing feature by executing the following command: # echo string > /sys/module/acpi/parameters/trace_state - Where "string" should be one of the followings: + Where "string" should be one of the following: "disable" Disable the method tracing feature. "enable" diff --git a/Documentation/admin-guide/ras.rst b/Documentation/admin-guide/ras.rst index 9939348bd4a3..1b90c6f00a92 100644 --- a/Documentation/admin-guide/ras.rst +++ b/Documentation/admin-guide/ras.rst @@ -81,7 +81,7 @@ That defines some categories of errors: still run, eventually replacing the affected hardware by a hot spare, if available. - Also, when an error happens on an userspace process, it is also possible to + Also, when an error happens on a userspace process, it is also possible to kill such process and let userspace restart it. 
The mechanism for handling non-fatal errors is usually complex and may diff --git a/Documentation/blockdev/mflash.txt b/Documentation/blockdev/mflash.txt index 1f610ecf698a..f7e050551487 100644 --- a/Documentation/blockdev/mflash.txt +++ b/Documentation/blockdev/mflash.txt @@ -17,7 +17,7 @@ driver and currently works well under standard IDE subsystem. Actually it's one chip SSD. IO mode is ATA-like custom mode for the host that doesn't have IDE interface. -Followings are brief descriptions about IO mode. +Following are brief descriptions about IO mode. A. IO mode based on ATA protocol and uses some custom command. (read confirm, write confirm) B. IO mode uses SRAM bus interface. diff --git a/Documentation/cgroup-v1/rdma.txt b/Documentation/cgroup-v1/rdma.txt new file mode 100644 index 000000000000..af618171e0eb --- /dev/null +++ b/Documentation/cgroup-v1/rdma.txt @@ -0,0 +1,109 @@ + RDMA Controller + ---------------- + +Contents +-------- + +1. Overview + 1-1. What is RDMA controller? + 1-2. Why RDMA controller needed? + 1-3. How is RDMA controller implemented? +2. Usage Examples + +1. Overview + +1-1. What is RDMA controller? +----------------------------- + +RDMA controller allows user to limit RDMA/IB specific resources that a given +set of processes can use. These processes are grouped using RDMA controller. + +RDMA controller defines two resources which can be limited for processes of a +cgroup. + +1-2. Why RDMA controller needed? +-------------------------------- + +Currently user space applications can easily take away all the rdma verb +specific resources such as AH, CQ, QP, MR etc. Due to which other applications +in other cgroup or kernel space ULPs may not even get chance to allocate any +rdma resources. This can leads to service unavailability. + +Therefore RDMA controller is needed through which resource consumption +of processes can be limited. Through this controller different rdma +resources can be accounted. + +1-3. How is RDMA controller implemented? +---------------------------------------- + +RDMA cgroup allows limit configuration of resources. Rdma cgroup maintains +resource accounting per cgroup, per device using resource pool structure. +Each such resource pool is limited up to 64 resources in given resource pool +by rdma cgroup, which can be extended later if required. + +This resource pool object is linked to the cgroup css. Typically there +are 0 to 4 resource pool instances per cgroup, per device in most use cases. +But nothing limits to have it more. At present hundreds of RDMA devices per +single cgroup may not be handled optimally, however there is no +known use case or requirement for such configuration either. + +Since RDMA resources can be allocated from any process and can be freed by any +of the child processes which shares the address space, rdma resources are +always owned by the creator cgroup css. This allows process migration from one +to other cgroup without major complexity of transferring resource ownership; +because such ownership is not really present due to shared nature of +rdma resources. Linking resources around css also ensures that cgroups can be +deleted after processes migrated. This allow progress migration as well with +active resources, even though that is not a primary use case. + +Whenever RDMA resource charging occurs, owner rdma cgroup is returned to +the caller. Same rdma cgroup should be passed while uncharging the resource. 
+This also allows process migrated with active RDMA resource to charge +to new owner cgroup for new resource. It also allows to uncharge resource of +a process from previously charged cgroup which is migrated to new cgroup, +even though that is not a primary use case. + +Resource pool object is created in following situations. +(a) User sets the limit and no previous resource pool exist for the device +of interest for the cgroup. +(b) No resource limits were configured, but IB/RDMA stack tries to +charge the resource. So that it correctly uncharge them when applications are +running without limits and later on when limits are enforced during uncharging, +otherwise usage count will drop to negative. + +Resource pool is destroyed if all the resource limits are set to max and +it is the last resource getting deallocated. + +User should set all the limit to max value if it intents to remove/unconfigure +the resource pool for a particular device. + +IB stack honors limits enforced by the rdma controller. When application +query about maximum resource limits of IB device, it returns minimum of +what is configured by user for a given cgroup and what is supported by +IB device. + +Following resources can be accounted by rdma controller. + hca_handle Maximum number of HCA Handles + hca_object Maximum number of HCA Objects + +2. Usage Examples +----------------- + +(a) Configure resource limit: +echo mlx4_0 hca_handle=2 hca_object=2000 > /sys/fs/cgroup/rdma/1/rdma.max +echo ocrdma1 hca_handle=3 > /sys/fs/cgroup/rdma/2/rdma.max + +(b) Query resource limit: +cat /sys/fs/cgroup/rdma/2/rdma.max +#Output: +mlx4_0 hca_handle=2 hca_object=2000 +ocrdma1 hca_handle=3 hca_object=max + +(c) Query current usage: +cat /sys/fs/cgroup/rdma/2/rdma.current +#Output: +mlx4_0 hca_handle=1 hca_object=20 +ocrdma1 hca_handle=1 hca_object=23 + +(d) Delete resource limit: +echo echo mlx4_0 hca_handle=max hca_object=max > /sys/fs/cgroup/rdma/1/rdma.max diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index 4cc07ce3b8dd..3b8449f8ac7e 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt @@ -47,6 +47,12 @@ CONTENTS 5-3. IO 5-3-1. IO Interface Files 5-3-2. Writeback + 5-4. PID + 5-4-1. PID Interface Files + 5-5. RDMA + 5-5-1. RDMA Interface Files + 5-6. Misc + 5-6-1. perf_event 6. Namespace 6-1. Basics 6-2. The Root and Views @@ -328,14 +334,12 @@ a process with a non-root euid to migrate a target process into a cgroup by writing its PID to the "cgroup.procs" file, the following conditions must be met. -- The writer's euid must match either uid or suid of the target process. - - The writer must have write access to the "cgroup.procs" file. - The writer must have write access to the "cgroup.procs" file of the common ancestor of the source and destination cgroups. -The above three constraints ensure that while a delegatee may migrate +The above two constraints ensure that while a delegatee may migrate processes around freely in the delegated sub-hierarchy it can't pull in from or push out to outside the sub-hierarchy. @@ -350,10 +354,10 @@ all processes under C0 and C1 belong to U0. Let's also say U0 wants to write the PID of a process which is currently in C10 into "C00/cgroup.procs". U0 has write access to the -file and uid match on the process; however, the common ancestor of the -source cgroup C10 and the destination cgroup C00 is above the points -of delegation and U0 would not have write access to its "cgroup.procs" -files and thus the write will be denied with -EACCES. 
+file; however, the common ancestor of the source cgroup C10 and the +destination cgroup C00 is above the points of delegation and U0 would +not have write access to its "cgroup.procs" files and thus the write +will be denied with -EACCES. 2-6. Guidelines @@ -1119,6 +1123,91 @@ writeback as follows. vm.dirty[_background]_ratio. +5-4. PID + +The process number controller is used to allow a cgroup to stop any +new tasks from being fork()'d or clone()'d after a specified limit is +reached. + +The number of tasks in a cgroup can be exhausted in ways which other +controllers cannot prevent, thus warranting its own controller. For +example, a fork bomb is likely to exhaust the number of tasks before +hitting memory restrictions. + +Note that PIDs used in this controller refer to TIDs, process IDs as +used by the kernel. + + +5-4-1. PID Interface Files + + pids.max + + A read-write single value file which exists on non-root cgroups. The + default is "max". + + Hard limit of number of processes. + + pids.current + + A read-only single value file which exists on all cgroups. + + The number of processes currently in the cgroup and its descendants. + +Organisational operations are not blocked by cgroup policies, so it is +possible to have pids.current > pids.max. This can be done by either +setting the limit to be smaller than pids.current, or attaching enough +processes to the cgroup such that pids.current is larger than +pids.max. However, it is not possible to violate a cgroup PID policy +through fork() or clone(). These will return -EAGAIN if the creation +of a new process would cause a cgroup policy to be violated. + + +5-5. RDMA + +The "rdma" controller regulates the distribution and accounting of +of RDMA resources. + +5-5-1. RDMA Interface Files + + rdma.max + A readwrite nested-keyed file that exists for all the cgroups + except root that describes current configured resource limit + for a RDMA/IB device. + + Lines are keyed by device name and are not ordered. + Each line contains space separated resource name and its configured + limit that can be distributed. + + The following nested keys are defined. + + hca_handle Maximum number of HCA Handles + hca_object Maximum number of HCA Objects + + An example for mlx4 and ocrdma device follows. + + mlx4_0 hca_handle=2 hca_object=2000 + ocrdma1 hca_handle=3 hca_object=max + + rdma.current + A read-only file that describes current resource usage. + It exists for all the cgroup except root. + + An example for mlx4 and ocrdma device follows. + + mlx4_0 hca_handle=1 hca_object=20 + ocrdma1 hca_handle=1 hca_object=23 + + +5-6. Misc + +5-6-1. perf_event + +perf_event controller, if not mounted on a legacy hierarchy, is +automatically enabled on the v2 hierarchy so that perf events can +always be filtered by cgroup v2 path. The controller can still be +moved to a legacy hierarchy after v2 hierarchy is populated. + + 6. Namespace 6-1. Basics diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 0d199353e477..cd2cb2fc85ea 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -319,7 +319,7 @@ Version History 1.5.2 'mismatch_cnt' is zero unless [last_]sync_action is "check". 1.6.0 Add discard support (and devices_handle_discard_safely module param). 1.7.0 Add support for MD RAID0 mappings. 
-1.8.0 Explictely check for compatible flags in the superblock metadata +1.8.0 Explicitly check for compatible flags in the superblock metadata and reject to start the raid set if any are set by a newer target version, thus avoiding data corruption on a raid set with a reshape in progress. diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt index 9b2b41ab6817..c246cd2730d9 100644 --- a/Documentation/devicetree/bindings/arm/amlogic.txt +++ b/Documentation/devicetree/bindings/arm/amlogic.txt @@ -40,6 +40,8 @@ Board compatible values: - "hardkernel,odroid-c2" (Meson gxbb) - "amlogic,p200" (Meson gxbb) - "amlogic,p201" (Meson gxbb) + - "wetek,hub" (Meson gxbb) + - "wetek,play2" (Meson gxbb) - "amlogic,p212" (Meson gxl s905x) - "amlogic,p230" (Meson gxl s905d) - "amlogic,p231" (Meson gxl s905d) diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt index e56a1df3a9d3..dd906db34b32 100644 --- a/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt +++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-cprman.txt @@ -16,7 +16,20 @@ Required properties: - #clock-cells: Should be <1>. The permitted clock-specifier values can be found in include/dt-bindings/clock/bcm2835.h - reg: Specifies base physical address and size of the registers -- clocks: The external oscillator clock phandle +- clocks: phandles to the parent clocks used as input to the module, in + the following order: + + - External oscillator + - DSI0 byte clock + - DSI0 DDR2 clock + - DSI0 DDR clock + - DSI1 byte clock + - DSI1 DDR2 clock + - DSI1 DDR clock + + Only external oscillator is required. The DSI clocks may + not be present, in which case their children will be + unusable. Example: diff --git a/Documentation/devicetree/bindings/clock/exynos4415-clock.txt b/Documentation/devicetree/bindings/clock/exynos4415-clock.txt deleted file mode 100644 index 847d98bae8cf..000000000000 --- a/Documentation/devicetree/bindings/clock/exynos4415-clock.txt +++ /dev/null @@ -1,38 +0,0 @@ -* Samsung Exynos4415 Clock Controller - -The Exynos4415 clock controller generates and supplies clock to various -consumer devices within the Exynos4415 SoC. - -Required properties: - -- compatible: should be one of the following: - - "samsung,exynos4415-cmu" - for the main system clocks controller - (CMU_LEFTBUS, CMU_RIGHTBUS, CMU_TOP, CMU_CPU clock domains). - - "samsung,exynos4415-cmu-dmc" - for the Exynos4415 SoC DRAM Memory - Controller (DMC) domain clock controller. - -- reg: physical base address of the controller and length of memory mapped - region. - -- #clock-cells: should be 1. - -Each clock is assigned an identifier and client nodes can use this identifier -to specify the clock which they consume. - -All available clocks are defined as preprocessor macros in -dt-bindings/clock/exynos4415.h header and can be used in device -tree sources. - -Example 1: An example of a clock controller node is listed below. 
- - cmu: clock-controller@10030000 { - compatible = "samsung,exynos4415-cmu"; - reg = <0x10030000 0x18000>; - #clock-cells = <1>; - }; - - cmu-dmc: clock-controller@105C0000 { - compatible = "samsung,exynos4415-cmu-dmc"; - reg = <0x105C0000 0x3000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/clock/hi3660-clock.txt b/Documentation/devicetree/bindings/clock/hi3660-clock.txt new file mode 100644 index 000000000000..cc9b86c35758 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/hi3660-clock.txt @@ -0,0 +1,42 @@ +* Hisilicon Hi3660 Clock Controller + +The Hi3660 clock controller generates and supplies clock to various +controllers within the Hi3660 SoC. + +Required Properties: + +- compatible: the compatible should be one of the following strings to + indicate the clock controller functionality. + + - "hisilicon,hi3660-crgctrl" + - "hisilicon,hi3660-pctrl" + - "hisilicon,hi3660-pmuctrl" + - "hisilicon,hi3660-sctrl" + - "hisilicon,hi3660-iomcu" + +- reg: physical base address of the controller and length of memory mapped + region. + +- #clock-cells: should be 1. + +Each clock is assigned an identifier and client nodes use this identifier +to specify the clock which they consume. + +All these identifier could be found in <dt-bindings/clock/hi3660-clock.h>. + +Examples: + crg_ctrl: clock-controller@fff35000 { + compatible = "hisilicon,hi3660-crgctrl", "syscon"; + reg = <0x0 0xfff35000 0x0 0x1000>; + #clock-cells = <1>; + }; + + uart0: serial@fdf02000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0xfdf02000 0x0 0x1000>; + interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&crg_ctrl HI3660_CLK_MUX_UART0>, + <&crg_ctrl HI3660_PCLK>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt new file mode 100644 index 000000000000..87e9c47a89a3 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt @@ -0,0 +1,65 @@ +Binding for IDT VersaClock5 programmable i2c clock generator. + +The IDT VersaClock5 are programmable i2c clock generators providing +from 3 to 12 output clocks. + +==I2C device node== + +Required properties: +- compatible: shall be one of "idt,5p49v5923" , "idt,5p49v5933". +- reg: i2c device address, shall be 0x68 or 0x6a. +- #clock-cells: from common clock binding; shall be set to 1. +- clocks: from common clock binding; list of parent clock handles, + - 5p49v5923: (required) either or both of XTAL or CLKIN + reference clock. + - 5p49v5933: (optional) property not present (internal + Xtal used) or CLKIN reference + clock. +- clock-names: from common clock binding; clock input names, can be + - 5p49v5923: (required) either or both of "xin", "clkin". + - 5p49v5933: (optional) property not present or "clkin". 
+ +==Mapping between clock specifier and physical pins== + +When referencing the provided clock in the DT using phandle and +clock specifier, the following mapping applies: + +5P49V5923: + 0 -- OUT0_SEL_I2CB + 1 -- OUT1 + 2 -- OUT2 + +5P49V5933: + 0 -- OUT0_SEL_I2CB + 1 -- OUT1 + 2 -- OUT4 + +==Example== + +/* 25MHz reference crystal */ +ref25: ref25m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; +}; + +i2c-master-node { + + /* IDT 5P49V5923 i2c clock generator */ + vc5: clock-generator@6a { + compatible = "idt,5p49v5923"; + reg = <0x6a>; + #clock-cells = <1>; + + /* Connect XIN input to 25MHz reference */ + clocks = <&ref25m>; + clock-names = "xin"; + }; +}; + +/* Consumer referencing the 5P49V5923 pin OUT1 */ +consumer { + ... + clocks = <&vc5 1>; + ... +} diff --git a/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt index 520562a7dc2a..c7b4e3a6b2c6 100644 --- a/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt +++ b/Documentation/devicetree/bindings/clock/mvebu-corediv-clock.txt @@ -7,6 +7,7 @@ Required properties: - compatible : must be "marvell,armada-370-corediv-clock", "marvell,armada-375-corediv-clock", "marvell,armada-380-corediv-clock", + "marvell,mv98dx3236-corediv-clock", - reg : must be the register address of Core Divider control register - #clock-cells : from common clock binding; shall be set to 1 diff --git a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt index 99c214660bdc..7f28506eaee7 100644 --- a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt +++ b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt @@ -3,6 +3,7 @@ Device Tree Clock bindings for cpu clock of Marvell EBU platforms Required properties: - compatible : shall be one of the following: "marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP + "marvell,mv98dx3236-cpu-clock" - cpu clocks for 98DX3236 SoC - reg : Address and length of the clock complex register set, followed by address and length of the PMU DFS registers - #clock-cells : should be set to 1. diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt index 87d3714b956a..a7235e9e1c97 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -11,6 +11,7 @@ Required properties : compatible "qcom,rpmcc" should be also included. "qcom,rpmcc-msm8916", "qcom,rpmcc" + "qcom,rpmcc-msm8974", "qcom,rpmcc" "qcom,rpmcc-apq8064", "qcom,rpmcc" - #clock-cells : shall contain 1 diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt index c46919412953..f4f944d81308 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt @@ -42,6 +42,10 @@ Required Properties: Domain bindings in Documentation/devicetree/bindings/power/power_domain.txt. + - #reset-cells: Must be 1 + - The single reset specifier cell must be the module number, as defined + in the datasheet. 
+ Examples -------- @@ -55,6 +59,7 @@ Examples clock-names = "extal", "extalr"; #clock-cells = <2>; #power-domain-cells = <0>; + #reset-cells = <1>; }; @@ -69,5 +74,6 @@ Examples dmas = <&dmac1 0x13>, <&dmac1 0x12>; dma-names = "tx", "rx"; power-domains = <&cpg>; + resets = <&cpg 310>; status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt new file mode 100644 index 000000000000..e71c675ba5da --- /dev/null +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt @@ -0,0 +1,57 @@ +* Rockchip RK3328 Clock and Reset Unit + +The RK3328 clock controller generates and supplies clock to various +controllers within the SoC and also implements a reset controller for SoC +peripherals. + +Required Properties: + +- compatible: should be "rockchip,rk3328-cru" +- reg: physical base address of the controller and length of memory mapped + region. +- #clock-cells: should be 1. +- #reset-cells: should be 1. + +Optional Properties: + +- rockchip,grf: phandle to the syscon managing the "general register files" + If missing pll rates are not changeable, due to the missing pll lock status. + +Each clock is assigned an identifier and client nodes can use this identifier +to specify the clock which they consume. All available clocks are defined as +preprocessor macros in the dt-bindings/clock/rk3328-cru.h headers and can be +used in device tree sources. Similar macros exist for the reset sources in +these files. + +External clocks: + +There are several clocks that are generated outside the SoC. It is expected +that they are defined using standard clock bindings with following +clock-output-names: + - "xin24m" - crystal input - required, + - "clkin_i2s" - external I2S clock - optional, + - "gmac_clkin" - external GMAC clock - optional + - "phy_50m_out" - output clock of the pll in the mac phy + +Example: Clock controller node: + + cru: clock-controller@ff440000 { + compatible = "rockchip,rk3328-cru"; + reg = <0x0 0xff440000 0x0 0x1000>; + rockchip,grf = <&grf>; + + #clock-cells = <1>; + #reset-cells = <1>; + }; + +Example: UART controller node that consumes the clock generated by the clock + controller: + + uart0: serial@ff120000 { + compatible = "snps,dw-apb-uart"; + reg = <0xff120000 0x100>; + interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>; + reg-shift = <2>; + reg-io-width = <4>; + clocks = <&cru SCLK_UART0>; + }; diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.txt index 3888dd33fcbd..3bc56fae90ac 100644 --- a/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.txt +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3399-cru.txt @@ -13,6 +13,12 @@ Required Properties: - #clock-cells: should be 1. - #reset-cells: should be 1. +Optional Properties: + +- rockchip,grf: phandle to the syscon managing the "general register files". + It is used for GRF muxes, if missing any muxes present in the GRF will not + be available. + Each clock is assigned an identifier and client nodes can use this identifier to specify the clock which they consume. 
All available clocks are defined as preprocessor macros in the dt-bindings/clock/rk3399-cru.h headers and can be diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt index 8f19d87cbf24..b240121d2ac9 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt +++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt @@ -10,6 +10,7 @@ Required properties: - compatible: Should be: "st,stm32f42xx-rcc" "st,stm32f469-rcc" + "st,stm32f746-rcc" - reg: should be register base and length as documented in the datasheet - #reset-cells: 1, see below @@ -84,6 +85,25 @@ The secondary index is bound with the following magic numbers: 12 CLK_I2SQ_PDIV (post divisor of pll i2s q divisor) 13 CLK_SAIQ_PDIV (post divisor of pll sai q divisor) + 14 CLK_HSI (Internal ocscillator clock) + 15 CLK_SYSCLK (System Clock) + 16 CLK_HDMI_CEC (HDMI-CEC clock) + 17 CLK_SPDIF (SPDIF-Rx clock) + 18 CLK_USART1 (U(s)arts clocks) + 19 CLK_USART2 + 20 CLK_USART3 + 21 CLK_UART4 + 22 CLK_UART5 + 23 CLK_USART6 + 24 CLK_UART7 + 25 CLK_UART8 + 26 CLK_I2C1 (I2S clocks) + 27 CLK_I2C2 + 28 CLK_I2C3 + 29 CLK_I2C4 + 30 CLK_LPTIMER (LPTimer1 clock) +) + Example: /* Misc clock, FCLK */ diff --git a/Documentation/devicetree/bindings/clock/stericsson,abx500.txt b/Documentation/devicetree/bindings/clock/stericsson,abx500.txt new file mode 100644 index 000000000000..dbaa886b223e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/stericsson,abx500.txt @@ -0,0 +1,20 @@ +Clock bindings for ST-Ericsson ABx500 clocks + +Required properties : +- compatible : shall contain the following: + "stericsson,ab8500-clk" +- #clock-cells should be <1> + +The ABx500 clocks need to be placed as a subnode of an AB8500 +device node, see mfd/ab8500.txt + +All available clocks are defined as preprocessor macros in +dt-bindings/clock/ste-ab8500.h header and can be used in device +tree sources. + +Example: + +clock-controller { + compatible = "stericsson,ab8500-clk"; + #clock-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/clock/sun9i-de.txt b/Documentation/devicetree/bindings/clock/sun9i-de.txt new file mode 100644 index 000000000000..fb18f327b97a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/sun9i-de.txt @@ -0,0 +1,28 @@ +Allwinner A80 Display Engine Clock Control Binding +-------------------------------------------------- + +Required properties : +- compatible: must contain one of the following compatibles: + - "allwinner,sun9i-a80-de-clks" + +- reg: Must contain the registers base address and length +- clocks: phandle to the clocks feeding the display engine subsystem. + Three are needed: + - "mod": the display engine module clock + - "dram": the DRAM bus clock for the system + - "bus": the bus clock for the whole display engine subsystem +- clock-names: Must contain the clock names described just above +- resets: phandle to the reset control for the display engine subsystem. 
+- #clock-cells : must contain 1 +- #reset-cells : must contain 1 + +Example: +de_clocks: clock@3000000 { + compatible = "allwinner,sun9i-a80-de-clks"; + reg = <0x03000000 0x30>; + clocks = <&ccu CLK_DE>, <&ccu CLK_SDRAM>, <&ccu CLK_BUS_DE>; + clock-names = "mod", "dram", "bus"; + resets = <&ccu RST_BUS_DE>; + #clock-cells = <1>; + #reset-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/clock/sun9i-usb.txt b/Documentation/devicetree/bindings/clock/sun9i-usb.txt new file mode 100644 index 000000000000..3564bd4f2a20 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/sun9i-usb.txt @@ -0,0 +1,24 @@ +Allwinner A80 USB Clock Control Binding +--------------------------------------- + +Required properties : +- compatible: must contain one of the following compatibles: + - "allwinner,sun9i-a80-usb-clocks" + +- reg: Must contain the registers base address and length +- clocks: phandle to the clocks feeding the USB subsystem. Two are needed: + - "bus": the bus clock for the whole USB subsystem + - "hosc": the high frequency oscillator (usually at 24MHz) +- clock-names: Must contain the clock names described just above +- #clock-cells : must contain 1 +- #reset-cells : must contain 1 + +Example: +usb_clocks: clock@a08000 { + compatible = "allwinner,sun9i-a80-usb-clks"; + reg = <0x00a08000 0x8>; + clocks = <&ccu CLK_BUS_USB>, <&osc24M>; + clock-names = "bus", "hosc"; + #clock-cells = <1>; + #reset-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index 74d44a4273f2..bae5668cf427 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -7,6 +7,8 @@ Required properties : - "allwinner,sun8i-a23-ccu" - "allwinner,sun8i-a33-ccu" - "allwinner,sun8i-h3-ccu" + - "allwinner,sun8i-v3s-ccu" + - "allwinner,sun9i-a80-ccu" - "allwinner,sun50i-a64-ccu" - reg: Must contain the registers base address and length diff --git a/Documentation/devicetree/bindings/clock/ti,cdce925.txt b/Documentation/devicetree/bindings/clock/ti,cdce925.txt index 4c7669ad681b..0d01f2d5cc36 100644 --- a/Documentation/devicetree/bindings/clock/ti,cdce925.txt +++ b/Documentation/devicetree/bindings/clock/ti,cdce925.txt @@ -1,15 +1,22 @@ -Binding for TO CDCE925 programmable I2C clock synthesizers. +Binding for TI CDCE913/925/937/949 programmable I2C clock synthesizers. Reference This binding uses the common clock binding[1]. [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] http://www.ti.com/product/cdce925 +[2] http://www.ti.com/product/cdce913 +[3] http://www.ti.com/product/cdce925 +[4] http://www.ti.com/product/cdce937 +[5] http://www.ti.com/product/cdce949 The driver provides clock sources for each output Y1 through Y5. Required properties: - - compatible: Shall be "ti,cdce925" + - compatible: Shall be one of the following: + - "ti,cdce913": 1-PLL, 3 Outputs + - "ti,cdce925": 2-PLL, 5 Outputs + - "ti,cdce937": 3-PLL, 7 Outputs + - "ti,cdce949": 4-PLL, 9 Outputs - reg: I2C device address. - clocks: Points to a fixed parent clock that provides the input frequency. - #clock-cells: From common clock bindings: Shall be 1. @@ -18,7 +25,7 @@ Optional properties: - xtal-load-pf: Crystal load-capacitor value to fine-tune performance on a board, or to compensate for external influences. -For both PLL1 and PLL2 an optional child node can be used to specify spread +For all PLL1, PLL2, ... 
an optional child node can be used to specify spread spectrum clocking parameters for a board. - spread-spectrum: SSC mode as defined in the data sheet. - spread-spectrum-center: Use "centered" mode instead of "max" mode. When diff --git a/Documentation/devicetree/bindings/clock/zx296718-clk.txt b/Documentation/devicetree/bindings/clock/zx296718-clk.txt index 8c18b7b237bf..4ad703808407 100644 --- a/Documentation/devicetree/bindings/clock/zx296718-clk.txt +++ b/Documentation/devicetree/bindings/clock/zx296718-clk.txt @@ -13,6 +13,9 @@ Required properties: "zte,zx296718-lsp1crm": zx296718 device level clock selection and gating + "zte,zx296718-audiocrm": + zx296718 audio clock selection, divider and gating + - reg: Address and length of the register set The clock consumer should specify the desired clock by having the clock diff --git a/Documentation/devicetree/bindings/display/ssd1307fb.txt b/Documentation/devicetree/bindings/display/ssd1307fb.txt index eb31ed47a283..209d931ef16c 100644 --- a/Documentation/devicetree/bindings/display/ssd1307fb.txt +++ b/Documentation/devicetree/bindings/display/ssd1307fb.txt @@ -8,14 +8,15 @@ Required properties: 0x3c or 0x3d - pwm: Should contain the pwm to use according to the OF device tree PWM specification [0]. Only required for the ssd1307. - - reset-gpios: Should contain the GPIO used to reset the OLED display - solomon,height: Height in pixel of the screen driven by the controller - solomon,width: Width in pixel of the screen driven by the controller - solomon,page-offset: Offset of pages (band of 8 pixels) that the screen is mapped to. Optional properties: - - reset-active-low: Is the reset gpio is active on physical low? + - reset-gpios: The GPIO used to reset the OLED display, if available. See + Documentation/devicetree/bindings/gpio/gpio.txt for details. + - vbat-supply: The supply for VBAT - solomon,segment-no-remap: Display needs normal (non-inverted) data column to segment mapping - solomon,com-seq: Display uses sequential COM pin configuration diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt index cf53d5fba20a..aa097045a10e 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt @@ -19,7 +19,14 @@ Optional Properties: - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all children in idle state. This is necessary for example, if there are several multiplexers on the bus and the devices behind them use same I2C addresses. - + - interrupt-parent: Phandle for the interrupt controller that services + interrupts for this device. + - interrupts: Interrupt mapping for IRQ. + - interrupt-controller: Marks the device node as an interrupt controller. + - #interrupt-cells : Should be two. + - first cell is the pin number + - second cell is used to specify flags. 
+ See also Documentation/devicetree/bindings/interrupt-controller/interrupts.txt Example: @@ -29,6 +36,11 @@ Example: #size-cells = <0>; reg = <0x74>; + interrupt-parent = <&ipic>; + interrupts = <17 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; + i2c@2 { #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt index 7716acc55dec..ae9c2a735f39 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt @@ -10,6 +10,7 @@ Required properties: - "renesas,iic-r8a7793" (R-Car M2-N) - "renesas,iic-r8a7794" (R-Car E2) - "renesas,iic-r8a7795" (R-Car H3) + - "renesas,iic-r8a7796" (R-Car M3-W) - "renesas,iic-sh73a0" (SH-Mobile AG5) - "renesas,rcar-gen2-iic" (generic R-Car Gen2 compatible device) - "renesas,rcar-gen3-iic" (generic R-Car Gen3 compatible device) diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt new file mode 100644 index 000000000000..78eaf7b718ed --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt @@ -0,0 +1,33 @@ +* I2C controller embedded in STMicroelectronics STM32 I2C platform + +Required properties : +- compatible : Must be "st,stm32f4-i2c" +- reg : Offset and length of the register set for the device +- interrupts : Must contain the interrupt id for I2C event and then the + interrupt id for I2C error. +- resets: Must contain the phandle to the reset controller. +- clocks: Must contain the input clock of the I2C instance. +- A pinctrl state named "default" must be defined to set pins in mode of + operation for I2C transfer +- #address-cells = <1>; +- #size-cells = <0>; + +Optional properties : +- clock-frequency : Desired I2C bus clock frequency in Hz. If not specified, + the default 100 kHz frequency will be used. As only Normal and Fast modes + are supported, possible values are 100000 and 400000. + +Example : + + i2c@40005400 { + compatible = "st,stm32f4-i2c"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x40005400 0x400>; + interrupts = <31>, + <32>; + resets = <&rcc 277>; + clocks = <&rcc 0 149>; + pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; + pinctrl-names = "default"; + }; diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt new file mode 100644 index 000000000000..ab240e10debc --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt @@ -0,0 +1,42 @@ +NVIDIA Tegra186 BPMP I2C controller + +In Tegra186, the BPMP (Boot and Power Management Processor) owns certain HW +devices, such as the I2C controller for the power management I2C bus. Software +running on other CPUs must perform IPC to the BPMP in order to execute +transactions on that I2C bus. This binding describes an I2C bus that is +accessed in such a fashion. + +The BPMP I2C node must be located directly inside the main BPMP node. See +../firmware/nvidia,tegra186-bpmp.txt for details of the BPMP binding. + +This node represents an I2C controller. See ../i2c/i2c.txt for details of the +core I2C binding. + +Required properties: +- compatible: + Array of strings. + One of: + - "nvidia,tegra186-bpmp-i2c". +- #address-cells: Address cells for I2C device address. + Single-cell integer. + Must be <1>. +- #size-cells: + Single-cell integer. + Must be <0>. +- nvidia,bpmp-bus-id: + Single-cell integer. 
+ Indicates the I2C bus number this DT node represent, as defined by the + BPMP firmware. + +Example: + +bpmp { + ... + + i2c { + compatible = "nvidia,tegra186-bpmp-i2c"; + #address-cells = <1>; + #size-cells = <0>; + nvidia,bpmp-bus-id = <5>; + }; +}; diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt index 485bc59fcc48..3c91ad430eea 100644 --- a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt +++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt @@ -234,7 +234,7 @@ see regulator.txt - with additional custom properties described below: - qcom,switch-mode-frequency: Usage: required Value type: <u32> - Definition: Frequency (Hz) of the swith mode power supply; + Definition: Frequency (Hz) of the switch mode power supply; must be one of: 19200000, 9600000, 6400000, 4800000, 3840000, 3200000, 2740000, 2400000, 2130000, 1920000, 1750000, 1600000, diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index 7aa840c8768d..ae4234ca4ee4 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -1,7 +1,7 @@ * Marvell Armada 370 / Armada XP / Armada 3700 Ethernet Controller (NETA) Required properties: -- compatible: could be one of the followings +- compatible: could be one of the following: "marvell,armada-370-neta" "marvell,armada-xp-neta" "marvell,armada-3700-neta" diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt index 9f5ca4457b5f..63725498bd20 100644 --- a/Documentation/devicetree/bindings/opp/opp.txt +++ b/Documentation/devicetree/bindings/opp/opp.txt @@ -136,7 +136,7 @@ Optional properties: larger OPP table, based on what version of the hardware we are running on. We still can't have multiple nodes with the same opp-hz value in OPP table. - It's an user defined array containing a hierarchy of hardware version numbers, + It's a user defined array containing a hierarchy of hardware version numbers, supported by the OPP. For example: a platform with hierarchy of three levels of versions (A, B and C), this field should be like <X Y Z>, where X corresponds to Version hierarchy A, Y corresponds to version hierarchy B and Z @@ -188,14 +188,14 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together. opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -267,14 +267,14 @@ independently. opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -343,14 +343,14 @@ DVFS state together. 
opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -369,7 +369,7 @@ DVFS state together. opp@1300000000 { opp-hz = /bits/ 64 <1300000000>; - opp-microvolt = <1045000 1050000 1055000>; + opp-microvolt = <1050000 1045000 1055000>; opp-microamp = <95000>; clock-latency-ns = <400000>; opp-suspend; @@ -382,7 +382,7 @@ DVFS state together. }; opp@1500000000 { opp-hz = /bits/ 64 <1500000000>; - opp-microvolt = <1010000 1100000 1110000>; + opp-microvolt = <1100000 1010000 1110000>; opp-microamp = <95000>; clock-latency-ns = <400000>; turbo-mode; @@ -424,9 +424,9 @@ Example 4: Handling multiple regulators opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>, /* Supply 0 */ - <960000 965000 975000>, /* Supply 1 */ - <960000 965000 975000>; /* Supply 2 */ + opp-microvolt = <975000 970000 985000>, /* Supply 0 */ + <965000 960000 975000>, /* Supply 1 */ + <965000 960000 975000>; /* Supply 2 */ opp-microamp = <70000>, /* Supply 0 */ <70000>, /* Supply 1 */ <70000>; /* Supply 2 */ @@ -437,9 +437,9 @@ Example 4: Handling multiple regulators opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>, /* Supply 0 */ - <960000 965000 975000>, /* Supply 1 */ - <960000 965000 975000>; /* Supply 2 */ + opp-microvolt = <975000 970000 985000>, /* Supply 0 */ + <965000 960000 975000>, /* Supply 1 */ + <965000 960000 975000>; /* Supply 2 */ opp-microamp = <70000>, /* Supply 0 */ <0>, /* Supply 1 doesn't need this */ <70000>; /* Supply 2 */ @@ -474,7 +474,7 @@ Example 5: opp-supported-hw */ opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF> opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <900000 915000 925000>; + opp-microvolt = <915000 900000 925000>; ... }; @@ -487,7 +487,7 @@ Example 5: opp-supported-hw */ opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0> opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <900000 915000 925000>; + opp-microvolt = <915000 900000 925000>; ... 
}; }; @@ -512,18 +512,18 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>: opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt-slow = <900000 915000 925000>; - opp-microvolt-fast = <970000 975000 985000>; + opp-microvolt-slow = <915000 900000 925000>; + opp-microvolt-fast = <975000 970000 985000>; opp-microamp-slow = <70000>; opp-microamp-fast = <71000>; }; opp@1200000000 { opp-hz = /bits/ 64 <1200000000>; - opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */ - <910000 925000 935000>; /* Supply vcc1 */ - opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */ - <960000 965000 975000>; /* Supply vcc1 */ + opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */ + <925000 910000 935000>; /* Supply vcc1 */ + opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */ + <965000 960000 975000>; /* Supply vcc1 */ opp-microamp = <70000>; /* Will be used for both slow/fast */ }; }; diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt index 7c85dca4221a..2fd688c8dbdb 100644 --- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt @@ -6,7 +6,7 @@ the first two functions being GPIO in and out. The configuration on the pins includes drive strength and pull-up. Required properties: -- compatible: Should be one of the followings (depending on you SoC): +- compatible: Should be one of the following (depending on your SoC): "allwinner,sun4i-a10-pinctrl" "allwinner,sun5i-a10s-pinctrl" "allwinner,sun5i-a13-pinctrl" diff --git a/Documentation/devicetree/bindings/power/pd-samsung.txt b/Documentation/devicetree/bindings/power/pd-samsung.txt index 7eb9674e9687..549f7dee9b9d 100644 --- a/Documentation/devicetree/bindings/power/pd-samsung.txt +++ b/Documentation/devicetree/bindings/power/pd-samsung.txt @@ -23,7 +23,7 @@ Optional Properties: - clock-names: The following clocks can be specified: - oscclk: Oscillator clock. - clkN: Input clocks to the devices in this power domain. These clocks - will be reparented to oscclk before swithing power domain off. + will be reparented to oscclk before switching power domain off. Their original parent will be brought back after turning on the domain. Maximum of 4 clocks (N = 0 to 3) are supported. - asbN: Clocks required by asynchronous bridges (ASB) present in diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-pwm.txt index e00c2e9f484d..c61bdf8cd41b 100644 --- a/Documentation/devicetree/bindings/pwm/imx-pwm.txt +++ b/Documentation/devicetree/bindings/pwm/imx-pwm.txt @@ -6,8 +6,8 @@ Required properties: - "fsl,imx1-pwm" for PWM compatible with the one integrated on i.MX1 - "fsl,imx27-pwm" for PWM compatible with the one integrated on i.MX27 - reg: physical base address and length of the controller's registers -- #pwm-cells: should be 2. See pwm.txt in this directory for a description of - the cells format. +- #pwm-cells: 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.txt + in this directory for a description of the cells format. - clocks : Clock specifiers for both ipg and per clocks. 
- clock-names : Clock names should include both "ipg" and "per" See the clock consumer binding, @@ -17,7 +17,7 @@ See the clock consumer binding, Example: pwm1: pwm@53fb4000 { - #pwm-cells = <2>; + #pwm-cells = <3>; compatible = "fsl,imx53-pwm", "fsl,imx27-pwm"; reg = <0x53fb4000 0x4000>; clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>, diff --git a/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt b/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt index 2eb9d4ee7dc0..c3c9a1226f9a 100644 --- a/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/armada-380-rtc.txt @@ -1,9 +1,11 @@ -* Real Time Clock of the Armada 38x SoCs +* Real Time Clock of the Armada 38x/7K/8K SoCs -RTC controller for the Armada 38x SoCs +RTC controller for the Armada 38x, 7K and 8K SoCs Required properties: -- compatible : Should be "marvell,armada-380-rtc" +- compatible : Should be one of the following: + "marvell,armada-380-rtc" for Armada 38x SoC + "marvell,armada-8k-rtc" for Aramda 7K/8K SoCs - reg: a list of base address and size pairs, one for each entry in reg-names - reg names: should contain: diff --git a/Documentation/devicetree/bindings/rtc/cortina,gemini.txt b/Documentation/devicetree/bindings/rtc/cortina,gemini.txt new file mode 100644 index 000000000000..4ce4e794ddbb --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/cortina,gemini.txt @@ -0,0 +1,14 @@ +* Cortina Systems Gemini RTC + +Gemini SoC real-time clock. + +Required properties: +- compatible : Should be "cortina,gemini-rtc" + +Examples: + +rtc@45000000 { + compatible = "cortina,gemini-rtc"; + reg = <0x45000000 0x100>; + interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; +}; diff --git a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt index c9d80d7da141..323cf26374cb 100644 --- a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt @@ -8,10 +8,13 @@ Required properties: region. - interrupts: rtc alarm interrupt +Optional properties: +- interrupts: dryice security violation interrupt + Example: rtc@80056000 { compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; reg = <0x80056000 2000>; - interrupts = <29>; + interrupts = <29 56>; }; diff --git a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt index 1ad4c1c2b3b3..85be53a42180 100644 --- a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt +++ b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt @@ -1,7 +1,8 @@ * Maxim DS3231 Real Time Clock Required properties: -see: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst +- compatible: Should contain "maxim,ds3231". +- reg: I2C address for chip. Optional property: - #clock-cells: Should be 1. diff --git a/Documentation/devicetree/bindings/rtc/pcf8563.txt b/Documentation/devicetree/bindings/rtc/pcf8563.txt index 086c998c5561..36984acbb383 100644 --- a/Documentation/devicetree/bindings/rtc/pcf8563.txt +++ b/Documentation/devicetree/bindings/rtc/pcf8563.txt @@ -3,7 +3,8 @@ Philips PCF8563/Epson RTC8564 Real Time Clock Required properties: -see: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst +- compatible: Should contain "nxp,pcf8563". +- reg: I2C address for chip. Optional property: - #clock-cells: Should be 0. 
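[Editor's note: neither the maxim,ds3231.txt nor the pcf8563.txt hunk above carries an example node. For illustration only -- not part of the patch; the node label, unit address and 0x51 register value are assumptions based on the PCF8563's usual I2C address -- a consumer node following the updated binding might look like:

	rtc@51 {
		compatible = "nxp,pcf8563";
		reg = <0x51>;
		#clock-cells = <0>;
	};
]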
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt new file mode 100644 index 000000000000..e2837b951237 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt @@ -0,0 +1,27 @@ +STM32 Real Time Clock + +Required properties: +- compatible: "st,stm32-rtc". +- reg: address range of rtc register set. +- clocks: reference to the clock entry ck_rtc. +- interrupt-parent: phandle for the interrupt controller. +- interrupts: rtc alarm interrupt. +- st,syscfg: phandle for pwrcfg, mandatory to disable/enable backup domain + (RTC registers) write protection. + +Optional properties (to override default ck_rtc parent clock): +- assigned-clocks: reference to the ck_rtc clock entry. +- assigned-clock-parents: phandle of the new parent clock of ck_rtc. + +Example: + + rtc: rtc@40002800 { + compatible = "st,stm32-rtc"; + reg = <0x40002800 0x400>; + clocks = <&rcc 1 CLK_RTC>; + assigned-clocks = <&rcc 1 CLK_RTC>; + assigned-clock-parents = <&rcc 1 CLK_LSE>; + interrupt-parent = <&exti>; + interrupts = <17 1>; + st,syscfg = <&pwrcfg>; + }; diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt index f007e428a1ab..945934918b71 100644 --- a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt @@ -8,10 +8,20 @@ Required properties: memory mapped region. - interrupts : IRQ lines for the RTC alarm 0 and alarm 1, in that order. +Required properties for new device trees +- clocks : phandle to the 32kHz external oscillator +- clock-output-names : name of the LOSC clock created +- #clock-cells : must be equals to 1. The RTC provides two clocks: the + LOSC and its external output, with index 0 and 1 + respectively. + Example: rtc: rtc@01f00000 { compatible = "allwinner,sun6i-a31-rtc"; reg = <0x01f00000 0x54>; interrupts = <0 40 4>, <0 41 4>; + clock-output-names = "osc32k"; + clocks = <&ext_osc32k>; + #clock-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.txt b/Documentation/devicetree/bindings/soc/rockchip/grf.txt index c6e62cb30712..a0685c209218 100644 --- a/Documentation/devicetree/bindings/soc/rockchip/grf.txt +++ b/Documentation/devicetree/bindings/soc/rockchip/grf.txt @@ -10,7 +10,7 @@ From RK3368 SoCs, the GRF is divided into two sections, Required Properties: -- compatible: GRF should be one of the followings +- compatible: GRF should be one of the following: - "rockchip,rk3036-grf", "syscon": for rk3036 - "rockchip,rk3066-grf", "syscon": for rk3066 - "rockchip,rk3188-grf", "syscon": for rk3188 @@ -18,7 +18,7 @@ Required Properties: - "rockchip,rk3288-grf", "syscon": for rk3288 - "rockchip,rk3368-grf", "syscon": for rk3368 - "rockchip,rk3399-grf", "syscon": for rk3399 -- compatible: PMUGRF should be one of the followings +- compatible: PMUGRF should be one of the following: - "rockchip,rk3368-pmugrf", "syscon": for rk3368 - "rockchip,rk3399-pmugrf", "syscon": for rk3399 - compatible: SGRF should be one of the following diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt index 4ea29aa9af59..a6600f6dea64 100644 --- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt +++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt @@ -5,7 +5,7 @@ audio data transfer between devices in the system. 
Required properties: -- compatible: should be one of the followings +- compatible: should be one of the following: - "rockchip,rk3066-i2s": for rk3066 - "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188 - "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288 @@ -17,7 +17,7 @@ Required properties: Documentation/devicetree/bindings/dma/dma.txt - dma-names: should include "tx" and "rx". - clocks: a list of phandle + clock-specifer pairs, one for each entry in clock-names. -- clock-names: should contain followings: +- clock-names: should contain the following: - "i2s_hclk": clock for I2S BUS - "i2s_clk" : clock for I2S controller - rockchip,playback-channels: max playback channels, if not set, 8 channels default. diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt index 3033bd8aab0f..3863531d1e6d 100644 --- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt +++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt @@ -14,7 +14,7 @@ Required properties: - dma-names: should include "tx" and "rx". - clocks: a list of phandle + clock-specifer pairs, one for each entry in clock-names. -- clock-names: should contain followings: +- clock-names: should contain the following: - "apb": the parent APB clock for this controller - "codec": the parent module clock diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt index f4adc58f82ba..ee21da865771 100644 --- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt +++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt @@ -5,7 +5,7 @@ audio data transfer between devices in the system. Required properties: -- compatible: should be one of the followings +- compatible: should be one of the following: - "allwinner,sun4i-a10-i2s" - "allwinner,sun6i-a31-i2s" - reg: physical base address of the controller and length of memory mapped @@ -15,7 +15,7 @@ Required properties: Documentation/devicetree/bindings/dma/dma.txt - dma-names: should include "tx" and "rx". - clocks: a list of phandle + clock-specifer pairs, one for each entry in clock-names. -- clock-names: should contain followings: +- clock-names: should contain the following: - "apb" : clock for the I2S bus interface - "mod" : module clock for the I2S controller - #sound-dai-cells : Must be equal to 0 diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt index 66223d561972..20ca4ef9d776 100644 --- a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt @@ -17,6 +17,12 @@ Required properties: calibration data, as specified by the SoC reference manual. The first cell of each pair is the value to be written to TTCFGR, and the second is the value to be written to TSCFGR. +- #thermal-sensor-cells : Must be 1. The sensor specifier is the monitoring + site ID, and represents the "n" in TRITSRn and TRATSRn. + +Optional property: +- little-endian : If present, the TMU registers are little endian. If absent, + the default is big endian. 
Example: @@ -60,4 +66,5 @@ tmu@f0000 { 0x00030000 0x00000012 0x00030001 0x0000001d>; + #thermal-sensor-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt new file mode 100644 index 000000000000..07a9713ae6a7 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt @@ -0,0 +1,56 @@ +* DT bindings for Renesas R-Car Gen3 Thermal Sensor driver + +On R-Car Gen3 SoCs, the thermal sensor controllers (TSC) control the thermal +sensors (THS) which are the analog circuits for measuring temperature (Tj) +inside the LSI. + +Required properties: +- compatible : "renesas,<soctype>-thermal", + Examples with soctypes are: + - "renesas,r8a7795-thermal" (R-Car H3) + - "renesas,r8a7796-thermal" (R-Car M3-W) +- reg : Address ranges of the thermal registers. Each sensor + needs one address range. Sorting must be done in + increasing order according to datasheet, i.e. + TSC1, TSC2, ... +- clocks : Must contain a reference to the functional clock. +- #thermal-sensor-cells : must be <1>. + +Optional properties: + +- interrupts : interrupts routed to the TSC (3 for H3 and M3-W) +- power-domain : Must contain a reference to the power domain. This + property is mandatory if the thermal sensor instance + is part of a controllable power domain. + +Example: + + tsc: thermal@e6198000 { + compatible = "renesas,r8a7795-thermal"; + reg = <0 0xe6198000 0 0x68>, + <0 0xe61a0000 0 0x5c>, + <0 0xe61a8000 0 0x5c>; + interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 522>; + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; + #thermal-sensor-cells = <1>; + status = "okay"; + }; + + thermal-zones { + sensor_thermal1: sensor-thermal1 { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsc 0>; + + trips { + sensor1_crit: sensor1-crit { + temperature = <90000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt b/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt new file mode 100644 index 000000000000..3dc1c6bf0478 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt @@ -0,0 +1,116 @@ +* ZTE zx2967 family Thermal + +Required Properties: +- compatible: should be one of the following. + * zte,zx296718-thermal +- reg: physical base address of the controller and length of memory mapped + region. +- clocks : Pairs of phandle and specifier referencing the controller's clocks. +- clock-names: "topcrm" for the topcrm clock. + "apb" for the apb clock. +- #thermal-sensor-cells: must be 0. + +Please note: slope coefficient defined in thermal-zones section need to be +multiplied by 1000. 
+ +Example for tempsensor: + + tempsensor: tempsensor@148a000 { + compatible = "zte,zx296718-thermal"; + reg = <0x0148a000 0x20>; + clocks = <&topcrm TEMPSENSOR_GATE>, <&audiocrm AUDIO_TS_PCLK>; + clock-names = "topcrm", "apb"; + #thermal-sensor-cells = <0>; + }; + +Example for cooling device: + + cooling_dev: cooling_dev { + cluster0_cooling_dev: cluster0-cooling-dev { + #cooling-cells = <2>; + cpumask = <0xf>; + capacitance = <1500>; + }; + + cluster1_cooling_dev: cluster1-cooling-dev { + #cooling-cells = <2>; + cpumask = <0x30>; + capacitance = <2000>; + }; + }; + +Example for thermal zones: + + thermal-zones { + zx296718_thermal: zx296718_thermal { + polling-delay-passive = <500>; + polling-delay = <1000>; + sustainable-power = <6500>; + + thermal-sensors = <&tempsensor 0>; + /* + * slope need to be multiplied by 1000. + */ + coefficients = <1951 (-922)>; + + trips { + trip0: switch_on_temperature { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + trip1: desired_temperature { + temperature = <100000>; + hysteresis = <2000>; + type = "passive"; + }; + + crit: critical_temperature { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&trip0>; + cooling-device = <&gpu 2 5>; + }; + + map1 { + trip = <&trip0>; + cooling-device = <&cluster0_cooling_dev 1 2>; + }; + + map2 { + trip = <&trip1>; + cooling-device = <&cluster0_cooling_dev 1 2>; + }; + + map3 { + trip = <&crit>; + cooling-device = <&cluster0_cooling_dev 1 2>; + }; + + map4 { + trip = <&trip0>; + cooling-device = <&cluster1_cooling_dev 1 2>; + contribution = <9000>; + }; + + map5 { + trip = <&trip1>; + cooling-device = <&cluster1_cooling_dev 1 2>; + contribution = <4096>; + }; + + map6 { + trip = <&crit>; + cooling-device = <&cluster1_cooling_dev 1 2>; + contribution = <4096>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index bd0ed3cb4994..ec0bfb9bbebd 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -332,6 +332,7 @@ virtio Virtual I/O Device Specification, developed by the OASIS consortium vivante Vivante Corporation voipac Voipac Technologies s.r.o. wd Western Digital Corp. +wetek WeTek Electronics, limited. wexler Wexler winbond Winbond Electronics corp. wlf Wolfson Microelectronics diff --git a/Documentation/devicetree/bindings/watchdog/cortina,gemin-watchdog.txt b/Documentation/devicetree/bindings/watchdog/cortina,gemin-watchdog.txt new file mode 100644 index 000000000000..bc4b865d178b --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/cortina,gemin-watchdog.txt @@ -0,0 +1,17 @@ +Cortina Systems Gemini SoC Watchdog + +Required properties: +- compatible : must be "cortina,gemini-watchdog" +- reg : shall contain base register location and length +- interrupts : shall contain the interrupt for the watchdog + +Optional properties: +- timeout-sec : the default watchdog timeout in seconds. 
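Drivers do not usually parse timeout-sec themselves; the watchdog core picks it up via watchdog_init_timeout(). A short sketch of that pattern, assuming only the generic watchdog API; the example_* name and the default value are illustrative:

#include <linux/watchdog.h>

#define EXAMPLE_WDT_DEFAULT_TIMEOUT	60	/* seconds, illustrative */

static int example_wdt_setup_timeout(struct watchdog_device *wdd,
				     struct device *dev)
{
	/*
	 * Fallback used when neither timeout-sec nor a module parameter
	 * provides a valid value.
	 */
	wdd->timeout = EXAMPLE_WDT_DEFAULT_TIMEOUT;

	/* Reads the optional "timeout-sec" property from dev's DT node. */
	return watchdog_init_timeout(wdd, 0, dev);
}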
+ +Example: + +watchdog@41000000 { + compatible = "cortina,gemini-watchdog"; + reg = <0x41000000 0x1000>; + interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; +}; diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt index 8f3d96af81d7..1f6e101e299a 100644 --- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt @@ -6,10 +6,11 @@ occurred. Required properties: - compatible : should be one among the following - (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs - (b) "samsung,exynos5250-wdt" for Exynos5250 - (c) "samsung,exynos5420-wdt" for Exynos5420 - (c) "samsung,exynos7-wdt" for Exynos7 + - "samsung,s3c2410-wdt" for S3C2410 + - "samsung,s3c6410-wdt" for S3C6410, S5PV210 and Exynos4 + - "samsung,exynos5250-wdt" for Exynos5250 + - "samsung,exynos5420-wdt" for Exynos5420 + - "samsung,exynos7-wdt" for Exynos7 - reg : base physical address of the controller and length of memory mapped region. diff --git a/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt b/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt new file mode 100644 index 000000000000..06ce67766756 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt @@ -0,0 +1,32 @@ +ZTE zx2967 Watchdog timer + +Required properties: + +- compatible : should be one of the following. + * zte,zx296718-wdt +- reg : Specifies base physical address and size of the registers. +- clocks : Pairs of phandle and specifier referencing the controller's clocks. +- resets : Reference to the reset controller controlling the watchdog + controller. + +Optional properties: + +- timeout-sec : Contains the watchdog timeout in seconds. +- zte,wdt-reset-sysctrl : Directs how the watchdog resets the system. + Required only if the system should be restarted when the watchdog + fires; it can be omitted otherwise. + It should include the following fields: + * phandle of aon-sysctrl. + * offset of the register to be written, which should be 0xb0. + * configuration value to be written to aon-sysctrl. + * bit mask; the corresponding bits will be affected.
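A hedged sketch of how a driver could consume zte,wdt-reset-sysctrl: the phandle cell is resolved to the aon-sysctrl regmap through syscon, and the remaining cells supply the register offset, the value and the bit mask. The function and variable names are illustrative, not taken from the actual driver:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_wdt_apply_reset_config(struct device_node *np)
{
	struct regmap *aon;
	u32 offset, val, mask;
	int ret;

	/* Cell 0 is the aon-sysctrl phandle. */
	aon = syscon_regmap_lookup_by_phandle(np, "zte,wdt-reset-sysctrl");
	if (IS_ERR(aon))
		return PTR_ERR(aon);

	/* Cells 1..3: register offset (0xb0), value, bit mask. */
	ret = of_property_read_u32_index(np, "zte,wdt-reset-sysctrl", 1, &offset);
	ret |= of_property_read_u32_index(np, "zte,wdt-reset-sysctrl", 2, &val);
	ret |= of_property_read_u32_index(np, "zte,wdt-reset-sysctrl", 3, &mask);
	if (ret)
		return -EINVAL;

	/* Only the bits set in "mask" are affected. */
	return regmap_update_bits(aon, offset, mask, val);
}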
+ +Example: + +wdt: watchdog@1465000 { + compatible = "zte,zx296718-wdt"; + reg = <0x1465000 0x1000>; + clocks = <&topcrm WDT_WCLK>; + resets = <&toprst 35>; + zte,wdt-reset-sysctrl = <&aon_sysctrl 0xb0 1 0x115>; +}; diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index ace63cd7af8c..fdcfdd79682a 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -58,7 +58,8 @@ prototypes: int (*permission) (struct inode *, int, unsigned int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct dentry *, struct kstat *, + u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); void (*update_time)(struct inode *, struct timespec *, int); diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt index 50a3e01a36f8..e5177cb31a04 100644 --- a/Documentation/filesystems/autofs4-mount-control.txt +++ b/Documentation/filesystems/autofs4-mount-control.txt @@ -179,6 +179,7 @@ struct autofs_dev_ioctl { * including this struct */ __s32 ioctlfd; /* automount command fd */ + /* Command parameters */ union { struct args_protover protover; struct args_protosubver protosubver; diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt index 8fac3fe7b8c9..f10dd590f69f 100644 --- a/Documentation/filesystems/autofs4.txt +++ b/Documentation/filesystems/autofs4.txt @@ -65,7 +65,7 @@ directory is a mount trap only if the filesystem is mounted *direct* and the root is empty. Directories created in the root directory are mount traps only if the -filesystem is mounted *indirect* and they are empty. +filesystem is mounted *indirect* and they are empty. Directories further down the tree depend on the *maxproto* mount option and particularly whether it is less than five or not. @@ -352,7 +352,7 @@ Communicating with autofs: root directory ioctls ------------------------------------------------ The root directory of an autofs filesystem will respond to a number of -ioctls. The process issuing the ioctl must have the CAP_SYS_ADMIN +ioctls. The process issuing the ioctl must have the CAP_SYS_ADMIN capability, or must be the automount daemon. The available ioctl commands are: @@ -425,8 +425,20 @@ Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure: * including this struct */ __s32 ioctlfd; /* automount command fd */ - __u32 arg1; /* Command parameters */ - __u32 arg2; + /* Command parameters */ + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; char path[0]; }; @@ -446,25 +458,22 @@ Commands are: set version numbers. - **AUTOFS_DEV_IOCTL_OPENMOUNT_CMD**: return an open file descriptor on the root of an autofs filesystem. The filesystem is identified - by name and device number, which is stored in `arg1`. Device - numbers for existing filesystems can be found in + by name and device number, which is stored in `openmount.devid`. 
+ Device numbers for existing filesystems can be found in `/proc/self/mountinfo`. - **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`. - **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in catatonic mode, this can provide the write end of a new pipe - in `arg1` to re-establish communication with a daemon. The - process group of the calling process is used to identify the + in `setpipefd.pipefd` to re-establish communication with a daemon. + The process group of the calling process is used to identify the daemon. - **AUTOFS_DEV_IOCTL_REQUESTER_CMD**: `path` should be a name within the filesystem that has been auto-mounted on. - arg1 is the dev number of the underlying autofs. On successful - return, `arg1` and `arg2` will be the UID and GID of the process - which triggered that mount. - + On successful return, `requester.uid` and `requester.gid` will be + the UID and GID of the process which triggered that mount. - **AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD**: Check if path is a mountpoint of a particular type - see separate documentation for details. - - **AUTOFS_DEV_IOCTL_PROTOVER_CMD**: - **AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD**: - **AUTOFS_DEV_IOCTL_READY_CMD**: @@ -474,7 +483,7 @@ Commands are: - **AUTOFS_DEV_IOCTL_EXPIRE_CMD**: - **AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD**: These all have the same function as the similarly named **AUTOFS_IOC** ioctls, except - that **FAIL** can be given an explicit error number in `arg1` + that **FAIL** can be given an explicit error number in `fail.status` instead of assuming `ENOENT`, and this **EXPIRE** command corresponds to **AUTOFS_IOC_EXPIRE_MULTI**. @@ -512,7 +521,7 @@ always be mounted "shared". e.g. > `mount --make-shared /autofs/mount/point` -The automount daemon is only able to mange a single mount location for +The automount daemon is only able to manage a single mount location for an autofs filesystem and if mounts on that are not 'shared', other locations will not behave as expected. In particular access to those other locations will likely result in the `ELOOP` error diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt index f5306ee40ea9..0b302a11718a 100644 --- a/Documentation/filesystems/ceph.txt +++ b/Documentation/filesystems/ceph.txt @@ -98,11 +98,10 @@ Mount Options size. rsize=X - Specify the maximum read size in bytes. By default there is no - maximum. + Specify the maximum read size in bytes. Default: 64 MB. rasize=X - Specify the maximum readahead. + Specify the maximum readahead. Default: 8 MB. mount_timeout=X Specify the timeout value for mount (in seconds), in the case diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 753dd4f96afe..4f6531a4701b 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -125,13 +125,14 @@ active_logs=%u Support configuring the number of active logs. In the disable_ext_identify Disable the extension list configured by mkfs, so f2fs does not aware of cold files such as media files. inline_xattr Enable the inline xattrs feature. +noinline_xattr Disable the inline xattrs feature. inline_data Enable the inline data feature: New created small(<~3.4k) files can be written into inode block. inline_dentry Enable the inline dir feature: data in new created directory entries can be written into inode block. The space of inode block which is used to store inline dentries is limited to ~3.4k. -noinline_dentry Diable the inline dentry feature. +noinline_dentry Disable the inline dentry feature. 
flush_merge Merge concurrent cache_flush commands as much as possible to eliminate redundant command issues. If the underlying device handles the cache_flush command relatively slowly, @@ -157,6 +158,8 @@ data_flush Enable data flushing before checkpoint in order to mode=%s Control block allocation mode which supports "adaptive" and "lfs". In "lfs" mode, there should be no random writes towards main area. +io_bits=%u Set the bit size of write IO requests. It should be set + with "mode=lfs". ================================================================================ DEBUGFS ENTRIES @@ -174,7 +177,7 @@ f2fs. Each file shows the whole f2fs information. SYSFS ENTRIES ================================================================================ -Information about mounted f2f2 file systems can be found in +Information about mounted f2fs file systems can be found in /sys/fs/f2fs. Each mounted filesystem will have a directory in /sys/fs/f2fs based on its device name (i.e., /sys/fs/f2fs/sda). The files in each per-device directory are shown in table below. diff --git a/Documentation/filesystems/quota.txt b/Documentation/filesystems/quota.txt index 29fc01552646..32874b06ebe9 100644 --- a/Documentation/filesystems/quota.txt +++ b/Documentation/filesystems/quota.txt @@ -6,7 +6,7 @@ Quota subsystem allows system administrator to set limits on used space and number of used inodes (inode is a filesystem structure which is associated with each file or directory) for users and/or groups. For both used space and number of used inodes there are actually two limits. The first one is called softlimit -and the second one hardlimit. An user can never exceed a hardlimit for any +and the second one hardlimit. A user can never exceed a hardlimit for any resource (unless he has CAP_SYS_RESOURCE capability). User is allowed to exceed softlimit but only for limited period of time. This period is called "grace period" or "grace time". 
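As a userspace illustration of the two limits, they map onto the dqb_bsoftlimit and dqb_bhardlimit fields of the quotactl(2) interface. A minimal sketch; the device path, uid and sizes are made-up values, not taken from this document:

#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>

/*
 * Give uid 1000 a 1 GiB softlimit and a 2 GiB hardlimit on /dev/sda1.
 * Block limits are expressed in 1 KiB units.
 */
static int example_set_space_limits(void)
{
	struct dqblk dq;

	memset(&dq, 0, sizeof(dq));
	dq.dqb_bsoftlimit = 1024 * 1024;	/* 1 GiB */
	dq.dqb_bhardlimit = 2 * 1024 * 1024;	/* 2 GiB */
	dq.dqb_valid = QIF_BLIMITS;		/* only change the block limits */

	return quotactl(QCMD(Q_SETQUOTA, USRQUOTA), "/dev/sda1", 1000,
			(caddr_t)&dq);
}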
When grace time is over, user is not able to allocate diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index b968084eeac1..569211703721 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -382,7 +382,8 @@ struct inode_operations { int (*permission) (struct inode *, int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct dentry *, struct kstat *, + u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); void (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index 1bba38dd2637..820d9040de16 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 @@ -33,6 +33,7 @@ Supported adapters: * Intel DNV (SOC) * Intel Broxton (SOC) * Intel Lewisburg (PCH) + * Intel Gemini Lake (SOC) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/Documentation/i2c/muxes/i2c-mux-gpio b/Documentation/i2c/muxes/i2c-mux-gpio index d4d91a53fc39..7a8d7d261632 100644 --- a/Documentation/i2c/muxes/i2c-mux-gpio +++ b/Documentation/i2c/muxes/i2c-mux-gpio @@ -1,11 +1,11 @@ -Kernel driver i2c-gpio-mux +Kernel driver i2c-mux-gpio Author: Peter Korsgaard <peter.korsgaard@barco.com> Description ----------- -i2c-gpio-mux is an i2c mux driver providing access to I2C bus segments +i2c-mux-gpio is an i2c mux driver providing access to I2C bus segments from a master I2C bus and a hardware MUX controlled through GPIO pins. E.G.: @@ -26,16 +26,16 @@ according to the settings of the GPIO pins 1..N. Usage ----- -i2c-gpio-mux uses the platform bus, so you need to provide a struct +i2c-mux-gpio uses the platform bus, so you need to provide a struct platform_device with the platform_data pointing to a struct -gpio_i2cmux_platform_data with the I2C adapter number of the master +i2c_mux_gpio_platform_data with the I2C adapter number of the master bus, the number of bus segments to create and the GPIO pins used -to control it. See include/linux/i2c-gpio-mux.h for details. +to control it. See include/linux/i2c-mux-gpio.h for details. E.G. 
something like this for a MUX providing 4 bus segments controlled through 3 GPIO pins: -#include <linux/i2c-gpio-mux.h> +#include <linux/i2c-mux-gpio.h> #include <linux/platform_device.h> static const unsigned myboard_gpiomux_gpios[] = { @@ -46,7 +46,7 @@ static const unsigned myboard_gpiomux_values[] = { 0, 1, 2, 3 }; -static struct gpio_i2cmux_platform_data myboard_i2cmux_data = { +static struct i2c_mux_gpio_platform_data myboard_i2cmux_data = { .parent = 1, .base_nr = 2, /* optional */ .values = myboard_gpiomux_values, @@ -57,7 +57,7 @@ static struct gpio_i2cmux_platform_data myboard_i2cmux_data = { }; static struct platform_device myboard_i2cmux = { - .name = "i2c-gpio-mux", + .name = "i2c-mux-gpio", .id = 0, .dev = { .platform_data = &myboard_i2cmux_data, @@ -66,14 +66,14 @@ static struct platform_device myboard_i2cmux = { If you don't know the absolute GPIO pin numbers at registration time, you can instead provide a chip name (.chip_name) and relative GPIO pin -numbers, and the i2c-gpio-mux driver will do the work for you, +numbers, and the i2c-mux-gpio driver will do the work for you, including deferred probing if the GPIO chip isn't immediately available. Device Registration ------------------- -When registering your i2c-gpio-mux device, you should pass the number +When registering your i2c-mux-gpio device, you should pass the number of any GPIO pin it uses as the device ID. This guarantees that every instance has a different ID. diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt index e5c7254e73d7..5bd590335839 100644 --- a/Documentation/kselftest.txt +++ b/Documentation/kselftest.txt @@ -59,14 +59,14 @@ Install selftests ================= You can use kselftest_install.sh tool installs selftests in default -location which is tools/testing/selftests/kselftest or an user specified +location which is tools/testing/selftests/kselftest or a user specified location. To install selftests in default location: $ cd tools/testing/selftests $ ./kselftest_install.sh -To install selftests in an user specified location: +To install selftests in a user specified location: $ cd tools/testing/selftests $ ./kselftest_install.sh install_dir @@ -95,3 +95,15 @@ In general, the rules for selftests are * Don't cause the top-level "make run_tests" to fail if your feature is unconfigured. + +Contributing new tests(details) +=============================== + + * Use TEST_GEN_XXX if such binaries or files are generated during + compiling. + TEST_PROGS, TEST_GEN_PROGS mean it is the excutable tested by + default. + TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the + executable which is not tested by default. + TEST_FILES, TEST_GEN_FILES mean it is the file which is used by + test. diff --git a/Documentation/media/dvb-drivers/ci.rst b/Documentation/media/dvb-drivers/ci.rst index 8124bf5ce5ef..69b07e9d1816 100644 --- a/Documentation/media/dvb-drivers/ci.rst +++ b/Documentation/media/dvb-drivers/ci.rst @@ -20,7 +20,7 @@ existing low level CI API. ca_zap ~~~~~~ -An userspace application, like ``ca_zap`` is required to handle encrypted +A userspace application, like ``ca_zap`` is required to handle encrypted MPEG-TS streams. 
The ``ca_zap`` userland application is in charge of sending the diff --git a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst index bf31411fc9df..899fd5c3545e 100644 --- a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst +++ b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst @@ -9,7 +9,7 @@ frontend parameters The kind of parameters passed to the frontend device for tuning depend on the kind of hardware you are using. -The struct ``dvb_frontend_parameters`` uses an union with specific +The struct ``dvb_frontend_parameters`` uses a union with specific per-system parameters. However, as newer delivery systems required more data, the structure size weren't enough to fit, and just extending its size would break the existing applications. So, those parameters were @@ -23,7 +23,7 @@ So, newer applications should use instead, in order to be able to support the newer System Delivery like DVB-S2, DVB-T2, DVB-C2, ISDB, etc. -All kinds of parameters are combined as an union in the +All kinds of parameters are combined as a union in the FrontendParameters structure: diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index 5de846d3ecc0..670f3ded0802 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -114,11 +114,11 @@ config options. Memory model -> Sparse Memory (CONFIG_SPARSEMEM) Allow for memory hot-add (CONFIG_MEMORY_HOTPLUG) -- To enable memory removal, the followings are also necessary +- To enable memory removal, the following are also necessary Allow for memory hot remove (CONFIG_MEMORY_HOTREMOVE) Page Migration (CONFIG_MIGRATION) -- For ACPI memory hotplug, the followings are also necessary +- For ACPI memory hotplug, the following are also necessary Memory hotplug (under ACPI Support menu) (CONFIG_ACPI_HOTPLUG_MEMORY) This option can be kernel module. diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt index a15ea602aa52..b9482ca10254 100644 --- a/Documentation/networking/cdc_mbim.txt +++ b/Documentation/networking/cdc_mbim.txt @@ -38,7 +38,7 @@ Basic usage =========== MBIM functions are inactive when unmanaged. The cdc_mbim driver only -provides an userspace interface to the MBIM control channel, and will +provides a userspace interface to the MBIM control channel, and will not participate in the management of the function. This implies that a userspace MBIM management application always is required to enable a MBIM function. @@ -200,7 +200,7 @@ structure described in section 10.5.29 of [1]. The DSS VLAN subdevices are used as a practical interface between the shared MBIM data channel and a MBIM DSS aware userspace application. It is not intended to be presented as-is to an end user. The -assumption is that an userspace application initiating a DSS session +assumption is that a userspace application initiating a DSS session also takes care of the necessary framing of the DSS data, presenting the stream to the end user in an appropriate way for the stream type. diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 129f7c0e1483..21d2d48f87a2 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeu under the device's power directory. 
Notification mechanisms: -The per-device PM QoS framework has 2 different and distinct notification trees: -a per-device notification tree and a global notification tree. +The per-device PM QoS framework has a per-device notification tree. int dev_pm_qos_add_notifier(device, notifier): Adds a notification callback function for the device. @@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only). int dev_pm_qos_remove_notifier(device, notifier): Removes the notification callback function for the device. -int dev_pm_qos_add_global_notifier(notifier): -Adds a notification callback function in the global notification tree of the -framework. -The callback is called when the aggregated value for any device is changed -(for resume latency device PM QoS only). - -int dev_pm_qos_remove_global_notifier(notifier): -Removes the notification callback function from the global notification tree -of the framework. - Active state latency tolerance diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 4870980e967e..64546eb9a16a 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -100,7 +100,7 @@ knows what to do to handle the device). * If the suspend callback returns an error code different from -EBUSY and -EAGAIN, the PM core regards this as a fatal error and will refuse to run the helper functions described in Section 4 for the device until its status - is directly set to either'active', or 'suspended' (the PM core provides + is directly set to either 'active', or 'suspended' (the PM core provides special helper functions for this purpose). In particular, if the driver requires remote wakeup capability (i.e. hardware @@ -217,7 +217,7 @@ defined in include/linux/pm.h: one to complete spinlock_t lock; - - lock used for synchronisation + - lock used for synchronization atomic_t usage_count; - the usage counter of the device @@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the probe. This can happen with systems such as the network device layer. It may be desirable to suspend the device once ->probe() has finished. -Therefore the driver core uses the asyncronous pm_request_idle() to submit a +Therefore the driver core uses the asynchronous pm_request_idle() to submit a request to execute the subsystem-level idle callback for the device at that time. A driver that makes use of the runtime autosuspend feature, may want to update the last busy mark before returning from ->probe(). diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index 00ffdf187f0b..234ddabb23ef 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas @@ -549,7 +549,7 @@ ii. Reduced by 1 max cmds sent to FW from Driver to make the reply_q_sz same 3 Older Version : 00.00.03.02 i. Send stop adapter to FW & Dump pending FW cmds before declaring adapter dead. - New varible added to set dbg level. + New variable added to set dbg level. ii. Disable interrupt made as fn pointer as they are different for 1068 / 1078 iii. Frame count optimization. Main frame can contain 2 SGE for 64 bit SGLs and 3 SGE for 32 bit SGL diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 3849814bfe6d..0e03baf271bd 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt @@ -1151,8 +1151,21 @@ access the data: usage. This is called key->payload.rcu_data0. 
The following accessors wrap the RCU calls to this element: - rcu_assign_keypointer(struct key *key, void *data); - void *rcu_dereference_key(struct key *key); + (a) Set or change the first payload pointer: + + rcu_assign_keypointer(struct key *key, void *data); + + (b) Read the first payload pointer with the key semaphore held: + + [const] void *dereference_key_locked([const] struct key *key); + + Note that the return value will inherit its constness from the key + parameter. Static analysis will give an error if it things the lock + isn't held. + + (c) Read the first payload pointer with the RCU read lock held: + + const void *dereference_key_rcu(const struct key *key); =================== diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst index 168d0cfab1ce..9eeb9b468706 100644 --- a/Documentation/sound/hd-audio/notes.rst +++ b/Documentation/sound/hd-audio/notes.rst @@ -697,7 +697,7 @@ If it's a regression, at best, send alsa-info outputs of both working and non-working kernels. This is really helpful because we can compare the codec registers directly. -Send a bug report either the followings: +Send a bug report either the following: kernel-bugzilla https://bugzilla.kernel.org/ diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index ea8d7b4e53f0..32a25fad0c1b 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -155,7 +155,9 @@ or: There are a few functions and macros that architectures must implement in order to take advantage of this optimization. If there is no architecture support, we -simply fall back to a traditional, load, test, and jump sequence. +simply fall back to a traditional, load, test, and jump sequence. Also, the +struct jump_entry table must be at least 4-byte aligned because the +static_key->entry field makes use of the two least significant bits. * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt index fe51a5aa8963..0e5543a920e5 100644 --- a/Documentation/vm/userfaultfd.txt +++ b/Documentation/vm/userfaultfd.txt @@ -149,7 +149,7 @@ migration thread in the QEMU running in the destination node will receive the page that triggered the userfault and it'll map it as usual with the UFFDIO_COPY|ZEROPAGE (without actually knowing if it was spontaneously sent by the source or if it was an urgent page -requested through an userfault). +requested through a userfault). By the time the userfaults start, the QEMU in the destination node doesn't need to keep any per-page state bitmap relative to the live diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt index ea277478982f..9b93953f69cf 100644 --- a/Documentation/watchdog/watchdog-kernel-api.txt +++ b/Documentation/watchdog/watchdog-kernel-api.txt @@ -280,6 +280,12 @@ To disable the watchdog on reboot, the user must call the following helper: static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd); +To disable the watchdog when unregistering the watchdog, the user must call +the following helper. Note that this will only stop the watchdog if the +nowayout flag is not set. 
+ +static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd); + To change the priority of the restart handler the following helper should be used: diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index e21850e270a0..4f7d86dd0a5d 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -209,6 +209,11 @@ timeout: Initial watchdog timeout in seconds (0<timeout<516, default=60) nowayout: Watchdog cannot be stopped once started (default=kernel config parameter) ------------------------------------------------- +nic7018_wdt: +timeout: Initial watchdog timeout in seconds (0<timeout<464, default=80) +nowayout: Watchdog cannot be stopped once started + (default=kernel config parameter) +------------------------------------------------- nuc900_wdt: heartbeat: Watchdog heartbeats in seconds. (default = 15) diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt index d918d268cd72..51cf6fa5591f 100644 --- a/Documentation/x86/intel_rdt_ui.txt +++ b/Documentation/x86/intel_rdt_ui.txt @@ -212,3 +212,117 @@ Finally we move core 4-7 over to the new group and make sure that the kernel and the tasks running there get 50% of the cache. # echo C0 > p0/cpus + +4) Locking between applications + +Certain operations on the resctrl filesystem, composed of read/writes +to/from multiple files, must be atomic. + +As an example, the allocation of an exclusive reservation of L3 cache +involves: + + 1. Read the cbmmasks from each directory + 2. Find a contiguous set of bits in the global CBM bitmask that is clear + in any of the directory cbmmasks + 3. Create a new directory + 4. Set the bits found in step 2 to the new directory "schemata" file + +If two applications attempt to allocate space concurrently then they can +end up allocating the same bits so the reservations are shared instead of +exclusive. + +To coordinate atomic operations on the resctrlfs and to avoid the problem +above, the following locking procedure is recommended: + +Locking is based on flock, which is available in libc and also as a shell +script command + +Write lock: + + A) Take flock(LOCK_EX) on /sys/fs/resctrl + B) Read/write the directory structure. + C) funlock + +Read lock: + + A) Take flock(LOCK_SH) on /sys/fs/resctrl + B) If success read the directory structure. 
+ C) funlock + +Example with bash: + +# Atomically read directory structure +$ flock -s /sys/fs/resctrl/ find /sys/fs/resctrl + +# Read directory contents and create new subdirectory + +$ cat create-dir.sh +find /sys/fs/resctrl/ > output.txt +mask = function-of(output.txt) +mkdir /sys/fs/resctrl/newres/ +echo mask > /sys/fs/resctrl/newres/schemata + +$ flock /sys/fs/resctrl/ ./create-dir.sh + +Example with C: + +/* + * Example code do take advisory locks + * before accessing resctrl filesystem + */ +#include <sys/file.h> +#include <stdlib.h> + +void resctrl_take_shared_lock(int fd) +{ + int ret; + + /* take shared lock on resctrl filesystem */ + ret = flock(fd, LOCK_SH); + if (ret) { + perror("flock"); + exit(-1); + } +} + +void resctrl_take_exclusive_lock(int fd) +{ + int ret; + + /* release lock on resctrl filesystem */ + ret = flock(fd, LOCK_EX); + if (ret) { + perror("flock"); + exit(-1); + } +} + +void resctrl_release_lock(int fd) +{ + int ret; + + /* take shared lock on resctrl filesystem */ + ret = flock(fd, LOCK_UN); + if (ret) { + perror("flock"); + exit(-1); + } +} + +void main(void) +{ + int fd, ret; + + fd = open("/sys/fs/resctrl", O_DIRECTORY); + if (fd == -1) { + perror("open"); + exit(-1); + } + resctrl_take_shared_lock(fd); + /* code to read directory contents */ + resctrl_release_lock(fd); + + resctrl_take_exclusive_lock(fd); + /* code to read and write directory contents */ + resctrl_release_lock(fd); +} diff --git a/MAINTAINERS b/MAINTAINERS index 8f05facab3b5..00018356f4a5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3452,6 +3452,7 @@ B: https://bugzilla.kernel.org F: Documentation/cpu-freq/ F: drivers/cpufreq/ F: include/linux/cpufreq.h +F: tools/testing/selftests/cpufreq/ CPU FREQUENCY DRIVERS - ARM BIG LITTLE M: Viresh Kumar <viresh.kumar@linaro.org> @@ -6270,6 +6271,11 @@ S: Maintained F: drivers/mfd/lpc_ich.c F: drivers/gpio/gpio-ich.c +IDT VersaClock 5 CLOCK DRIVER +M: Marek Vasut <marek.vasut@gmail.com> +S: Maintained +F: drivers/clk/clk-versaclock5.c + IDE SUBSYSTEM M: "David S. 
Miller" <davem@davemloft.net> L: linux-ide@vger.kernel.org @@ -7280,6 +7286,7 @@ M: Masami Hiramatsu <mhiramat@kernel.org> S: Maintained F: Documentation/kprobes.txt F: include/linux/kprobes.h +F: include/asm-generic/kprobes.h F: kernel/kprobes.c KS0108 LCD CONTROLLER DRIVER @@ -7476,18 +7483,24 @@ L: linuxppc-dev@lists.ozlabs.org Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git S: Supported +F: Documentation/ABI/stable/sysfs-firmware-opal-* +F: Documentation/devicetree/bindings/powerpc/opal/ +F: Documentation/devicetree/bindings/rtc/rtc-opal.txt +F: Documentation/devicetree/bindings/i2c/i2c-opal.txt F: Documentation/powerpc/ F: arch/powerpc/ F: drivers/char/tpm/tpm_ibmvtpm* F: drivers/crypto/nx/ F: drivers/crypto/vmx/ +F: drivers/i2c/busses/i2c-opal.c F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmvnic.* F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* +F: drivers/rtc/rtc-opal.c F: drivers/scsi/ibmvscsi/ +F: drivers/tty/hvc/hvc_opal.c F: tools/testing/selftests/powerpc -N: opal N: /pmac N: powermac N: powernv @@ -11451,6 +11464,14 @@ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ +SILEAD TOUCHSCREEN DRIVER +M: Hans de Goede <hdegoede@redhat.com> +L: linux-input@vger.kernel.org +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: drivers/input/touchscreen/silead.c +F: drivers/platform/x86/silead_dmi.c + SIMPLEFB FB DRIVER M: Hans de Goede <hdegoede@redhat.com> L: linux-fbdev@vger.kernel.org @@ -910,6 +910,18 @@ mod_sign_cmd = true endif export mod_sign_cmd +ifdef CONFIG_STACK_VALIDATION + has_libelf := $(call try-run,\ + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) + ifeq ($(has_libelf),1) + objtool_target := tools/objtool FORCE + else + $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") + SKIP_STACK_VALIDATION := 1 + export SKIP_STACK_VALIDATION + endif +endif + ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1037,18 +1049,6 @@ prepare0: archprepare gcc-plugins # All the preparing.. prepare: prepare0 prepare-objtool -ifdef CONFIG_STACK_VALIDATION - has_libelf := $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) - ifeq ($(has_libelf),1) - objtool_target := tools/objtool FORCE - else - $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") - SKIP_STACK_VALIDATION := 1 - export SKIP_STACK_VALIDATION - endif -endif - PHONY += prepare-objtool prepare-objtool: $(objtool_target) diff --git a/arch/Kconfig b/arch/Kconfig index d0012add6b19..cd211a14a88f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -29,7 +29,7 @@ config OPROFILE_EVENT_MULTIPLEX The number of hardware counters is limited. The multiplexing feature enables OProfile to gather more events than counters are provided by the hardware. This is realized by switching - between events at an user specified time interval. + between events at a user specified time interval. If unsure, say N. 
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 46e47c088622..d103db5af5ff 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h generic-y += current.h +generic-y += kprobes.h diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h index 9e33e92e524c..1610d078b064 100644 --- a/arch/alpha/include/asm/a.out-core.h +++ b/arch/alpha/include/asm/a.out-core.h @@ -15,6 +15,7 @@ #ifdef __KERNEL__ #include <linux/user.h> +#include <linux/mm_types.h> /* * Fill in the user structure for an ECOFF core dump. diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index c63b6ac19ee5..5d53666935e6 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _ALPHA_DMA_MAPPING_H #define _ALPHA_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return dma_ops; } diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 4c51c05333c6..384bd47b5187 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h @@ -7,6 +7,8 @@ * Copyright (C) 1996, Linus Torvalds */ +#include <linux/mm_types.h> + #include <asm/machvec.h> #include <asm/compiler.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 9d27a7d333dc..0b961093ca5c 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -11,7 +11,10 @@ */ #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index bb152e21e5ae..ffbdb3fb672f 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -128,7 +128,7 @@ static int alpha_noop_supported(struct device *dev, u64 mask) return mask < 0x00ffffffUL ? 
0 : 1; } -struct dma_map_ops alpha_noop_ops = { +const struct dma_map_ops alpha_noop_ops = { .alloc = alpha_noop_alloc_coherent, .free = dma_noop_free_coherent, .map_page = dma_noop_map_page, @@ -137,5 +137,5 @@ struct dma_map_ops alpha_noop_ops = { .dma_supported = alpha_noop_supported, }; -struct dma_map_ops *dma_ops = &alpha_noop_ops; +const struct dma_map_ops *dma_ops = &alpha_noop_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 451fc9cdd323..7fd2329038a3 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -939,7 +939,7 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -struct dma_map_ops alpha_pci_ops = { +const struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent, .map_page = alpha_pci_map_page, @@ -950,5 +950,5 @@ struct dma_map_ops alpha_pci_ops = { .dma_supported = alpha_pci_supported, }; -struct dma_map_ops *dma_ops = &alpha_pci_ops; +const struct dma_map_ops *dma_ops = &alpha_pci_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index bca963a4aa48..0b9635040721 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -11,6 +11,9 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index bc4d2cdcf21d..285a82d491ef 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 17308f925306..8129dd92cadc 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -6,7 +6,8 @@ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 46bf263c3153..9fc560459ebd 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -14,7 +14,7 @@ #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/threads.h> @@ -144,7 +144,7 @@ smp_callin(void) alpha_mv.smp_callin(); /* All kernel threads share the same mm context. 
*/ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index af2994206b4b..b137390e87e7 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -10,7 +10,8 @@ #include <linux/jiffies.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/delay.h> #include <linux/extable.h> diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c index fa5ae0ad8983..d17d705f6545 100644 --- a/arch/alpha/math-emu/math.c +++ b/arch/alpha/math-emu/math.c @@ -2,6 +2,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <asm/ptrace.h> #include <linux/uaccess.h> diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 47948b4dd157..c25e8827e7cd 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -4,7 +4,7 @@ * Copyright (C) 1995 Linus Torvalds */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/io.h> diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 266f11c9bd59..94285031c4fb 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -18,9 +18,9 @@ #include <plat/dma.h> #endif -extern struct dma_map_ops arc_dma_ops; +extern const struct dma_map_ops arc_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &arc_dma_ops; } diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 944dbedb38b5..00bdbe167615 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -9,6 +9,8 @@ #ifndef _ARC_KPROBES_H #define _ARC_KPROBES_H +#include <asm-generic/kprobes.h> + #ifdef CONFIG_KPROBES typedef u16 kprobe_opcode_t; @@ -55,6 +57,6 @@ void trap_is_kprobe(unsigned long address, struct pt_regs *regs); static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) { } -#endif +#endif /* CONFIG_KPROBES */ -#endif +#endif /* _ARC_KPROBES_H */ diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index b0b87f2447f5..64b5ebae1ae8 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h @@ -20,6 +20,7 @@ #include <asm/arcregs.h> #include <asm/tlb.h> +#include <linux/sched/mm.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index 6f4cb0dab1b9..9e1ae9d41925 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c @@ -16,6 +16,7 @@ #include <asm/asm-offsets.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #ifdef CONFIG_ARC_PLAT_EZNPS #include <plat/ctop.h> #endif diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index ecf6a7869375..9a3c34af2ae8 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -10,6 +10,7 @@ #include <linux/kgdb.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <asm/disasm.h> #include <asm/cacheflush.h> diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index a41a79a4f4fe..2a018de6d6cd 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -11,6 +11,9 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/task.h> 
+#include <linux/sched/task_stack.h> + #include <linux/mm.h> #include <linux/fs.h> #include <linux/unistd.h> diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 4442204fe238..31150060d38b 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -8,6 +8,7 @@ #include <linux/ptrace.h> #include <linux/tracehook.h> +#include <linux/sched/task_stack.h> #include <linux/regset.h> #include <linux/unistd.h> #include <linux/elf.h> diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index d347bbc086fe..48685445002e 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -53,6 +53,8 @@ #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/tracehook.h> +#include <linux/sched/task_stack.h> + #include <asm/ucontext.h> struct rt_sigframe { diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 2afbafadb6ab..f46267153ec2 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -13,7 +13,7 @@ */ #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/interrupt.h> #include <linux/profile.h> #include <linux/mm.h> @@ -139,8 +139,8 @@ void start_kernel_secondary(void) /* MMU, Caches, Vector Table, Interrupts etc */ setup_processor(); - atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmget(mm); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index b9192a653b7e..74315f302971 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -28,6 +28,8 @@ #include <linux/export.h> #include <linux/stacktrace.h> #include <linux/kallsyms.h> +#include <linux/sched/debug.h> + #include <asm/arcregs.h> #include <asm/unwind.h> #include <asm/switch_to.h> diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index c927aa84e652..ff83e78d0cfb 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -13,7 +13,7 @@ * Rahul Trivedi: Codito Technologies 2004 */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/ptrace.h> diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 82f9bc819f4a..f9caf79186d4 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -13,6 +13,9 @@ #include <linux/fs_struct.h> #include <linux/proc_fs.h> #include <linux/file.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> + #include <asm/arcregs.h> #include <asm/irqflags.h> diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 61fd1ce63c56..b6e4f7a7419b 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -1051,9 +1051,9 @@ int arc_unwind(struct unwind_frame_info *frame) ++ptr; } if (cie != NULL) { - /* get code aligment factor */ + /* get code alignment factor */ state.codeAlign = get_uleb128(&ptr, end); - /* get data aligment factor */ + /* get data alignment factor */ state.dataAlign = get_sleb128(&ptr, end); if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) cie = NULL; diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 08450a1a5b5f..2a07e6ecafbd 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -218,7 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask) return dma_mask == DMA_BIT_MASK(32); } -struct dma_map_ops arc_dma_ops = { +const struct dma_map_ops arc_dma_ops = { .alloc = arc_dma_alloc, .free = arc_dma_free, .mmap = arc_dma_mmap, diff 
--git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index e94e5aa33985..162c97528872 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -9,7 +9,7 @@ #include <linux/signal.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/uaccess.h> diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 2e06d56e987b..3e25e8d6486b 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -13,7 +13,8 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> + #include <asm/cacheflush.h> #define COLOUR_ALIGN(addr, pgoff) \ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index bdb295e09160..d0126fdfe2d8 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -53,6 +53,8 @@ #include <linux/module.h> #include <linux/bug.h> +#include <linux/mm_types.h> + #include <asm/arcregs.h> #include <asm/setup.h> #include <asm/mmu_context.h> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fda6a46d27cf..0d4e71b42c77 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2,6 +2,7 @@ config ARM bool default y select ARCH_CLOCKSOURCE_DATA + select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_SET_MEMORY diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu index aed66d5df7f1..b7576349528c 100644 --- a/arch/arm/Kconfig-nommu +++ b/arch/arm/Kconfig-nommu @@ -34,8 +34,7 @@ config PROCESSOR_ID used instead of the auto-probing which utilizes the register. config REMAP_VECTORS_TO_RAM - bool 'Install vectors to the beginning of RAM' if DRAM_BASE - depends on DRAM_BASE + bool 'Install vectors to the beginning of RAM' help The kernel needs to change the hardware exception vectors. 
In nommu mode, the hardware exception vectors are normally diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index a0765e7ed6c7..ea7832702a8f 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -32,6 +32,7 @@ extern void error(char *); /* Not needed, but used in some headers pulled in by decompressors */ extern char * strstr(const char * s1, const char *s2); +extern size_t strlen(const char *s); #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index fc6d541549a2..9150f9732785 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -1196,7 +1196,7 @@ skip: bgt loop1 finished: ldmfd sp!, {r0-r7, r9-r11} - mov r10, #0 @ swith back to cache level 0 + mov r10, #0 @ switch back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr iflush: mcr p15, 0, r10, c7, c10, 4 @ DSB diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 62b3ffe62df2..24b0f5f556f8 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -65,8 +65,9 @@ compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; allwinner,pipeline = "de_be0-lcd0-hdmi"; - clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>, - <&ahb_gates 43>, <&ahb_gates 44>; + clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_HDMI>, + <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DRAM_DE_BE>, + <&ccu CLK_DE_BE>, <&ccu CLK_HDMI>; status = "disabled"; }; @@ -74,8 +75,8 @@ compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; allwinner,pipeline = "de_be0-lcd0"; - clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>, - <&ahb_gates 44>; + clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_TCON_CH0>, <&ccu CLK_DRAM_DE_BE>; status = "disabled"; }; @@ -83,77 +84,19 @@ compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; allwinner,pipeline = "de_be0-lcd0-tve0"; - clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>, - <&ahb_gates 36>, <&ahb_gates 44>; + clocks = <&ccu CLK_AHB_TVE>, <&ccu CLK_AHB_LCD>, + <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_TCON_CH1>, <&ccu CLK_DRAM_DE_BE>; status = "disabled"; }; }; - clocks { - ahb_gates: clk@01c20060 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a10s-ahb-gates-clk"; - reg = <0x01c20060 0x8>; - clocks = <&ahb>; - clock-indices = <0>, <1>, - <2>, <5>, <6>, - <7>, <8>, <9>, - <10>, <13>, - <14>, <17>, <18>, - <20>, <21>, <22>, - <26>, <28>, <32>, - <34>, <36>, <40>, - <43>, <44>, - <46>, <51>, - <52>; - clock-output-names = "ahb_usbotg", "ahb_ehci", - "ahb_ohci", "ahb_ss", "ahb_dma", - "ahb_bist", "ahb_mmc0", "ahb_mmc1", - "ahb_mmc2", "ahb_nand", - "ahb_sdram", "ahb_emac", "ahb_ts", - "ahb_spi0", "ahb_spi1", "ahb_spi2", - "ahb_gps", "ahb_stimer", "ahb_ve", - "ahb_tve", "ahb_lcd", "ahb_csi", - "ahb_hdmi", "ahb_de_be", - "ahb_de_fe", "ahb_iep", - "ahb_mali400"; - }; - - apb0_gates: clk@01c20068 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a10s-apb0-gates-clk"; - reg = <0x01c20068 0x4>; - clocks = <&apb0>; - clock-indices = <0>, <3>, - <5>, <6>, - <10>; - clock-output-names = "apb0_codec", "apb0_iis", - "apb0_pio", "apb0_ir", - "apb0_keypad"; - }; - - apb1_gates: clk@01c2006c { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a10s-apb1-gates-clk"; - reg = <0x01c2006c 0x4>; - clocks = <&apb1>; - clock-indices = <0>, <1>, - <2>, <16>, - <17>, <18>, - <19>; - 
clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart0", - "apb1_uart1", "apb1_uart2", - "apb1_uart3"; - }; - }; - soc@01c00000 { emac: ethernet@01c0b000 { compatible = "allwinner,sun4i-a10-emac"; reg = <0x01c0b000 0x1000>; interrupts = <55>; - clocks = <&ahb_gates 17>; + clocks = <&ccu CLK_AHB_EMAC>; allwinner,sram = <&emac_sram 1>; status = "disabled"; }; @@ -169,7 +112,7 @@ pwm: pwm@01c20e00 { compatible = "allwinner,sun5i-a10s-pwm"; reg = <0x01c20e00 0xc>; - clocks = <&osc24M>; + clocks = <&ccu CLK_HOSC>; #pwm-cells = <3>; status = "disabled"; }; @@ -180,7 +123,7 @@ interrupts = <1>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 16>; + clocks = <&ccu CLK_APB1_UART0>; status = "disabled"; }; @@ -190,12 +133,16 @@ interrupts = <3>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 18>; + clocks = <&ccu CLK_APB1_UART2>; status = "disabled"; }; }; }; +&ccu { + compatible = "allwinner,sun5i-a10s-ccu"; +}; + &pio { compatible = "allwinner,sun5i-a10s-pinctrl"; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 4131ab44558b..fb2ddb9a04c9 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -61,8 +61,8 @@ compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; allwinner,pipeline = "de_be0-lcd0"; - clocks = <&ahb_gates 36>, <&ahb_gates 44>, <&de_be_clk>, - <&tcon_ch0_clk>, <&dram_gates 26>; + clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_TCON_CH0>, <&ccu CLK_DRAM_DE_BE>; status = "disabled"; }; }; @@ -99,114 +99,6 @@ }; }; - clocks { - ahb_gates: clk@01c20060 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-ahb-gates-clk"; - reg = <0x01c20060 0x8>; - clocks = <&ahb>; - clock-indices = <0>, <1>, - <2>, <5>, <6>, - <7>, <8>, <9>, - <10>, <13>, - <14>, <20>, - <21>, <22>, - <28>, <32>, <34>, - <36>, <40>, <44>, - <46>, <51>, - <52>; - clock-output-names = "ahb_usbotg", "ahb_ehci", - "ahb_ohci", "ahb_ss", "ahb_dma", - "ahb_bist", "ahb_mmc0", "ahb_mmc1", - "ahb_mmc2", "ahb_nand", - "ahb_sdram", "ahb_spi0", - "ahb_spi1", "ahb_spi2", - "ahb_stimer", "ahb_ve", "ahb_tve", - "ahb_lcd", "ahb_csi", "ahb_de_be", - "ahb_de_fe", "ahb_iep", - "ahb_mali400"; - }; - - apb0_gates: clk@01c20068 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-apb0-gates-clk"; - reg = <0x01c20068 0x4>; - clocks = <&apb0>; - clock-indices = <0>, <5>, - <6>; - clock-output-names = "apb0_codec", "apb0_pio", - "apb0_ir"; - }; - - apb1_gates: clk@01c2006c { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-apb1-gates-clk"; - reg = <0x01c2006c 0x4>; - clocks = <&apb1>; - clock-indices = <0>, <1>, - <2>, <17>, - <19>; - clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart1", - "apb1_uart3"; - }; - - dram_gates: clk@01c20100 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-dram-gates-clk", - "allwinner,sun4i-a10-gates-clk"; - reg = <0x01c20100 0x4>; - clocks = <&pll5 0>; - clock-indices = <0>, - <1>, - <25>, - <26>, - <29>, - <31>; - clock-output-names = "dram_ve", - "dram_csi", - "dram_de_fe", - "dram_de_be", - "dram_ace", - "dram_iep"; - }; - - de_be_clk: clk@01c20104 { - #clock-cells = <0>; - #reset-cells = <0>; - compatible = "allwinner,sun4i-a10-display-clk"; - reg = <0x01c20104 0x4>; - clocks = <&pll3>, <&pll7>, <&pll5 1>; - clock-output-names = "de-be"; - }; - - de_fe_clk: clk@01c2010c { - #clock-cells = <0>; - #reset-cells = <0>; - compatible = "allwinner,sun4i-a10-display-clk"; - reg = <0x01c2010c 0x4>; - clocks 
= <&pll3>, <&pll7>, <&pll5 1>; - clock-output-names = "de-fe"; - }; - - tcon_ch0_clk: clk@01c20118 { - #clock-cells = <0>; - #reset-cells = <1>; - compatible = "allwinner,sun4i-a10-tcon-ch0-clk"; - reg = <0x01c20118 0x4>; - clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>; - clock-output-names = "tcon-ch0-sclk"; - }; - - tcon_ch1_clk: clk@01c2012c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-tcon-ch1-clk"; - reg = <0x01c2012c 0x4>; - clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>; - clock-output-names = "tcon-ch1-sclk"; - }; - }; - display-engine { compatible = "allwinner,sun5i-a13-display-engine"; allwinner,pipelines = <&fe0>; @@ -217,11 +109,11 @@ compatible = "allwinner,sun5i-a13-tcon"; reg = <0x01c0c000 0x1000>; interrupts = <44>; - resets = <&tcon_ch0_clk 1>; + resets = <&ccu RST_LCD>; reset-names = "lcd"; - clocks = <&ahb_gates 36>, - <&tcon_ch0_clk>, - <&tcon_ch1_clk>; + clocks = <&ccu CLK_AHB_LCD>, + <&ccu CLK_TCON_CH0>, + <&ccu CLK_TCON_CH1>; clock-names = "ahb", "tcon-ch0", "tcon-ch1"; @@ -254,7 +146,7 @@ pwm: pwm@01c20e00 { compatible = "allwinner,sun5i-a13-pwm"; reg = <0x01c20e00 0xc>; - clocks = <&osc24M>; + clocks = <&ccu CLK_HOSC>; #pwm-cells = <3>; status = "disabled"; }; @@ -263,11 +155,11 @@ compatible = "allwinner,sun5i-a13-display-frontend"; reg = <0x01e00000 0x20000>; interrupts = <47>; - clocks = <&ahb_gates 46>, <&de_fe_clk>, - <&dram_gates 25>; + clocks = <&ccu CLK_DE_FE>, <&ccu CLK_DE_FE>, + <&ccu CLK_DRAM_DE_FE>; clock-names = "ahb", "mod", "ram"; - resets = <&de_fe_clk>; + resets = <&ccu RST_DE_FE>; status = "disabled"; ports { @@ -290,14 +182,14 @@ be0: display-backend@01e60000 { compatible = "allwinner,sun5i-a13-display-backend"; reg = <0x01e60000 0x10000>; - clocks = <&ahb_gates 44>, <&de_be_clk>, - <&dram_gates 26>; + clocks = <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_DRAM_DE_BE>; clock-names = "ahb", "mod", "ram"; - resets = <&de_be_clk>; + resets = <&ccu RST_DE_BE>; status = "disabled"; - assigned-clocks = <&de_be_clk>; + assigned-clocks = <&ccu CLK_DE_BE>; assigned-clock-rates = <300000000>; ports { @@ -330,6 +222,10 @@ }; }; +&ccu { + compatible = "allwinner,sun5i-a13-ccu"; +}; + &cpu0 { clock-latency = <244144>; /* 8 32k periods */ operating-points = < diff --git a/arch/arm/boot/dts/sun5i-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi index f83ae3fc6329..cb9b2aaf7297 100644 --- a/arch/arm/boot/dts/sun5i-gr8.dtsi +++ b/arch/arm/boot/dts/sun5i-gr8.dtsi @@ -42,9 +42,10 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <dt-bindings/clock/sun4i-a10-pll2.h> +#include <dt-bindings/clock/sun5i-ccu.h> #include <dt-bindings/dma/sun4i-a10.h> #include <dt-bindings/pinctrl/sun4i-a10.h> +#include <dt-bindings/reset/sun5i-ccu.h> / { interrupt-parent = <&intc>; @@ -59,7 +60,7 @@ device_type = "cpu"; compatible = "arm,cortex-a8"; reg = <0x0>; - clocks = <&cpu>; + clocks = <&ccu CLK_CPU>; }; }; @@ -68,419 +69,19 @@ #size-cells = <1>; ranges; - /* - * This is a dummy clock, to be used as placeholder on - * other mux clocks when a specific parent clock is not - * yet implemented. It should be dropped when the driver - * is complete. 
- */ - dummy: dummy { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <0>; - }; - osc24M: clk@01c20050 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-osc-clk"; - reg = <0x01c20050 0x4>; + compatible = "fixed-clock"; clock-frequency = <24000000>; clock-output-names = "osc24M"; }; - osc3M: osc3M-clk { - compatible = "fixed-factor-clock"; - #clock-cells = <0>; - clock-div = <8>; - clock-mult = <1>; - clocks = <&osc24M>; - clock-output-names = "osc3M"; - }; - osc32k: clk@0 { #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <32768>; clock-output-names = "osc32k"; }; - - pll1: clk@01c20000 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll1-clk"; - reg = <0x01c20000 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll1"; - }; - - pll2: clk@01c20008 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-pll2-clk"; - reg = <0x01c20008 0x8>; - clocks = <&osc24M>; - clock-output-names = "pll2-1x", "pll2-2x", - "pll2-4x", "pll2-8x"; - }; - - pll3: clk@01c20010 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll3-clk"; - reg = <0x01c20010 0x4>; - clocks = <&osc3M>; - clock-output-names = "pll3"; - }; - - pll3x2: pll3x2-clk { - compatible = "allwinner,sun4i-a10-pll3-2x-clk"; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <2>; - clocks = <&pll3>; - clock-output-names = "pll3-2x"; - }; - - pll4: clk@01c20018 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll1-clk"; - reg = <0x01c20018 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll4"; - }; - - pll5: clk@01c20020 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-pll5-clk"; - reg = <0x01c20020 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll5_ddr", "pll5_other"; - }; - - pll6: clk@01c20028 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-pll6-clk"; - reg = <0x01c20028 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll6_sata", "pll6_other", "pll6"; - }; - - pll7: clk@01c20030 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll3-clk"; - reg = <0x01c20030 0x4>; - clocks = <&osc3M>; - clock-output-names = "pll7"; - }; - - pll7x2: pll7x2-clk { - compatible = "allwinner,sun4i-a10-pll3-2x-clk"; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <2>; - clocks = <&pll7>; - clock-output-names = "pll7-2x"; - }; - - /* dummy is 200M */ - cpu: cpu@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-cpu-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&pll1>, <&dummy>; - clock-output-names = "cpu"; - }; - - axi: axi@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-axi-clk"; - reg = <0x01c20054 0x4>; - clocks = <&cpu>; - clock-output-names = "axi"; - }; - - ahb: ahb@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun5i-a13-ahb-clk"; - reg = <0x01c20054 0x4>; - clocks = <&axi>, <&cpu>, <&pll6 1>; - clock-output-names = "ahb"; - /* - * Use PLL6 as parent, instead of CPU/AXI - * which has rate changes due to cpufreq - */ - assigned-clocks = <&ahb>; - assigned-clock-parents = <&pll6 1>; - }; - - apb0: apb0@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb0-clk"; - reg = <0x01c20054 0x4>; - clocks = <&ahb>; - clock-output-names = "apb0"; - }; - - apb1: clk@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1"; - }; - - axi_gates: clk@01c2005c { - #clock-cells = <1>; - compatible = 
"allwinner,sun4i-a10-gates-clk"; - reg = <0x01c2005c 0x4>; - clocks = <&axi>; - clock-indices = <0>; - clock-output-names = "axi_dram"; - }; - - ahb_gates: clk@01c20060 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-ahb-gates-clk"; - reg = <0x01c20060 0x8>; - clocks = <&ahb>; - clock-indices = <0>, <1>, - <2>, <5>, <6>, - <7>, <8>, <9>, - <10>, <13>, - <14>, <17>, <20>, - <21>, <22>, - <28>, <32>, <34>, - <36>, <40>, <44>, - <46>, <51>, - <52>; - clock-output-names = "ahb_usbotg", "ahb_ehci", - "ahb_ohci", "ahb_ss", "ahb_dma", - "ahb_bist", "ahb_mmc0", "ahb_mmc1", - "ahb_mmc2", "ahb_nand", - "ahb_sdram", "ahb_emac", "ahb_spi0", - "ahb_spi1", "ahb_spi2", - "ahb_hstimer", "ahb_ve", "ahb_tve", - "ahb_lcd", "ahb_csi", "ahb_de_be", - "ahb_de_fe", "ahb_iep", - "ahb_mali400"; - }; - - apb0_gates: clk@01c20068 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-gates-clk"; - reg = <0x01c20068 0x4>; - clocks = <&apb0>; - clock-indices = <0>, <3>, - <5>, <6>; - clock-output-names = "apb0_codec", "apb0_i2s0", - "apb0_pio", "apb0_ir"; - }; - - apb1_gates: clk@01c2006c { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-gates-clk"; - reg = <0x01c2006c 0x4>; - clocks = <&apb1>; - clock-indices = <0>, <1>, - <2>, <17>, - <18>, <19>; - clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart1", - "apb1_uart2", "apb1_uart3"; - }; - - nand_clk: clk@01c20080 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20080 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "nand"; - }; - - ms_clk: clk@01c20084 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20084 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ms"; - }; - - mmc0_clk: clk@01c20088 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c20088 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0", - "mmc0_output", - "mmc0_sample"; - }; - - mmc1_clk: clk@01c2008c { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c2008c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1", - "mmc1_output", - "mmc1_sample"; - }; - - mmc2_clk: clk@01c20090 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c20090 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2", - "mmc2_output", - "mmc2_sample"; - }; - - ts_clk: clk@01c20098 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20098 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ts"; - }; - - ss_clk: clk@01c2009c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c2009c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ss"; - }; - - spi0_clk: clk@01c200a0 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a0 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi0"; - }; - - spi1_clk: clk@01c200a4 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a4 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi1"; - }; - - spi2_clk: clk@01c200a8 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a8 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi2"; - }; - - ir0_clk: clk@01c200b0 { - #clock-cells = 
<0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200b0 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ir0"; - }; - - i2s0_clk: clk@01c200b8 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod1-clk"; - reg = <0x01c200b8 0x4>; - clocks = <&pll2 SUN4I_A10_PLL2_8X>, - <&pll2 SUN4I_A10_PLL2_4X>, - <&pll2 SUN4I_A10_PLL2_2X>, - <&pll2 SUN4I_A10_PLL2_1X>; - clock-output-names = "i2s0"; - }; - - spdif_clk: clk@01c200c0 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod1-clk"; - reg = <0x01c200c0 0x4>; - clocks = <&pll2 SUN4I_A10_PLL2_8X>, - <&pll2 SUN4I_A10_PLL2_4X>, - <&pll2 SUN4I_A10_PLL2_2X>, - <&pll2 SUN4I_A10_PLL2_1X>; - clock-output-names = "spdif"; - }; - - usb_clk: clk@01c200cc { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "allwinner,sun5i-a13-usb-clk"; - reg = <0x01c200cc 0x4>; - clocks = <&pll6 1>; - clock-output-names = "usb_ohci0", "usb_phy"; - }; - - dram_gates: clk@01c20100 { - #clock-cells = <1>; - compatible = "nextthing,gr8-dram-gates-clk", - "allwinner,sun4i-a10-gates-clk"; - reg = <0x01c20100 0x4>; - clocks = <&pll5 0>; - clock-indices = <0>, - <1>, - <25>, - <26>, - <29>, - <31>; - clock-output-names = "dram_ve", - "dram_csi", - "dram_de_fe", - "dram_de_be", - "dram_ace", - "dram_iep"; - }; - - de_be_clk: clk@01c20104 { - #clock-cells = <0>; - #reset-cells = <0>; - compatible = "allwinner,sun4i-a10-display-clk"; - reg = <0x01c20104 0x4>; - clocks = <&pll3>, <&pll7>, <&pll5 1>; - clock-output-names = "de-be"; - }; - - de_fe_clk: clk@01c2010c { - #clock-cells = <0>; - #reset-cells = <0>; - compatible = "allwinner,sun4i-a10-display-clk"; - reg = <0x01c2010c 0x4>; - clocks = <&pll3>, <&pll7>, <&pll5 1>; - clock-output-names = "de-fe"; - }; - - tcon_ch0_clk: clk@01c20118 { - #clock-cells = <0>; - #reset-cells = <1>; - compatible = "allwinner,sun4i-a10-tcon-ch0-clk"; - reg = <0x01c20118 0x4>; - clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>; - clock-output-names = "tcon-ch0-sclk"; - }; - - tcon_ch1_clk: clk@01c2012c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-tcon-ch1-clk"; - reg = <0x01c2012c 0x4>; - clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>; - clock-output-names = "tcon-ch1-sclk"; - }; - - codec_clk: clk@01c20140 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-codec-clk"; - reg = <0x01c20140 0x4>; - clocks = <&pll2 SUN4I_A10_PLL2_1X>; - clock-output-names = "codec"; - }; - - mbus_clk: clk@01c2015c { - #clock-cells = <0>; - compatible = "allwinner,sun5i-a13-mbus-clk"; - reg = <0x01c2015c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mbus"; - }; }; display-engine { @@ -528,7 +129,7 @@ compatible = "allwinner,sun4i-a10-dma"; reg = <0x01c02000 0x1000>; interrupts = <27>; - clocks = <&ahb_gates 6>; + clocks = <&ccu CLK_AHB_DMA>; #dma-cells = <2>; }; @@ -536,7 +137,7 @@ compatible = "allwinner,sun4i-a10-nand"; reg = <0x01c03000 0x1000>; interrupts = <37>; - clocks = <&ahb_gates 13>, <&nand_clk>; + clocks = <&ccu CLK_AHB_NAND>, <&ccu CLK_NAND>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 3>; dma-names = "rxtx"; @@ -549,7 +150,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c05000 0x1000>; interrupts = <10>; - clocks = <&ahb_gates 20>, <&spi0_clk>; + clocks = <&ccu CLK_AHB_SPI0>, <&ccu CLK_SPI0>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 27>, <&dma SUN4I_DMA_DEDICATED 26>; @@ -563,7 +164,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c06000 0x1000>; interrupts = <11>; - clocks = <&ahb_gates 
21>, <&spi1_clk>; + clocks = <&ccu CLK_AHB_SPI1>, <&ccu CLK_SPI1>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 9>, <&dma SUN4I_DMA_DEDICATED 8>; @@ -576,8 +177,8 @@ tve0: tv-encoder@01c0a000 { compatible = "allwinner,sun4i-a10-tv-encoder"; reg = <0x01c0a000 0x1000>; - clocks = <&ahb_gates 34>; - resets = <&tcon_ch0_clk 0>; + clocks = <&ccu CLK_AHB_TVE>; + resets = <&ccu RST_TVE>; status = "disabled"; port { @@ -595,11 +196,11 @@ compatible = "allwinner,sun5i-a13-tcon"; reg = <0x01c0c000 0x1000>; interrupts = <44>; - resets = <&tcon_ch0_clk 1>; + resets = <&ccu RST_LCD>; reset-names = "lcd"; - clocks = <&ahb_gates 36>, - <&tcon_ch0_clk>, - <&tcon_ch1_clk>; + clocks = <&ccu CLK_AHB_LCD>, + <&ccu CLK_TCON_CH0>, + <&ccu CLK_TCON_CH1>; clock-names = "ahb", "tcon-ch0", "tcon-ch1"; @@ -637,14 +238,8 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, - <&mmc0_clk 0>, - <&mmc0_clk 1>, - <&mmc0_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC0>, <&ccu CLK_MMC0>; + clock-names = "ahb", "mmc"; interrupts = <32>; status = "disabled"; #address-cells = <1>; @@ -654,14 +249,8 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, - <&mmc1_clk 0>, - <&mmc1_clk 1>, - <&mmc1_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC1>, <&ccu CLK_MMC1>; + clock-names = "ahb", "mmc"; interrupts = <33>; status = "disabled"; #address-cells = <1>; @@ -671,14 +260,8 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, - <&mmc2_clk 0>, - <&mmc2_clk 1>, - <&mmc2_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC2>, <&ccu CLK_MMC2>; + clock-names = "ahb", "mmc"; interrupts = <34>; status = "disabled"; #address-cells = <1>; @@ -688,7 +271,7 @@ usb_otg: usb@01c13000 { compatible = "allwinner,sun4i-a10-musb"; reg = <0x01c13000 0x0400>; - clocks = <&ahb_gates 0>; + clocks = <&ccu CLK_AHB_OTG>; interrupts = <38>; interrupt-names = "mc"; phys = <&usbphy 0>; @@ -705,9 +288,9 @@ compatible = "allwinner,sun5i-a13-usb-phy"; reg = <0x01c13400 0x10 0x01c14800 0x4>; reg-names = "phy_ctrl", "pmu1"; - clocks = <&usb_clk 8>; + clocks = <&ccu CLK_USB_PHY0>; clock-names = "usb_phy"; - resets = <&usb_clk 0>, <&usb_clk 1>; + resets = <&ccu RST_USB_PHY0>, <&ccu RST_USB_PHY1>; reset-names = "usb0_reset", "usb1_reset"; status = "disabled"; }; @@ -716,7 +299,7 @@ compatible = "allwinner,sun5i-a13-ehci", "generic-ehci"; reg = <0x01c14000 0x100>; interrupts = <39>; - clocks = <&ahb_gates 1>; + clocks = <&ccu CLK_AHB_EHCI>; phys = <&usbphy 1>; phy-names = "usb"; status = "disabled"; @@ -726,7 +309,7 @@ compatible = "allwinner,sun5i-a13-ohci", "generic-ohci"; reg = <0x01c14400 0x100>; interrupts = <40>; - clocks = <&usb_clk 6>, <&ahb_gates 2>; + clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>; phys = <&usbphy 1>; phy-names = "usb"; status = "disabled"; @@ -736,7 +319,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; interrupts = <12>; - clocks = <&ahb_gates 22>, <&spi2_clk>; + clocks = <&ccu CLK_AHB_SPI2>, <&ccu CLK_SPI2>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 29>, <&dma SUN4I_DMA_DEDICATED 28>; @@ -746,6 +329,15 @@ #size-cells = <0>; }; + ccu: clock@01c20000 { + compatible = "nextthing,gr8-ccu"; + reg = <0x01c20000 0x400>; + clocks = <&osc24M>, <&osc32k>; + clock-names = 
"hosc", "losc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + intc: interrupt-controller@01c20400 { compatible = "allwinner,sun4i-a10-ic"; reg = <0x01c20400 0x400>; @@ -757,7 +349,7 @@ compatible = "nextthing,gr8-pinctrl"; reg = <0x01c20800 0x400>; interrupts = <28>; - clocks = <&apb0_gates 5>; + clocks = <&ccu CLK_APB0_PIO>; gpio-controller; interrupt-controller; #interrupt-cells = <3>; @@ -876,7 +468,7 @@ pwm: pwm@01c20e00 { compatible = "allwinner,sun5i-a10s-pwm"; reg = <0x01c20e00 0xc>; - clocks = <&osc24M>; + clocks = <&ccu CLK_HOSC>; #pwm-cells = <3>; status = "disabled"; }; @@ -885,7 +477,7 @@ compatible = "allwinner,sun4i-a10-timer"; reg = <0x01c20c00 0x90>; interrupts = <22>; - clocks = <&osc24M>; + clocks = <&ccu CLK_HOSC>; }; wdt: watchdog@01c20c90 { @@ -898,7 +490,7 @@ compatible = "allwinner,sun4i-a10-spdif"; reg = <0x01c21000 0x400>; interrupts = <13>; - clocks = <&apb0_gates 1>, <&spdif_clk>; + clocks = <&ccu CLK_APB0_SPDIF>, <&ccu CLK_SPDIF>; clock-names = "apb", "spdif"; dmas = <&dma SUN4I_DMA_NORMAL 2>, <&dma SUN4I_DMA_NORMAL 2>; @@ -908,7 +500,7 @@ ir0: ir@01c21800 { compatible = "allwinner,sun4i-a10-ir"; - clocks = <&apb0_gates 6>, <&ir0_clk>; + clocks = <&ccu CLK_APB0_IR>, <&ccu CLK_IR>; clock-names = "apb", "ir"; interrupts = <5>; reg = <0x01c21800 0x40>; @@ -920,7 +512,7 @@ compatible = "allwinner,sun4i-a10-i2s"; reg = <0x01c22400 0x400>; interrupts = <16>; - clocks = <&apb0_gates 3>, <&i2s0_clk>; + clocks = <&ccu CLK_APB0_I2S>, <&ccu CLK_I2S>; clock-names = "apb", "mod"; dmas = <&dma SUN4I_DMA_NORMAL 3>, <&dma SUN4I_DMA_NORMAL 3>; @@ -940,7 +532,7 @@ compatible = "allwinner,sun4i-a10-codec"; reg = <0x01c22c00 0x40>; interrupts = <30>; - clocks = <&apb0_gates 0>, <&codec_clk>; + clocks = <&ccu CLK_APB0_CODEC>, <&ccu CLK_CODEC>; clock-names = "apb", "codec"; dmas = <&dma SUN4I_DMA_NORMAL 19>, <&dma SUN4I_DMA_NORMAL 19>; @@ -961,7 +553,7 @@ interrupts = <2>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 17>; + clocks = <&ccu CLK_APB1_UART1>; status = "disabled"; }; @@ -971,7 +563,7 @@ interrupts = <3>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 18>; + clocks = <&ccu CLK_APB1_UART2>; status = "disabled"; }; @@ -981,7 +573,7 @@ interrupts = <4>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 19>; + clocks = <&ccu CLK_APB1_UART3>; status = "disabled"; }; @@ -989,7 +581,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2ac00 0x400>; interrupts = <7>; - clocks = <&apb1_gates 0>; + clocks = <&ccu CLK_APB1_I2C0>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -999,7 +591,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2b000 0x400>; interrupts = <8>; - clocks = <&apb1_gates 1>; + clocks = <&ccu CLK_APB1_I2C1>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -1009,7 +601,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2b400 0x400>; interrupts = <9>; - clocks = <&apb1_gates 2>; + clocks = <&ccu CLK_APB1_I2C2>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -1019,18 +611,18 @@ compatible = "allwinner,sun5i-a13-hstimer"; reg = <0x01c60000 0x1000>; interrupts = <82>, <83>; - clocks = <&ahb_gates 28>; + clocks = <&ccu CLK_AHB_HSTIMER>; }; fe0: display-frontend@01e00000 { compatible = "allwinner,sun5i-a13-display-frontend"; reg = <0x01e00000 0x20000>; interrupts = <47>; - clocks = <&ahb_gates 46>, <&de_fe_clk>, - <&dram_gates 25>; + clocks = <&ccu CLK_AHB_DE_FE>, <&ccu CLK_DE_FE>, + <&ccu CLK_DRAM_DE_FE>; clock-names = "ahb", "mod", "ram"; - resets = 
<&de_fe_clk>; + resets = <&ccu RST_DE_FE>; status = "disabled"; ports { @@ -1053,14 +645,14 @@ be0: display-backend@01e60000 { compatible = "allwinner,sun5i-a13-display-backend"; reg = <0x01e60000 0x10000>; - clocks = <&ahb_gates 44>, <&de_be_clk>, - <&dram_gates 26>; + clocks = <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_DRAM_DE_BE>; clock-names = "ahb", "mod", "ram"; - resets = <&de_be_clk>; + resets = <&ccu RST_DE_BE>; status = "disabled"; - assigned-clocks = <&de_be_clk>; + assigned-clocks = <&ccu CLK_DE_BE>; assigned-clock-rates = <300000000>; ports { diff --git a/arch/arm/boot/dts/sun5i-r8.dtsi b/arch/arm/boot/dts/sun5i-r8.dtsi index 8b058f53b7dc..4c1141396c99 100644 --- a/arch/arm/boot/dts/sun5i-r8.dtsi +++ b/arch/arm/boot/dts/sun5i-r8.dtsi @@ -51,9 +51,9 @@ compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; allwinner,pipeline = "de_be0-lcd0-tve0"; - clocks = <&ahb_gates 34>, <&ahb_gates 36>, - <&ahb_gates 44>, <&de_be_clk>, - <&tcon_ch1_clk>, <&dram_gates 26>; + clocks = <&ccu CLK_AHB_TVE>, <&ccu CLK_AHB_LCD>, + <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_TCON_CH1>, <&ccu CLK_DRAM_DE_BE>; status = "disabled"; }; }; @@ -62,8 +62,8 @@ tve0: tv-encoder@01c0a000 { compatible = "allwinner,sun4i-a10-tv-encoder"; reg = <0x01c0a000 0x1000>; - clocks = <&ahb_gates 34>; - resets = <&tcon_ch0_clk 0>; + clocks = <&ccu CLK_AHB_TVE>; + resets = <&ccu RST_TVE>; status = "disabled"; port { diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index c058d37d5433..a9574a6cd95c 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -44,9 +44,10 @@ #include "skeleton.dtsi" -#include <dt-bindings/clock/sun4i-a10-pll2.h> +#include <dt-bindings/clock/sun5i-ccu.h> #include <dt-bindings/dma/sun4i-a10.h> #include <dt-bindings/pinctrl/sun4i-a10.h> +#include <dt-bindings/reset/sun5i-ccu.h> / { interrupt-parent = <&intc>; @@ -59,7 +60,7 @@ device_type = "cpu"; compatible = "arm,cortex-a8"; reg = <0x0>; - clocks = <&cpu>; + clocks = <&ccu CLK_CPU>; }; }; @@ -68,291 +69,19 @@ #size-cells = <1>; ranges; - /* - * This is a dummy clock, to be used as placeholder on - * other mux clocks when a specific parent clock is not - * yet implemented. It should be dropped when the driver - * is complete. 
- */ - dummy: dummy { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <0>; - }; - osc24M: clk@01c20050 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-osc-clk"; - reg = <0x01c20050 0x4>; + compatible = "fixed-clock"; clock-frequency = <24000000>; clock-output-names = "osc24M"; }; - osc3M: osc3M_clk { - compatible = "fixed-factor-clock"; - #clock-cells = <0>; - clock-div = <8>; - clock-mult = <1>; - clocks = <&osc24M>; - clock-output-names = "osc3M"; - }; - osc32k: clk@0 { #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <32768>; clock-output-names = "osc32k"; }; - - pll1: clk@01c20000 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll1-clk"; - reg = <0x01c20000 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll1"; - }; - - pll2: clk@01c20008 { - #clock-cells = <1>; - compatible = "allwinner,sun5i-a13-pll2-clk"; - reg = <0x01c20008 0x8>; - clocks = <&osc24M>; - clock-output-names = "pll2-1x", "pll2-2x", - "pll2-4x", "pll2-8x"; - }; - - pll3: clk@01c20010 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll3-clk"; - reg = <0x01c20010 0x4>; - clocks = <&osc3M>; - clock-output-names = "pll3"; - }; - - pll3x2: pll3x2_clk { - compatible = "allwinner,sun4i-a10-pll3-2x-clk", "fixed-factor-clock"; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <2>; - clocks = <&pll3>; - clock-output-names = "pll3-2x"; - }; - - pll4: clk@01c20018 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll1-clk"; - reg = <0x01c20018 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll4"; - }; - - pll5: clk@01c20020 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-pll5-clk"; - reg = <0x01c20020 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll5_ddr", "pll5_other"; - }; - - pll6: clk@01c20028 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-pll6-clk"; - reg = <0x01c20028 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll6_sata", "pll6_other", "pll6"; - }; - - pll7: clk@01c20030 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll3-clk"; - reg = <0x01c20030 0x4>; - clocks = <&osc3M>; - clock-output-names = "pll7"; - }; - - pll7x2: pll7x2_clk { - compatible = "fixed-factor-clock"; - #clock-cells = <0>; - clock-div = <1>; - clock-mult = <2>; - clocks = <&pll7>; - clock-output-names = "pll7-2x"; - }; - - /* dummy is 200M */ - cpu: cpu@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-cpu-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&pll1>, <&dummy>; - clock-output-names = "cpu"; - }; - - axi: axi@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-axi-clk"; - reg = <0x01c20054 0x4>; - clocks = <&cpu>; - clock-output-names = "axi"; - }; - - ahb: ahb@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun5i-a13-ahb-clk"; - reg = <0x01c20054 0x4>; - clocks = <&axi>, <&cpu>, <&pll6 1>; - clock-output-names = "ahb"; - /* - * Use PLL6 as parent, instead of CPU/AXI - * which has rate changes due to cpufreq - */ - assigned-clocks = <&ahb>; - assigned-clock-parents = <&pll6 1>; - }; - - apb0: apb0@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb0-clk"; - reg = <0x01c20054 0x4>; - clocks = <&ahb>; - clock-output-names = "apb0"; - }; - - apb1: clk@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1"; - }; - - axi_gates: clk@01c2005c { - #clock-cells = <1>; - compatible = 
"allwinner,sun4i-a10-axi-gates-clk"; - reg = <0x01c2005c 0x4>; - clocks = <&axi>; - clock-indices = <0>; - clock-output-names = "axi_dram"; - }; - - nand_clk: clk@01c20080 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20080 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "nand"; - }; - - ms_clk: clk@01c20084 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20084 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ms"; - }; - - mmc0_clk: clk@01c20088 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c20088 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0", - "mmc0_output", - "mmc0_sample"; - }; - - mmc1_clk: clk@01c2008c { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c2008c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1", - "mmc1_output", - "mmc1_sample"; - }; - - mmc2_clk: clk@01c20090 { - #clock-cells = <1>; - compatible = "allwinner,sun4i-a10-mmc-clk"; - reg = <0x01c20090 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2", - "mmc2_output", - "mmc2_sample"; - }; - - ts_clk: clk@01c20098 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c20098 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ts"; - }; - - ss_clk: clk@01c2009c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c2009c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ss"; - }; - - spi0_clk: clk@01c200a0 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a0 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi0"; - }; - - spi1_clk: clk@01c200a4 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a4 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi1"; - }; - - spi2_clk: clk@01c200a8 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200a8 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "spi2"; - }; - - ir0_clk: clk@01c200b0 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; - reg = <0x01c200b0 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "ir0"; - }; - - usb_clk: clk@01c200cc { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "allwinner,sun5i-a13-usb-clk"; - reg = <0x01c200cc 0x4>; - clocks = <&pll6 1>; - clock-output-names = "usb_ohci0", "usb_phy"; - }; - - codec_clk: clk@01c20140 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-codec-clk"; - reg = <0x01c20140 0x4>; - clocks = <&pll2 SUN4I_A10_PLL2_1X>; - clock-output-names = "codec"; - }; - - mbus_clk: clk@01c2015c { - #clock-cells = <0>; - compatible = "allwinner,sun5i-a13-mbus-clk"; - reg = <0x01c2015c 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mbus"; - }; }; soc@01c00000 { @@ -395,7 +124,7 @@ compatible = "allwinner,sun4i-a10-dma"; reg = <0x01c02000 0x1000>; interrupts = <27>; - clocks = <&ahb_gates 6>; + clocks = <&ccu CLK_AHB_DMA>; #dma-cells = <2>; }; @@ -403,7 +132,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c05000 0x1000>; interrupts = <10>; - clocks = <&ahb_gates 20>, <&spi0_clk>; + clocks = <&ccu CLK_AHB_SPI0>, <&ccu CLK_SPI0>; clock-names = "ahb", "mod"; dmas = <&dma 
SUN4I_DMA_DEDICATED 27>, <&dma SUN4I_DMA_DEDICATED 26>; @@ -417,7 +146,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c06000 0x1000>; interrupts = <11>; - clocks = <&ahb_gates 21>, <&spi1_clk>; + clocks = <&ccu CLK_AHB_SPI1>, <&ccu CLK_SPI1>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 9>, <&dma SUN4I_DMA_DEDICATED 8>; @@ -430,14 +159,8 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, - <&mmc0_clk 0>, - <&mmc0_clk 1>, - <&mmc0_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC0>, <&ccu CLK_MMC0>; + clock-names = "ahb", "mmc"; interrupts = <32>; status = "disabled"; #address-cells = <1>; @@ -447,14 +170,8 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, - <&mmc1_clk 0>, - <&mmc1_clk 1>, - <&mmc1_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC1>, <&ccu CLK_MMC1>; + clock-names = "ahb", "mmc"; interrupts = <33>; status = "disabled"; #address-cells = <1>; @@ -464,14 +181,8 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, - <&mmc2_clk 0>, - <&mmc2_clk 1>, - <&mmc2_clk 2>; - clock-names = "ahb", - "mmc", - "output", - "sample"; + clocks = <&ccu CLK_AHB_MMC2>, <&ccu CLK_MMC2>; + clock-names = "ahb", "mmc"; interrupts = <34>; status = "disabled"; #address-cells = <1>; @@ -481,7 +192,7 @@ usb_otg: usb@01c13000 { compatible = "allwinner,sun4i-a10-musb"; reg = <0x01c13000 0x0400>; - clocks = <&ahb_gates 0>; + clocks = <&ccu CLK_AHB_OTG>; interrupts = <38>; interrupt-names = "mc"; phys = <&usbphy 0>; @@ -496,9 +207,9 @@ compatible = "allwinner,sun5i-a13-usb-phy"; reg = <0x01c13400 0x10 0x01c14800 0x4>; reg-names = "phy_ctrl", "pmu1"; - clocks = <&usb_clk 8>; + clocks = <&ccu CLK_USB_PHY0>; clock-names = "usb_phy"; - resets = <&usb_clk 0>, <&usb_clk 1>; + resets = <&ccu RST_USB_PHY0>, <&ccu RST_USB_PHY1>; reset-names = "usb0_reset", "usb1_reset"; status = "disabled"; }; @@ -507,7 +218,7 @@ compatible = "allwinner,sun5i-a13-ehci", "generic-ehci"; reg = <0x01c14000 0x100>; interrupts = <39>; - clocks = <&ahb_gates 1>; + clocks = <&ccu CLK_AHB_EHCI>; phys = <&usbphy 1>; phy-names = "usb"; status = "disabled"; @@ -517,7 +228,7 @@ compatible = "allwinner,sun5i-a13-ohci", "generic-ohci"; reg = <0x01c14400 0x100>; interrupts = <40>; - clocks = <&usb_clk 6>, <&ahb_gates 2>; + clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>; phys = <&usbphy 1>; phy-names = "usb"; status = "disabled"; @@ -527,7 +238,7 @@ compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; interrupts = <12>; - clocks = <&ahb_gates 22>, <&spi2_clk>; + clocks = <&ccu CLK_AHB_SPI2>, <&ccu CLK_SPI2>; clock-names = "ahb", "mod"; dmas = <&dma SUN4I_DMA_DEDICATED 29>, <&dma SUN4I_DMA_DEDICATED 28>; @@ -537,6 +248,14 @@ #size-cells = <0>; }; + ccu: clock@01c20000 { + reg = <0x01c20000 0x400>; + clocks = <&osc24M>, <&osc32k>; + clock-names = "hosc", "losc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + intc: interrupt-controller@01c20400 { compatible = "allwinner,sun4i-a10-ic"; reg = <0x01c20400 0x400>; @@ -547,7 +266,7 @@ pio: pinctrl@01c20800 { reg = <0x01c20800 0x400>; interrupts = <28>; - clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>; + clocks = <&ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>; clock-names = "apb", "hosc", "losc"; gpio-controller; interrupt-controller; @@ -632,7 +351,7 @@ compatible = 
"allwinner,sun4i-a10-timer"; reg = <0x01c20c00 0x90>; interrupts = <22>; - clocks = <&osc24M>; + clocks = <&ccu CLK_HOSC>; }; wdt: watchdog@01c20c90 { @@ -652,7 +371,7 @@ compatible = "allwinner,sun4i-a10-codec"; reg = <0x01c22c00 0x40>; interrupts = <30>; - clocks = <&apb0_gates 0>, <&codec_clk>; + clocks = <&ccu CLK_APB0_CODEC>, <&ccu CLK_CODEC>; clock-names = "apb", "codec"; dmas = <&dma SUN4I_DMA_NORMAL 19>, <&dma SUN4I_DMA_NORMAL 19>; @@ -678,7 +397,7 @@ interrupts = <2>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 17>; + clocks = <&ccu CLK_APB1_UART1>; status = "disabled"; }; @@ -688,7 +407,7 @@ interrupts = <4>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 19>; + clocks = <&ccu CLK_APB1_UART3>; status = "disabled"; }; @@ -696,7 +415,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2ac00 0x400>; interrupts = <7>; - clocks = <&apb1_gates 0>; + clocks = <&ccu CLK_APB1_I2C0>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -706,7 +425,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2b000 0x400>; interrupts = <8>; - clocks = <&apb1_gates 1>; + clocks = <&ccu CLK_APB1_I2C1>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -716,7 +435,7 @@ compatible = "allwinner,sun4i-a10-i2c"; reg = <0x01c2b400 0x400>; interrupts = <9>; - clocks = <&apb1_gates 2>; + clocks = <&ccu CLK_APB1_I2C2>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -726,7 +445,7 @@ compatible = "allwinner,sun5i-a13-hstimer"; reg = <0x01c60000 0x1000>; interrupts = <82>, <83>; - clocks = <&ahb_gates 28>; + clocks = <&ccu CLK_AHB_HSTIMER>; }; }; }; diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index 03f2ab47ece0..15b6d122f878 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -48,6 +48,13 @@ #include <dt-bindings/pinctrl/sun4i-a10.h> +#include <dt-bindings/clock/sun9i-a80-ccu.h> +#include <dt-bindings/clock/sun9i-a80-de.h> +#include <dt-bindings/clock/sun9i-a80-usb.h> +#include <dt-bindings/reset/sun9i-a80-ccu.h> +#include <dt-bindings/reset/sun9i-a80-de.h> +#include <dt-bindings/reset/sun9i-a80-usb.h> + / { interrupt-parent = <&gic>; @@ -159,228 +166,13 @@ clock-output-names = "osc32k"; }; - usb_mod_clk: clk@00a08000 { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "allwinner,sun9i-a80-usb-mod-clk"; - reg = <0x00a08000 0x4>; - clocks = <&ahb1_gates 1>; - clock-output-names = "usb0_ahb", "usb_ohci0", - "usb1_ahb", "usb_ohci1", - "usb2_ahb", "usb_ohci2"; - }; - - usb_phy_clk: clk@00a08004 { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "allwinner,sun9i-a80-usb-phy-clk"; - reg = <0x00a08004 0x4>; - clocks = <&ahb1_gates 1>; - clock-output-names = "usb_phy0", "usb_hsic1_480M", - "usb_phy1", "usb_hsic2_480M", - "usb_phy2", "usb_hsic_12M"; - }; - - pll3: clk@06000008 { - /* placeholder until implemented */ - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-rate = <0>; - clock-output-names = "pll3"; - }; - - pll4: clk@0600000c { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-pll4-clk"; - reg = <0x0600000c 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll4"; - }; - - pll12: clk@0600002c { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-pll4-clk"; - reg = <0x0600002c 0x4>; - clocks = <&osc24M>; - clock-output-names = "pll12"; - }; - - gt_clk: clk@0600005c { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-gt-clk"; - reg = <0x0600005c 0x4>; - clocks = <&osc24M>, <&pll4>, <&pll12>, <&pll12>; - 
clock-output-names = "gt"; - }; - - ahb0: clk@06000060 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-ahb-clk"; - reg = <0x06000060 0x4>; - clocks = <&gt_clk>, <&pll4>, <&pll12>, <&pll12>; - clock-output-names = "ahb0"; - }; - - ahb1: clk@06000064 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-ahb-clk"; - reg = <0x06000064 0x4>; - clocks = <&gt_clk>, <&pll4>, <&pll12>, <&pll12>; - clock-output-names = "ahb1"; - }; - - ahb2: clk@06000068 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-ahb-clk"; - reg = <0x06000068 0x4>; - clocks = <&gt_clk>, <&pll4>, <&pll12>, <&pll12>; - clock-output-names = "ahb2"; - }; - - apb0: clk@06000070 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-apb0-clk"; - reg = <0x06000070 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "apb0"; - }; - - apb1: clk@06000074 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-apb1-clk"; - reg = <0x06000074 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "apb1"; - }; - - cci400_clk: clk@06000078 { - #clock-cells = <0>; - compatible = "allwinner,sun9i-a80-gt-clk"; - reg = <0x06000078 0x4>; - clocks = <&osc24M>, <&pll4>, <&pll12>, <&pll12>; - clock-output-names = "cci400"; - }; - - mmc0_clk: clk@06000410 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-mmc-clk"; - reg = <0x06000410 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "mmc0", "mmc0_output", - "mmc0_sample"; - }; - - mmc1_clk: clk@06000414 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-mmc-clk"; - reg = <0x06000414 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "mmc1", "mmc1_output", - "mmc1_sample"; - }; - - mmc2_clk: clk@06000418 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-mmc-clk"; - reg = <0x06000418 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "mmc2", "mmc2_output", - "mmc2_sample"; - }; - - mmc3_clk: clk@0600041c { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-mmc-clk"; - reg = <0x0600041c 0x4>; - clocks = <&osc24M>, <&pll4>; - clock-output-names = "mmc3", "mmc3_output", - "mmc3_sample"; - }; - - ahb0_gates: clk@06000580 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-ahb0-gates-clk"; - reg = <0x06000580 0x4>; - clocks = <&ahb0>; - clock-indices = <0>, <1>, <3>, - <5>, <8>, <12>, - <13>, <14>, - <15>, <16>, <18>, - <20>, <21>, <22>, - <23>; - clock-output-names = "ahb0_fd", "ahb0_ve", "ahb0_gpu", - "ahb0_ss", "ahb0_sd", "ahb0_nand1", - "ahb0_nand0", "ahb0_sdram", - "ahb0_mipi_hsi", "ahb0_sata", "ahb0_ts", - "ahb0_spi0", "ahb0_spi1", "ahb0_spi2", - "ahb0_spi3"; - }; - - ahb1_gates: clk@06000584 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-ahb1-gates-clk"; - reg = <0x06000584 0x4>; - clocks = <&ahb1>; - clock-indices = <0>, <1>, - <17>, <21>, - <22>, <23>, - <24>; - clock-output-names = "ahb1_usbotg", "ahb1_usbhci", - "ahb1_gmac", "ahb1_msgbox", - "ahb1_spinlock", "ahb1_hstimer", - "ahb1_dma"; - }; - - ahb2_gates: clk@06000588 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-ahb2-gates-clk"; - reg = <0x06000588 0x4>; - clocks = <&ahb2>; - clock-indices = <0>, <1>, - <2>, <4>, <5>, - <7>, <8>, <11>; - clock-output-names = "ahb2_lcd0", "ahb2_lcd1", - "ahb2_edp", "ahb2_csi", "ahb2_hdmi", - "ahb2_de", "ahb2_mp", "ahb2_mipi_dsi"; - }; - - apb0_gates: clk@06000590 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-apb0-gates-clk"; - reg = <0x06000590 0x4>; - clocks = <&apb0>; - clock-indices = <1>, <5>, - <11>, <12>, <13>, - <15>, <17>, <18>, - <19>; - 
clock-output-names = "apb0_spdif", "apb0_pio", - "apb0_ac97", "apb0_i2s0", "apb0_i2s1", - "apb0_lradc", "apb0_gpadc", "apb0_twd", - "apb0_cirtx"; - }; - - apb1_gates: clk@06000594 { - #clock-cells = <1>; - compatible = "allwinner,sun9i-a80-apb1-gates-clk"; - reg = <0x06000594 0x4>; - clocks = <&apb1>; - clock-indices = <0>, <1>, - <2>, <3>, <4>, - <16>, <17>, - <18>, <19>, - <20>, <21>; - clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_i2c3", "apb1_i2c4", - "apb1_uart0", "apb1_uart1", - "apb1_uart2", "apb1_uart3", - "apb1_uart4", "apb1_uart5"; - }; - cpus_clk: clk@08001410 { compatible = "allwinner,sun9i-a80-cpus-clk"; reg = <0x08001410 0x4>; #clock-cells = <0>; - clocks = <&osc32k>, <&osc24M>, <&pll4>, <&pll3>; + clocks = <&osc32k>, <&osc24M>, + <&ccu CLK_PLL_PERIPH0>, + <&ccu CLK_PLL_AUDIO>; clock-output-names = "cpus"; }; @@ -453,8 +245,8 @@ compatible = "allwinner,sun9i-a80-ehci", "generic-ehci"; reg = <0x00a00000 0x100>; interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&usb_mod_clk 1>; - resets = <&usb_mod_clk 17>; + clocks = <&usb_clocks CLK_BUS_HCI0>; + resets = <&usb_clocks RST_USB0_HCI>; phys = <&usbphy1>; phy-names = "usb"; status = "disabled"; @@ -464,8 +256,9 @@ compatible = "allwinner,sun9i-a80-ohci", "generic-ohci"; reg = <0x00a00400 0x100>; interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&usb_mod_clk 1>, <&usb_mod_clk 2>; - resets = <&usb_mod_clk 17>; + clocks = <&usb_clocks CLK_BUS_HCI0>, + <&usb_clocks CLK_USB_OHCI0>; + resets = <&usb_clocks RST_USB0_HCI>; phys = <&usbphy1>; phy-names = "usb"; status = "disabled"; @@ -474,9 +267,9 @@ usbphy1: phy@00a00800 { compatible = "allwinner,sun9i-a80-usb-phy"; reg = <0x00a00800 0x4>; - clocks = <&usb_phy_clk 1>; + clocks = <&usb_clocks CLK_USB0_PHY>; clock-names = "phy"; - resets = <&usb_phy_clk 17>; + resets = <&usb_clocks RST_USB0_PHY>; reset-names = "phy"; status = "disabled"; #phy-cells = <0>; @@ -486,8 +279,8 @@ compatible = "allwinner,sun9i-a80-ehci", "generic-ehci"; reg = <0x00a01000 0x100>; interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&usb_mod_clk 3>; - resets = <&usb_mod_clk 18>; + clocks = <&usb_clocks CLK_BUS_HCI1>; + resets = <&usb_clocks RST_USB1_HCI>; phys = <&usbphy2>; phy-names = "usb"; status = "disabled"; @@ -496,11 +289,16 @@ usbphy2: phy@00a01800 { compatible = "allwinner,sun9i-a80-usb-phy"; reg = <0x00a01800 0x4>; - clocks = <&usb_phy_clk 2>, <&usb_phy_clk 10>, - <&usb_phy_clk 3>; - clock-names = "hsic_480M", "hsic_12M", "phy"; - resets = <&usb_phy_clk 18>, <&usb_phy_clk 19>; - reset-names = "hsic", "phy"; + clocks = <&usb_clocks CLK_USB1_HSIC>, + <&usb_clocks CLK_USB_HSIC>, + <&usb_clocks CLK_USB1_PHY>; + clock-names = "hsic_480M", + "hsic_12M", + "phy"; + resets = <&usb_clocks RST_USB1_HSIC>, + <&usb_clocks RST_USB1_PHY>; + reset-names = "hsic", + "phy"; status = "disabled"; #phy-cells = <0>; /* usb1 is always used with HSIC */ @@ -511,8 +309,8 @@ compatible = "allwinner,sun9i-a80-ehci", "generic-ehci"; reg = <0x00a02000 0x100>; interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&usb_mod_clk 5>; - resets = <&usb_mod_clk 19>; + clocks = <&usb_clocks CLK_BUS_HCI2>; + resets = <&usb_clocks RST_USB2_HCI>; phys = <&usbphy3>; phy-names = "usb"; status = "disabled"; @@ -522,8 +320,9 @@ compatible = "allwinner,sun9i-a80-ohci", "generic-ohci"; reg = <0x00a02400 0x100>; interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&usb_mod_clk 5>, <&usb_mod_clk 6>; - resets = <&usb_mod_clk 19>; + clocks = <&usb_clocks CLK_BUS_HCI2>, + <&usb_clocks 
CLK_USB_OHCI2>; + resets = <&usb_clocks RST_USB2_HCI>; phys = <&usbphy3>; phy-names = "usb"; status = "disabled"; @@ -532,20 +331,35 @@ usbphy3: phy@00a02800 { compatible = "allwinner,sun9i-a80-usb-phy"; reg = <0x00a02800 0x4>; - clocks = <&usb_phy_clk 4>, <&usb_phy_clk 10>, - <&usb_phy_clk 5>; - clock-names = "hsic_480M", "hsic_12M", "phy"; - resets = <&usb_phy_clk 20>, <&usb_phy_clk 21>; - reset-names = "hsic", "phy"; + clocks = <&usb_clocks CLK_USB2_HSIC>, + <&usb_clocks CLK_USB_HSIC>, + <&usb_clocks CLK_USB2_PHY>; + clock-names = "hsic_480M", + "hsic_12M", + "phy"; + resets = <&usb_clocks RST_USB2_HSIC>, + <&usb_clocks RST_USB2_PHY>; + reset-names = "hsic", + "phy"; status = "disabled"; #phy-cells = <0>; }; + usb_clocks: clock@00a08000 { + compatible = "allwinner,sun9i-a80-usb-clks"; + reg = <0x00a08000 0x8>; + clocks = <&ccu CLK_BUS_USB>, <&osc24M>; + clock-names = "bus", "hosc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + mmc0: mmc@01c0f000 { compatible = "allwinner,sun9i-a80-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&mmc_config_clk 0>, <&mmc0_clk 0>, - <&mmc0_clk 1>, <&mmc0_clk 2>; + clocks = <&mmc_config_clk 0>, <&ccu CLK_MMC0>, + <&ccu CLK_MMC0_OUTPUT>, + <&ccu CLK_MMC0_SAMPLE>; clock-names = "ahb", "mmc", "output", "sample"; resets = <&mmc_config_clk 0>; reset-names = "ahb"; @@ -558,8 +372,9 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun9i-a80-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&mmc_config_clk 1>, <&mmc1_clk 0>, - <&mmc1_clk 1>, <&mmc1_clk 2>; + clocks = <&mmc_config_clk 1>, <&ccu CLK_MMC1>, + <&ccu CLK_MMC1_OUTPUT>, + <&ccu CLK_MMC1_SAMPLE>; clock-names = "ahb", "mmc", "output", "sample"; resets = <&mmc_config_clk 1>; reset-names = "ahb"; @@ -572,8 +387,9 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun9i-a80-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&mmc_config_clk 2>, <&mmc2_clk 0>, - <&mmc2_clk 1>, <&mmc2_clk 2>; + clocks = <&mmc_config_clk 2>, <&ccu CLK_MMC2>, + <&ccu CLK_MMC2_OUTPUT>, + <&ccu CLK_MMC2_SAMPLE>; clock-names = "ahb", "mmc", "output", "sample"; resets = <&mmc_config_clk 2>; reset-names = "ahb"; @@ -586,8 +402,9 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun9i-a80-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&mmc_config_clk 3>, <&mmc3_clk 0>, - <&mmc3_clk 1>, <&mmc3_clk 2>; + clocks = <&mmc_config_clk 3>, <&ccu CLK_MMC3>, + <&ccu CLK_MMC3_OUTPUT>, + <&ccu CLK_MMC3_SAMPLE>; clock-names = "ahb", "mmc", "output", "sample"; resets = <&mmc_config_clk 3>; reset-names = "ahb"; @@ -600,9 +417,9 @@ mmc_config_clk: clk@01c13000 { compatible = "allwinner,sun9i-a80-mmc-config-clk"; reg = <0x01c13000 0x10>; - clocks = <&ahb0_gates 8>; + clocks = <&ccu CLK_BUS_MMC>; clock-names = "ahb"; - resets = <&ahb0_resets 8>; + resets = <&ccu RST_BUS_MMC>; reset-names = "ahb"; #clock-cells = <1>; #reset-cells = <1>; @@ -621,34 +438,27 @@ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; }; - ahb0_resets: reset@060005a0 { - #reset-cells = <1>; - compatible = "allwinner,sun6i-a31-clock-reset"; - reg = <0x060005a0 0x4>; - }; - - ahb1_resets: reset@060005a4 { - #reset-cells = <1>; - compatible = "allwinner,sun6i-a31-clock-reset"; - reg = <0x060005a4 0x4>; - }; - - ahb2_resets: reset@060005a8 { - #reset-cells = <1>; - compatible = "allwinner,sun6i-a31-clock-reset"; - reg = <0x060005a8 0x4>; - }; - - apb0_resets: reset@060005b0 { + de_clocks: clock@03000000 { + compatible = "allwinner,sun9i-a80-de-clks"; + reg = <0x03000000 0x30>; + clocks = <&ccu CLK_DE>, + <&ccu CLK_SDRAM>, + <&ccu CLK_BUS_DE>; + clock-names = "mod", + "dram", + 
"bus"; + resets = <&ccu RST_BUS_DE>; + #clock-cells = <1>; #reset-cells = <1>; - compatible = "allwinner,sun6i-a31-clock-reset"; - reg = <0x060005b0 0x4>; }; - apb1_resets: reset@060005b4 { + ccu: clock@06000000 { + compatible = "allwinner,sun9i-a80-ccu"; + reg = <0x06000000 0x800>; + clocks = <&osc24M>, <&osc32k>; + clock-names = "hosc", "losc"; + #clock-cells = <1>; #reset-cells = <1>; - compatible = "allwinner,sun6i-a31-clock-reset"; - reg = <0x060005b4 0x4>; }; timer@06000c00 { @@ -678,7 +488,7 @@ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>; + clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>; clock-names = "apb", "hosc", "losc"; gpio-controller; interrupt-controller; @@ -734,8 +544,8 @@ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 16>; - resets = <&apb1_resets 16>; + clocks = <&ccu CLK_BUS_UART0>; + resets = <&ccu RST_BUS_UART0>; status = "disabled"; }; @@ -745,8 +555,8 @@ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 17>; - resets = <&apb1_resets 17>; + clocks = <&ccu CLK_BUS_UART1>; + resets = <&ccu RST_BUS_UART1>; status = "disabled"; }; @@ -756,8 +566,8 @@ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 18>; - resets = <&apb1_resets 18>; + clocks = <&ccu CLK_BUS_UART2>; + resets = <&ccu RST_BUS_UART2>; status = "disabled"; }; @@ -767,8 +577,8 @@ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 19>; - resets = <&apb1_resets 19>; + clocks = <&ccu CLK_BUS_UART3>; + resets = <&ccu RST_BUS_UART3>; status = "disabled"; }; @@ -778,8 +588,8 @@ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 20>; - resets = <&apb1_resets 20>; + clocks = <&ccu CLK_BUS_UART4>; + resets = <&ccu RST_BUS_UART4>; status = "disabled"; }; @@ -789,8 +599,8 @@ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; - clocks = <&apb1_gates 21>; - resets = <&apb1_resets 21>; + clocks = <&ccu CLK_BUS_UART5>; + resets = <&ccu RST_BUS_UART5>; status = "disabled"; }; @@ -798,8 +608,8 @@ compatible = "allwinner,sun6i-a31-i2c"; reg = <0x07002800 0x400>; interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb1_gates 0>; - resets = <&apb1_resets 0>; + clocks = <&ccu CLK_BUS_I2C0>; + resets = <&ccu RST_BUS_I2C0>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -809,8 +619,8 @@ compatible = "allwinner,sun6i-a31-i2c"; reg = <0x07002c00 0x400>; interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb1_gates 1>; - resets = <&apb1_resets 1>; + clocks = <&ccu CLK_BUS_I2C1>; + resets = <&ccu RST_BUS_I2C1>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -820,8 +630,8 @@ compatible = "allwinner,sun6i-a31-i2c"; reg = <0x07003000 0x400>; interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb1_gates 2>; - resets = <&apb1_resets 2>; + clocks = <&ccu CLK_BUS_I2C2>; + resets = <&ccu RST_BUS_I2C2>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -831,8 +641,8 @@ compatible = "allwinner,sun6i-a31-i2c"; reg = <0x07003400 0x400>; interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb1_gates 3>; - resets = <&apb1_resets 3>; + clocks = <&ccu CLK_BUS_I2C3>; + resets = <&ccu RST_BUS_I2C3>; status = "disabled"; #address-cells = <1>; #size-cells 
= <0>; @@ -842,8 +652,8 @@ compatible = "allwinner,sun6i-a31-i2c"; reg = <0x07003800 0x400>; interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb1_gates 4>; - resets = <&apb1_resets 4>; + clocks = <&ccu CLK_BUS_I2C4>; + resets = <&ccu RST_BUS_I2C4>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index 46730017b3c5..57f3b7512636 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -13,7 +13,8 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <uapi/linux/sched/types.h> #include <linux/interrupt.h> #include <linux/cpu_pm.h> #include <linux/cpu.h> diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 75055df1cda3..9b1b7be2ec0e 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -452,7 +452,7 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask) return arm_dma_ops.set_dma_mask(dev, dma_mask); } -static struct dma_map_ops dmabounce_ops = { +static const struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index a923524d1040..cf062472e07b 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -144,7 +144,7 @@ extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) { - unsigned long val = ptr ? virt_to_phys(ptr) : 0; + unsigned long val = ptr ? __pa_symbol(ptr) : 0; mcpm_entry_vectors[cluster][cpu] = val; sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); } @@ -299,8 +299,8 @@ void mcpm_cpu_power_down(void) * the kernel as if the power_up method just had deasserted reset * on the CPU. 
*/ - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); - phys_reset(virt_to_phys(mcpm_entry_point)); + phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); + phys_reset(__pa_symbol(mcpm_entry_point)); /* should never get here */ BUG(); @@ -388,8 +388,8 @@ static int __init nocache_trampoline(unsigned long _arg) __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); __mcpm_cpu_down(cpu, cluster); - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); - phys_reset(virt_to_phys(mcpm_entry_point)); + phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); + phys_reset(__pa_symbol(mcpm_entry_point)); BUG(); } @@ -449,7 +449,7 @@ int __init mcpm_sync_init( sync_cache_w(&mcpm_sync); if (power_up_setup) { - mcpm_power_up_setup_phys = virt_to_phys(power_up_setup); + mcpm_power_up_setup_phys = __pa_symbol(power_up_setup); sync_cache_w(&mcpm_power_up_setup_phys); } diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index a3cb76cfb828..b2ddd534867f 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -18,9 +18,8 @@ CONFIG_EMBEDDED=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set -CONFIG_ARCH_MULTI_V4T=y +CONFIG_ARCH_MULTI_V4=y # CONFIG_ARCH_MULTI_V7 is not set -CONFIG_KEYBOARD_GPIO_POLLED=y CONFIG_ARCH_MOXART=y CONFIG_MACH_UC7112LX=y CONFIG_PREEMPT=y @@ -94,12 +93,10 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -CONFIG_DEBUG_GPIO=y -CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MOXART=y -CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y @@ -107,10 +104,13 @@ CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_MOXART_WDT=y # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y -CONFIG_MMC_SDHCI_MOXART=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_MOXART=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_ONESHOT=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 4111592f0130..220ba207be91 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -7,7 +7,6 @@ #define ASMARM_DEVICE_H struct dev_archdata { - struct dma_map_ops *dma_ops; #ifdef CONFIG_DMABOUNCE struct dmabounce_device_info *dmabounce; #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bf02dbd9ccda..716656925975 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -13,28 +13,22 @@ #include <asm/xen/hypervisor.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops arm_dma_ops; -extern struct dma_map_ops arm_coherent_dma_ops; +extern const struct dma_map_ops arm_dma_ops; +extern const struct dma_map_ops arm_coherent_dma_ops; -static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) +static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; + if (dev && dev->dma_ops) + return dev->dma_ops; return &arm_dma_ops; } -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { if (xen_initial_domain()) return xen_dma_ops; else - return __generic_dma_ops(dev); -} - -static 
inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - BUG_ON(!dev); - dev->archdata.dma_ops = ops; + return __generic_dma_ops(NULL); } #define HAVE_ARCH_DMA_SUPPORTED 1 diff --git a/arch/arm/include/asm/hardware/cache-uniphier.h b/arch/arm/include/asm/hardware/cache-uniphier.h index eaa60da7dac3..0ef42ae75b6c 100644 --- a/arch/arm/include/asm/hardware/cache-uniphier.h +++ b/arch/arm/include/asm/hardware/cache-uniphier.h @@ -16,7 +16,7 @@ #ifndef __CACHE_UNIPHIER_H #define __CACHE_UNIPHIER_H -#include <linux/types.h> +#include <linux/errno.h> #ifdef CONFIG_CACHE_UNIPHIER int uniphier_cache_init(void); diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 3ea9be559726..59655459da59 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -16,6 +16,9 @@ #ifndef _ARM_KPROBES_H #define _ARM_KPROBES_H +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> #include <linux/notifier.h> @@ -83,4 +86,5 @@ struct arch_optimized_insn { */ }; +#endif /* CONFIG_KPROBES */ #endif /* _ARM_KPROBES_H */ diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h index 4ca69fe2c850..bada3f845a97 100644 --- a/arch/arm/include/asm/mach/flash.h +++ b/arch/arm/include/asm/mach/flash.h @@ -22,7 +22,7 @@ struct mtd_info; * set_vpp: method called to enable or disable VPP * mmcontrol: method called to enable or disable Sync. Burst Read in OneNAND * parts: optional array of mtd_partitions for static partitioning - * nr_parts: number of mtd_partitions for static partitoning + * nr_parts: number of mtd_partitions for static partitioning */ struct flash_platform_data { const char *map_name; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 76cbd9c674df..1f54e4e98c1e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -83,8 +83,15 @@ #define IOREMAP_MAX_ORDER 24 #endif +#define VECTORS_BASE UL(0xffff0000) + #else /* CONFIG_MMU */ +#ifndef __ASSEMBLY__ +extern unsigned long vectors_base; +#define VECTORS_BASE vectors_base +#endif + /* * The limitation of user task size can grow up to the end of free ram region. 
* It is difficult to define and perhaps will never meet the original meaning @@ -111,6 +118,13 @@ #endif /* !CONFIG_MMU */ +#ifdef CONFIG_XIP_KERNEL +#define KERNEL_START _sdata +#else +#define KERNEL_START _stext +#endif +#define KERNEL_END _end + /* * We fix the TCM memories max 32 KiB ITCM resp DTCM at these * locations @@ -206,7 +220,7 @@ extern const void *__pv_table_begin, *__pv_table_end; : "r" (x), "I" (__PV_BITS_31_24) \ : "cc") -static inline phys_addr_t __virt_to_phys(unsigned long x) +static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x) { phys_addr_t t; @@ -238,7 +252,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #define PHYS_OFFSET PLAT_PHYS_OFFSET #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) -static inline phys_addr_t __virt_to_phys(unsigned long x) +static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x) { return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; } @@ -254,6 +268,16 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ PHYS_PFN_OFFSET) +#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x)) + +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __virt_to_phys(unsigned long x); +extern phys_addr_t __phys_addr_symbol(unsigned long x); +#else +#define __virt_to_phys(x) __virt_to_phys_nodebug(x) +#define __phys_addr_symbol(x) __pa_symbol_nodebug(x) +#endif + /* * These are *only* valid on the kernel direct mapped RAM memory. * Note: Drivers should NOT use these. They are the wrong @@ -276,6 +300,7 @@ static inline void *phys_to_virt(phys_addr_t x) * Drivers should NOT use these either. */ #define __pa(x) __virt_to_phys((unsigned long)(x)) +#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 3cc14dd8587c..7f303295ef19 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -15,7 +15,9 @@ #include <linux/compiler.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/preempt.h> + #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/proc-fns.h> diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index add094d09e3e..302240c19a5a 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -63,9 +63,9 @@ typedef pte_t *pte_addr_t; /* * Mark the prot value as uncacheable and unbufferable. 
*/ -#define pgprot_noncached(prot) __pgprot(0) -#define pgprot_writecombine(prot) __pgprot(0) -#define pgprot_dmacoherent(prot) __pgprot(0) +#define pgprot_noncached(prot) (prot) +#define pgprot_writecombine(prot) (prot) +#define pgprot_dmacoherent(prot) (prot) /* diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index def9e570199f..1897b5196fb5 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,6 +10,10 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H +#ifndef __ASSEMBLY__ +# include <linux/mm_types.h> +#endif + #ifdef CONFIG_MMU #include <asm/glue.h> @@ -644,9 +648,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #elif defined(CONFIG_SMP) /* !CONFIG_MMU */ #ifndef __ASSEMBLY__ - -#include <linux/mm_types.h> - static inline void local_flush_tlb_all(void) { } static inline void local_flush_tlb_mm(struct mm_struct *mm) { } static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 6b4eb27b8758..2e21e08de747 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -152,11 +152,6 @@ __after_proc_init: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif -#ifdef CONFIG_CPU_HIGH_VECTOR - orr r0, r0, #CR_V -#else - bic r0, r0, #CR_V -#endif mcr p15, 0, r0, c1, c0, 0 @ write control reg #elif defined (CONFIG_CPU_V7M) /* For V7M systems we want to modify the CCR similarly to the SCTLR */ diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 4f14b5ce6535..80254b47dc34 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, break; case R_ARM_PREL31: - offset = *(u32 *)loc + sym->st_value - loc; - *(u32 *)loc = offset & 0x7fffffff; + offset = (*(s32 *)loc << 1) >> 1; /* sign extend */ + offset += sym->st_value - loc; + if (offset >= 0x40000000 || offset < -0x40000000) { + pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", + module->name, relindex, i, symname, + ELF32_R_TYPE(rel->r_info), loc, + sym->st_value); + return -ENOEXEC; + } + *(u32 *)loc &= 0x80000000; + *(u32 *)loc |= offset & 0x7fffffff; break; case R_ARM_MOVW_ABS_NC: diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index 592dda3f21ff..c366b83bf955 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -3,6 +3,7 @@ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/bug.h> +#include <linux/sched/task_stack.h> #include <asm/perf_regs.h> #include <asm/ptrace.h> diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 91d2d5b01414..939e8b58c59d 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -12,6 +12,9 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ae738a6319f6..58e3771e4c5b 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -10,7 +10,8 @@ * published by the Free Software Foundation. 
*/ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/elf.h> #include <linux/smp.h> diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 34e3f3c45634..f4e54503afa9 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup); extern void init_default_cache_policy(unsigned long); extern void paging_init(const struct machine_desc *desc); extern void early_paging_init(const struct machine_desc *); -extern void sanity_check_meminfo(void); +extern void adjust_lowmem_bounds(void); extern enum reboot_mode reboot_mode; extern void setup_dma_zone(const struct machine_desc *desc); @@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p) setup_dma_zone(mdesc); xen_early_init(); efi_init(); - sanity_check_meminfo(); + /* + * Make sure the calculation for lowmem/highmem is set appropriately + * before reserving/allocating any mmeory + */ + adjust_lowmem_bounds(); arm_memblock_init(mdesc); + /* Memory may have been removed so recalculate the bounds. */ + adjust_lowmem_bounds(); early_ioremap_reset(); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7dd14e8395e6..572a8df1b766 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -11,7 +11,9 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/profile.h> @@ -251,7 +253,7 @@ void __cpu_die(unsigned int cpu) pr_err("CPU%u: cpu didn't die\n", cpu); return; } - pr_notice("CPU%u: shutdown\n", cpu); + pr_debug("CPU%u: shutdown\n", cpu); /* * platform_cpu_kill() is generally expected to do the powering off @@ -371,7 +373,7 @@ asmlinkage void secondary_start_kernel(void) * reference and switch to it. 
*/ cpu = smp_processor_id(); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 92b72375c4c7..3a2fa203637a 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,5 +1,6 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/stacktrace.h> diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 9a2f882a0a2d..ef794c799cb6 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,5 +1,6 @@ #include <linux/init.h> #include <linux/slab.h> +#include <linux/mm_types.h> #include <asm/cacheflush.h> #include <asm/idmap.h> diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 853221f81104..3bda08bee674 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -23,6 +23,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/syscalls.h> #include <linux/perf_event.h> diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 5f221acd21ae..b9786f491873 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -76,6 +76,7 @@ #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/cred.h> #include <linux/fcntl.h> #include <linux/eventpoll.h> #include <linux/sem.h> diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index ebf47d91b804..f8a3ab82e77f 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -21,6 +21,7 @@ #include <linux/nodemask.h> #include <linux/of.h> #include <linux/sched.h> +#include <linux/sched/topology.h> #include <linux/slab.h> #include <linux/string.h> diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 9688ec0c6ef4..948c648fea00 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -24,7 +24,9 @@ #include <linux/bug.h> #include <linux/delay.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/irq.h> #include <linux/atomic.h> diff --git a/arch/arm/mach-alpine/platsmp.c b/arch/arm/mach-alpine/platsmp.c index dd77ea25e7ca..6dc6d491f88a 100644 --- a/arch/arm/mach-alpine/platsmp.c +++ b/arch/arm/mach-alpine/platsmp.c @@ -27,7 +27,7 @@ static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t addr; - addr = virt_to_phys(secondary_startup); + addr = __pa_symbol(secondary_startup); if (addr > (phys_addr_t)(uint32_t)(-1)) { pr_err("FAIL: resume address over 32bit (%pa)", &addr); diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c index ffbd71d45008..502e3df69f69 100644 --- a/arch/arm/mach-axxia/platsmp.c +++ b/arch/arm/mach-axxia/platsmp.c @@ -25,7 +25,7 @@ static void write_release_addr(u32 release_phys) { u32 *virt = (u32 *) phys_to_virt(release_phys); - writel_relaxed(virt_to_phys(secondary_startup), virt); + writel_relaxed(__pa_symbol(secondary_startup), virt); /* Make sure this store is visible to other CPUs */ smp_wmb(); __cpuc_flush_dcache_area(virt, sizeof(u32)); diff --git a/arch/arm/mach-bcm/bcm63xx_smp.c b/arch/arm/mach-bcm/bcm63xx_smp.c index 9b6727ed68cd..f5fb10b4376f 100644 --- a/arch/arm/mach-bcm/bcm63xx_smp.c +++ b/arch/arm/mach-bcm/bcm63xx_smp.c @@ 
-135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu, } /* Write the secondary init routine to the BootLUT reset vector */ - val = virt_to_phys(secondary_startup); + val = __pa_symbol(secondary_startup); writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT); /* Power up the core, will jump straight to its reset vector when we diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c index 40dc8448445e..12379960e982 100644 --- a/arch/arm/mach-bcm/platsmp-brcmstb.c +++ b/arch/arm/mach-bcm/platsmp-brcmstb.c @@ -151,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu) * Set the reset vector to point to the secondary_startup * routine */ - cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup)); + cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup)); /* Unhalt the cpu */ cpu_rst_cfg_set(cpu, 0); diff --git a/arch/arm/mach-bcm/platsmp.c b/arch/arm/mach-bcm/platsmp.c index 3ac3a9bc663c..9e3f275934eb 100644 --- a/arch/arm/mach-bcm/platsmp.c +++ b/arch/arm/mach-bcm/platsmp.c @@ -21,6 +21,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/smp.h> #include <asm/cacheflush.h> @@ -116,7 +117,7 @@ static int nsp_write_lut(unsigned int cpu) return -ENOMEM; } - secondary_startup_phy = virt_to_phys(secondary_startup); + secondary_startup_phy = __pa_symbol(secondary_startup); BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); writel_relaxed(secondary_startup_phy, sku_rom_lut); @@ -189,7 +190,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle) * Secondary cores will start in secondary_startup(), * defined in "arch/arm/kernel/head.S" */ - boot_func = virt_to_phys(secondary_startup); + boot_func = __pa_symbol(secondary_startup); BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK); BUG_ON(boot_func > (phys_addr_t)U32_MAX); diff --git a/arch/arm/mach-berlin/platsmp.c b/arch/arm/mach-berlin/platsmp.c index 93f90688db18..7586b7aec272 100644 --- a/arch/arm/mach-berlin/platsmp.c +++ b/arch/arm/mach-berlin/platsmp.c @@ -15,6 +15,7 @@ #include <asm/cacheflush.h> #include <asm/cp15.h> +#include <asm/memory.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> @@ -75,7 +76,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus) if (!cpu_ctrl) goto unmap_scu; - vectors_base = ioremap(CONFIG_VECTORS_BASE, SZ_32K); + vectors_base = ioremap(VECTORS_BASE, SZ_32K); if (!vectors_base) goto unmap_scu; @@ -92,7 +93,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus) * Write the secondary startup address into the SW reset address * vector. This is used by boot_inst. 
*/ - writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR); + writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR); iounmap(vectors_base); unmap_scu: diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index 3b39ea353d30..8a5b6f059498 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> -#include <linux/platform_data/rtc-m48t86.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> @@ -45,16 +44,6 @@ static struct map_desc ts72xx_io_desc[] __initdata = { .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE), .length = TS72XX_OPTIONS2_SIZE, .type = MT_DEVICE, - }, { - .virtual = (unsigned long)TS72XX_RTC_INDEX_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_RTC_INDEX_PHYS_BASE), - .length = TS72XX_RTC_INDEX_SIZE, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)TS72XX_RTC_DATA_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_RTC_DATA_PHYS_BASE), - .length = TS72XX_RTC_DATA_SIZE, - .type = MT_DEVICE, } }; @@ -179,31 +168,22 @@ static void __init ts72xx_register_flash(void) } } +/************************************************************************* + * RTC M48T86 + *************************************************************************/ +#define TS72XX_RTC_INDEX_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x00800000) +#define TS72XX_RTC_DATA_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x01700000) -static unsigned char ts72xx_rtc_readbyte(unsigned long addr) -{ - __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); - return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE); -} - -static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr) -{ - __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); - __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE); -} - -static struct m48t86_ops ts72xx_rtc_ops = { - .readbyte = ts72xx_rtc_readbyte, - .writebyte = ts72xx_rtc_writebyte, +static struct resource ts72xx_rtc_resources[] = { + DEFINE_RES_MEM(TS72XX_RTC_INDEX_PHYS_BASE, 0x01), + DEFINE_RES_MEM(TS72XX_RTC_DATA_PHYS_BASE, 0x01), }; static struct platform_device ts72xx_rtc_device = { .name = "rtc-m48t86", .id = -1, - .dev = { - .platform_data = &ts72xx_rtc_ops, - }, - .num_resources = 0, + .resource = ts72xx_rtc_resources, + .num_resources = ARRAY_SIZE(ts72xx_rtc_resources), }; static struct resource ts72xx_wdt_resources[] = { diff --git a/arch/arm/mach-ep93xx/ts72xx.h b/arch/arm/mach-ep93xx/ts72xx.h index 071feaa30adc..2255ba29fdd6 100644 --- a/arch/arm/mach-ep93xx/ts72xx.h +++ b/arch/arm/mach-ep93xx/ts72xx.h @@ -9,8 +9,6 @@ * febff000 22000000 4K model number register (bits 0-2) * febfe000 22400000 4K options register * febfd000 22800000 4K options register #2 - * febf9000 10800000 4K TS-5620 RTC index register - * febf8000 11700000 4K TS-5620 RTC data register */ #define TS72XX_MODEL_PHYS_BASE 0x22000000 @@ -40,15 +38,6 @@ #define TS72XX_OPTIONS2_TS9420 0x04 #define TS72XX_OPTIONS2_TS9420_BOOT 0x02 - -#define TS72XX_RTC_INDEX_VIRT_BASE IOMEM(0xfebf9000) -#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000 -#define TS72XX_RTC_INDEX_SIZE 0x00001000 - -#define TS72XX_RTC_DATA_VIRT_BASE IOMEM(0xfebf8000) -#define TS72XX_RTC_DATA_PHYS_BASE 0x11700000 -#define TS72XX_RTC_DATA_SIZE 0x00001000 - #define TS72XX_WDT_CONTROL_PHYS_BASE 0x23800000 #define TS72XX_WDT_FEED_PHYS_BASE 0x23c00000 diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c index fd6da5419b51..e81a78b125d9 100644 --- a/arch/arm/mach-exynos/firmware.c +++ 
b/arch/arm/mach-exynos/firmware.c @@ -41,7 +41,7 @@ static int exynos_do_idle(unsigned long mode) case FW_DO_IDLE_AFTR: if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) exynos_save_cp15(); - writel_relaxed(virt_to_phys(exynos_cpu_resume_ns), + writel_relaxed(__pa_symbol(exynos_cpu_resume_ns), sysram_ns_base_addr + 0x24); writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20); if (soc_is_exynos3250()) { @@ -135,7 +135,7 @@ static int exynos_suspend(void) exynos_save_cp15(); writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); - writel(virt_to_phys(exynos_cpu_resume_ns), + writel(__pa_symbol(exynos_cpu_resume_ns), sysram_ns_base_addr + EXYNOS_BOOT_ADDR); return cpu_suspend(0, exynos_cpu_suspend); diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 038fd8c993d0..b42622562ea7 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -221,7 +221,7 @@ static void exynos_mcpm_setup_entry_point(void) */ __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */ __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */ - __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8); + __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8); } static struct syscore_ops exynos_mcpm_syscore_ops = { diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index a5d68411a037..5a03bffe7226 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -353,7 +353,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) smp_rmb(); - boot_addr = virt_to_phys(exynos4_secondary_startup); + boot_addr = __pa_symbol(exynos4_secondary_startup); ret = exynos_set_boot_addr(core_id, boot_addr); if (ret) @@ -413,7 +413,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus) mpidr = cpu_logical_map(i); core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - boot_addr = virt_to_phys(exynos4_secondary_startup); + boot_addr = __pa_symbol(exynos4_secondary_startup); ret = exynos_set_boot_addr(core_id, boot_addr); if (ret) diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c index 487295f4a56b..1a7e5b5d08d8 100644 --- a/arch/arm/mach-exynos/pm.c +++ b/arch/arm/mach-exynos/pm.c @@ -132,7 +132,7 @@ static void exynos_set_wakeupmask(long mask) static void exynos_cpu_set_boot_vector(long flags) { - writel_relaxed(virt_to_phys(exynos_cpu_resume), + writel_relaxed(__pa_symbol(exynos_cpu_resume), exynos_boot_vector_addr()); writel_relaxed(flags, exynos_boot_vector_flag()); } @@ -238,7 +238,7 @@ static int exynos_cpu0_enter_aftr(void) abort: if (cpu_online(1)) { - unsigned long boot_addr = virt_to_phys(exynos_cpu_resume); + unsigned long boot_addr = __pa_symbol(exynos_cpu_resume); /* * Set the boot vector to something non-zero @@ -330,7 +330,7 @@ cpu1_aborted: static void exynos_pre_enter_aftr(void) { - unsigned long boot_addr = virt_to_phys(exynos_cpu_resume); + unsigned long boot_addr = __pa_symbol(exynos_cpu_resume); (void)exynos_set_boot_addr(1, boot_addr); } diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index adf4e8f182bd..748cfb8d5212 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -301,7 +301,7 @@ static void exynos_pm_prepare(void) exynos_pm_enter_sleep_mode(); /* ensure at least INFORM0 has the resume address */ - pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); + pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0); } static void 
exynos3250_pm_prepare(void) @@ -318,7 +318,7 @@ static void exynos3250_pm_prepare(void) exynos_pm_enter_sleep_mode(); /* ensure at least INFORM0 has the resume address */ - pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); + pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0); } static void exynos5420_pm_prepare(void) @@ -343,7 +343,7 @@ static void exynos5420_pm_prepare(void) /* ensure at least INFORM0 has the resume address */ if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) - pmu_raw_writel(virt_to_phys(mcpm_entry_point), S5P_INFORM0); + pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0); tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0)); tmp &= ~EXYNOS_L2_USE_RETENTION; diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c index 4b653a8cb75c..a6c117622d67 100644 --- a/arch/arm/mach-hisi/platmcpm.c +++ b/arch/arm/mach-hisi/platmcpm.c @@ -327,7 +327,7 @@ static int __init hip04_smp_init(void) */ writel_relaxed(hip04_boot_method[0], relocation); writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */ - writel_relaxed(virt_to_phys(secondary_startup), relocation + 8); + writel_relaxed(__pa_symbol(secondary_startup), relocation + 8); writel_relaxed(0, relocation + 12); iounmap(relocation); diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c index e1d67648d5d0..91bb02dec20f 100644 --- a/arch/arm/mach-hisi/platsmp.c +++ b/arch/arm/mach-hisi/platsmp.c @@ -28,7 +28,7 @@ void hi3xxx_set_cpu_jump(int cpu, void *jump_addr) cpu = cpu_logical_map(cpu); if (!cpu || !ctrl_base) return; - writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2)); + writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2)); } int hi3xxx_get_cpu_jump(int cpu) @@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t jumpaddr; - jumpaddr = virt_to_phys(secondary_startup); + jumpaddr = __pa_symbol(secondary_startup); hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr); hix5hd2_set_cpu(cpu, true); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); @@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) struct device_node *node; - jumpaddr = virt_to_phys(secondary_startup); + jumpaddr = __pa_symbol(secondary_startup); hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr); node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl"); diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c index 711dbbd5badd..c2d1b329fba1 100644 --- a/arch/arm/mach-imx/platsmp.c +++ b/arch/arm/mach-imx/platsmp.c @@ -117,7 +117,7 @@ static void __init ls1021a_smp_prepare_cpus(unsigned int max_cpus) dcfg_base = of_iomap(np, 0); BUG_ON(!dcfg_base); - paddr = virt_to_phys(secondary_startup); + paddr = __pa_symbol(secondary_startup); writel_relaxed(cpu_to_be32(paddr), dcfg_base + DCFG_CCSR_SCRATCHRW1); iounmap(dcfg_base); diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 1515e498d348..e61b1d1027e1 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -499,7 +499,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) memset(suspend_ocram_base, 0, sizeof(*pm_info)); pm_info = suspend_ocram_base; pm_info->pbase = ocram_pbase; - pm_info->resume_addr = virt_to_phys(v7_cpu_resume); + pm_info->resume_addr = __pa_symbol(v7_cpu_resume); pm_info->pm_info_size = sizeof(*pm_info); /* diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 70b083fe934a..495d85d0fe7e 100644 --- 
a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -99,7 +99,7 @@ void imx_enable_cpu(int cpu, bool enable) void imx_set_cpu_jump(int cpu, void *jump_addr) { cpu = cpu_logical_map(cpu); - writel_relaxed(virt_to_phys(jump_addr), + writel_relaxed(__pa_symbol(jump_addr), src_base + SRC_GPR1 + cpu * 8); } diff --git a/arch/arm/mach-mediatek/platsmp.c b/arch/arm/mach-mediatek/platsmp.c index b821e34474b6..726eb69bb655 100644 --- a/arch/arm/mach-mediatek/platsmp.c +++ b/arch/arm/mach-mediatek/platsmp.c @@ -122,7 +122,7 @@ static void __init __mtk_smp_prepare_cpus(unsigned int max_cpus, int trustzone) * write the address of slave startup address into the system-wide * jump register */ - writel_relaxed(virt_to_phys(secondary_startup_arm), + writel_relaxed(__pa_symbol(secondary_startup_arm), mtk_smp_base + mtk_smp_info->jump_reg); } diff --git a/arch/arm/mach-mvebu/pm.c b/arch/arm/mach-mvebu/pm.c index 2990c5269b18..c487be61d6d8 100644 --- a/arch/arm/mach-mvebu/pm.c +++ b/arch/arm/mach-mvebu/pm.c @@ -110,7 +110,7 @@ static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr) { phys_addr_t resume_pc; - resume_pc = virt_to_phys(armada_370_xp_cpu_resume); + resume_pc = __pa_symbol(armada_370_xp_cpu_resume); /* * The bootloader expects the first two words to be a magic diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index f39bd51bce18..27a78c80e5b1 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -112,7 +112,7 @@ static const struct of_device_id of_pmsu_table[] = { void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) { - writel(virt_to_phys(boot_addr), pmsu_mp_base + + writel(__pa_symbol(boot_addr), pmsu_mp_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); } diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index 76cbc82a7407..04d9ebe6a90a 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c @@ -153,7 +153,7 @@ void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr) if (of_machine_is_compatible("marvell,armada375")) mvebu_armada375_smp_wa_init(); - writel(virt_to_phys(boot_addr), system_controller_base + + writel(__pa_symbol(boot_addr), system_controller_base + mvebu_sc->resume_boot_addr); } #endif diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index 1662071bb2cc..bd8089ff929f 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c @@ -315,15 +315,15 @@ void omap3_save_scratchpad_contents(void) scratchpad_contents.boot_config_ptr = 0x0; if (cpu_is_omap3630()) scratchpad_contents.public_restore_ptr = - virt_to_phys(omap3_restore_3630); + __pa_symbol(omap3_restore_3630); else if (omap_rev() != OMAP3430_REV_ES3_0 && omap_rev() != OMAP3430_REV_ES3_1 && omap_rev() != OMAP3430_REV_ES3_1_2) scratchpad_contents.public_restore_ptr = - virt_to_phys(omap3_restore); + __pa_symbol(omap3_restore); else scratchpad_contents.public_restore_ptr = - virt_to_phys(omap3_restore_es3); + __pa_symbol(omap3_restore_es3); if (omap_type() == OMAP2_DEVICE_TYPE_GP) scratchpad_contents.secure_ram_restore_ptr = 0x0; @@ -395,7 +395,7 @@ void omap3_save_scratchpad_contents(void) sdrc_block_contents.flags = 0x0; sdrc_block_contents.block_size = 0x0; - arm_context_addr = virt_to_phys(omap3_arm_context); + arm_context_addr = __pa_symbol(omap3_arm_context); /* Copy all the contents to the scratchpad location */ scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c 
b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 7d62ad48c7c9..113ab2dd2ee9 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -273,7 +273,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) cpu_clear_prev_logic_pwrst(cpu); pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state); - set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume)); + set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume)); omap_pm_ops.scu_prepare(cpu, power_state); l2x0_pwrst_prepare(cpu, save_state); @@ -325,7 +325,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); - set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart)); + set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart)); omap_pm_ops.scu_prepare(cpu, power_state); /* @@ -467,13 +467,13 @@ void __init omap4_mpuss_early_init(void) sar_base = omap4_get_sar_ram_base(); if (cpu_is_omap443x()) - startup_pa = virt_to_phys(omap4_secondary_startup); + startup_pa = __pa_symbol(omap4_secondary_startup); else if (cpu_is_omap446x()) - startup_pa = virt_to_phys(omap4460_secondary_startup); + startup_pa = __pa_symbol(omap4460_secondary_startup); else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) - startup_pa = virt_to_phys(omap5_secondary_hyp_startup); + startup_pa = __pa_symbol(omap5_secondary_hyp_startup); else - startup_pa = virt_to_phys(omap5_secondary_startup); + startup_pa = __pa_symbol(omap5_secondary_startup); if (cpu_is_omap44xx()) writel_relaxed(startup_pa, sar_base + diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index b4de3da6dffa..003353b0b794 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -316,9 +316,9 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) * A barrier is added to ensure that write buffer is drained */ if (omap_secure_apis_support()) - omap_auxcoreboot_addr(virt_to_phys(cfg.startup_addr)); + omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr)); else - writel_relaxed(virt_to_phys(cfg.startup_addr), + writel_relaxed(__pa_symbol(cfg.startup_addr), base + OMAP_AUX_CORE_BOOT_1); } diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c index 6bf626700557..1346b3ab34a5 100644 --- a/arch/arm/mach-omap2/omap_twl.c +++ b/arch/arm/mach-omap2/omap_twl.c @@ -1,5 +1,5 @@ /** - * OMAP and TWL PMIC specific intializations. + * OMAP and TWL PMIC specific initializations. * * Copyright (C) 2010 Texas Instruments Incorporated. 
* Thara Gopinath diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 003a6cb248be..5c46ea6756d7 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c index 8d597267d0c4..7ef80a8304c0 100644 --- a/arch/arm/mach-orion5x/ts78xx-setup.c +++ b/arch/arm/mach-orion5x/ts78xx-setup.c @@ -16,7 +16,6 @@ #include <linux/platform_device.h> #include <linux/mv643xx_eth.h> #include <linux/ata_platform.h> -#include <linux/platform_data/rtc-m48t86.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/timeriomem-rng.h> @@ -80,79 +79,38 @@ static struct mv_sata_platform_data ts78xx_sata_data = { /***************************************************************************** * RTC M48T86 - nicked^Wborrowed from arch/arm/mach-ep93xx/ts72xx.c ****************************************************************************/ -#define TS_RTC_CTRL (TS78XX_FPGA_REGS_VIRT_BASE + 0x808) -#define TS_RTC_DATA (TS78XX_FPGA_REGS_VIRT_BASE + 0x80c) +#define TS_RTC_CTRL (TS78XX_FPGA_REGS_PHYS_BASE + 0x808) +#define TS_RTC_DATA (TS78XX_FPGA_REGS_PHYS_BASE + 0x80c) -static unsigned char ts78xx_ts_rtc_readbyte(unsigned long addr) -{ - writeb(addr, TS_RTC_CTRL); - return readb(TS_RTC_DATA); -} - -static void ts78xx_ts_rtc_writebyte(unsigned char value, unsigned long addr) -{ - writeb(addr, TS_RTC_CTRL); - writeb(value, TS_RTC_DATA); -} - -static struct m48t86_ops ts78xx_ts_rtc_ops = { - .readbyte = ts78xx_ts_rtc_readbyte, - .writebyte = ts78xx_ts_rtc_writebyte, +static struct resource ts78xx_ts_rtc_resources[] = { + DEFINE_RES_MEM(TS_RTC_CTRL, 0x01), + DEFINE_RES_MEM(TS_RTC_DATA, 0x01), }; static struct platform_device ts78xx_ts_rtc_device = { .name = "rtc-m48t86", .id = -1, - .dev = { - .platform_data = &ts78xx_ts_rtc_ops, - }, - .num_resources = 0, + .resource = ts78xx_ts_rtc_resources, + .num_resources = ARRAY_SIZE(ts78xx_ts_rtc_resources), }; -/* - * TS uses some of the user storage space on the RTC chip so see if it is - * present; as it's an optional feature at purchase time and not all boards - * will have it present - * - * I've used the method TS use in their rtc7800.c example for the detection - * - * TODO: track down a guinea pig without an RTC to see if we can work out a - * better RTC detection routine - */ static int ts78xx_ts_rtc_load(void) { int rc; - unsigned char tmp_rtc0, tmp_rtc1; - - tmp_rtc0 = ts78xx_ts_rtc_readbyte(126); - tmp_rtc1 = ts78xx_ts_rtc_readbyte(127); - - ts78xx_ts_rtc_writebyte(0x00, 126); - ts78xx_ts_rtc_writebyte(0x55, 127); - if (ts78xx_ts_rtc_readbyte(127) == 0x55) { - ts78xx_ts_rtc_writebyte(0xaa, 127); - if (ts78xx_ts_rtc_readbyte(127) == 0xaa - && ts78xx_ts_rtc_readbyte(126) == 0x00) { - ts78xx_ts_rtc_writebyte(tmp_rtc0, 126); - ts78xx_ts_rtc_writebyte(tmp_rtc1, 127); - - if (ts78xx_fpga.supports.ts_rtc.init == 0) { - rc = platform_device_register(&ts78xx_ts_rtc_device); - if (!rc) - ts78xx_fpga.supports.ts_rtc.init = 1; - } else - rc = platform_device_add(&ts78xx_ts_rtc_device); - - if (rc) - pr_info("RTC could not be registered: %d\n", - rc); - return rc; - } + + if (ts78xx_fpga.supports.ts_rtc.init == 0) { + rc = platform_device_register(&ts78xx_ts_rtc_device); + if (!rc) + ts78xx_fpga.supports.ts_rtc.init = 1; + } else { + rc = 
platform_device_add(&ts78xx_ts_rtc_device); } - pr_info("RTC not found\n"); - return -ENODEV; -}; + if (rc) + pr_info("RTC could not be registered: %d\n", rc); + + return rc; +} static void ts78xx_ts_rtc_unload(void) { diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index 0875b99add18..75ef5d4be554 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -65,7 +65,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) * waiting for. This would wake up the secondary core from WFE */ #define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc - __raw_writel(virt_to_phys(sirfsoc_secondary_startup), + __raw_writel(__pa_symbol(sirfsoc_secondary_startup), clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET); #define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8 diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index 83e94c95e314..b0bcf1ff02dd 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c @@ -54,7 +54,7 @@ static void sirfsoc_set_sleep_mode(u32 mode) static int sirfsoc_pre_suspend_power_off(void) { - u32 wakeup_entry = virt_to_phys(cpu_resume); + u32 wakeup_entry = __pa_symbol(cpu_resume); sirfsoc_rtc_iobrg_writel(wakeup_entry, sirfsoc_pwrc_base + SIRFSOC_PWRC_SCRATCH_PAD1); diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 9c308de158c6..29630061e700 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -249,7 +249,7 @@ static int palmz72_pm_suspend(void) store_ptr = *PALMZ72_SAVE_DWORD; /* Setting PSPR to a proper value */ - PSPR = virt_to_phys(&palmz72_resume_info); + PSPR = __pa_symbol(&palmz72_resume_info); return 0; } diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index c725baf119e1..ba431fad5c47 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -85,7 +85,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) static int pxa25x_cpu_pm_prepare(void) { /* set resume return address */ - PSPR = virt_to_phys(cpu_resume); + PSPR = __pa_symbol(cpu_resume); return 0; } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index c0185c5c5a08..9b69be4e9fe3 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -168,7 +168,7 @@ static int pxa27x_cpu_pm_valid(suspend_state_t state) static int pxa27x_cpu_pm_prepare(void) { /* set resume return address */ - PSPR = virt_to_phys(cpu_resume); + PSPR = __pa_symbol(cpu_resume); return 0; } diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 87acc96388c7..0cc9f124c9ac 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -123,7 +123,7 @@ static void pxa3xx_cpu_pm_suspend(void) PSPR = 0x5c014000; /* overwrite with the resume address */ - *p = virt_to_phys(cpu_resume); + *p = __pa_symbol(cpu_resume); cpu_suspend(0, pxa3xx_finish_suspend); diff --git a/arch/arm/mach-realview/platsmp-dt.c b/arch/arm/mach-realview/platsmp-dt.c index 70ca99eb52c6..c242423bf8db 100644 --- a/arch/arm/mach-realview/platsmp-dt.c +++ b/arch/arm/mach-realview/platsmp-dt.c @@ -76,7 +76,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) } /* Put the boot address in this magic register */ regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET, - virt_to_phys(versatile_secondary_startup)); + __pa_symbol(versatile_secondary_startup)); } static const struct smp_operations realview_dt_smp_ops __initconst = { diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c index 
4d827a069d49..3abafdbdd7f4 100644 --- a/arch/arm/mach-rockchip/platsmp.c +++ b/arch/arm/mach-rockchip/platsmp.c @@ -156,7 +156,7 @@ static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle) */ mdelay(1); /* ensure the cpus other than cpu0 to startup */ - writel(virt_to_phys(secondary_startup), sram_base_addr + 8); + writel(__pa_symbol(secondary_startup), sram_base_addr + 8); writel(0xDEADBEAF, sram_base_addr + 4); dsb_sev(); } @@ -195,7 +195,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node) } /* set the boot function for the sram code */ - rockchip_boot_fn = virt_to_phys(secondary_startup); + rockchip_boot_fn = __pa_symbol(secondary_startup); /* copy the trampoline to sram, that runs during startup of the core */ memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz); diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c index bee8c8051929..0592534e0b88 100644 --- a/arch/arm/mach-rockchip/pm.c +++ b/arch/arm/mach-rockchip/pm.c @@ -62,7 +62,7 @@ static inline u32 rk3288_l2_config(void) static void rk3288_config_bootdata(void) { rkpm_bootdata_cpusp = rk3288_bootram_phy + (SZ_4K - 8); - rkpm_bootdata_cpu_code = virt_to_phys(cpu_resume); + rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume); rkpm_bootdata_l2ctlr_f = 1; rkpm_bootdata_l2ctlr = rk3288_l2_config(); diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index dc67a7fb3831..6b279d037774 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -31,6 +31,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/reboot.h> diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index 895aca225952..f5b5c49b56ac 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -484,7 +484,7 @@ static int jive_pm_suspend(void) * correct address to resume from. */ __raw_writel(0x2BED, S3C2412_INFORM0); - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1); return 0; } diff --git a/arch/arm/mach-s3c24xx/pm-s3c2410.c b/arch/arm/mach-s3c24xx/pm-s3c2410.c index 20e481d8a33a..a4588daeddb0 100644 --- a/arch/arm/mach-s3c24xx/pm-s3c2410.c +++ b/arch/arm/mach-s3c24xx/pm-s3c2410.c @@ -45,7 +45,7 @@ static void s3c2410_pm_prepare(void) { /* ensure at least GSTATUS3 has the resume address */ - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2410_GSTATUS3); + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2410_GSTATUS3); S3C_PMDBG("GSTATUS3 0x%08x\n", __raw_readl(S3C2410_GSTATUS3)); S3C_PMDBG("GSTATUS4 0x%08x\n", __raw_readl(S3C2410_GSTATUS4)); diff --git a/arch/arm/mach-s3c24xx/pm-s3c2416.c b/arch/arm/mach-s3c24xx/pm-s3c2416.c index c0e328e37bd6..b5bbf0d5985c 100644 --- a/arch/arm/mach-s3c24xx/pm-s3c2416.c +++ b/arch/arm/mach-s3c24xx/pm-s3c2416.c @@ -48,7 +48,7 @@ static void s3c2416_pm_prepare(void) * correct address to resume from. 
*/ __raw_writel(0x2BED, S3C2412_INFORM0); - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1); } static int s3c2416_pm_add(struct device *dev, struct subsys_interface *sif) diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index b0be382ff6bb..2f579be8fe67 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -304,7 +304,7 @@ static void s3c64xx_pm_prepare(void) wake_irqs, ARRAY_SIZE(wake_irqs)); /* store address of resume. */ - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0); + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C64XX_INFORM0); /* ensure previous wakeup state is cleared before sleeping */ __raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT); diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 7d69666de5ba..07cee14a363b 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c @@ -69,7 +69,7 @@ static void s5pv210_pm_prepare(void) __raw_writel(s5pv210_irqwake_intmask, S5P_WAKEUP_MASK); /* ensure at least INFORM0 has the resume address */ - __raw_writel(virt_to_phys(s5pv210_cpu_resume), S5P_INFORM0); + __raw_writel(__pa_symbol(s5pv210_cpu_resume), S5P_INFORM0); tmp = __raw_readl(S5P_SLEEP_CFG); tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN); diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index 34853d5dfda2..9a7079f565bd 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -73,7 +73,7 @@ static int sa11x0_pm_enter(suspend_state_t state) RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR; /* set resume return address */ - PSPR = virt_to_phys(cpu_resume); + PSPR = __pa_symbol(cpu_resume); /* go zzz */ cpu_suspend(0, sa1100_finish_suspend); diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c index e19266844e16..3ca2c13346f0 100644 --- a/arch/arm/mach-shmobile/platsmp-apmu.c +++ b/arch/arm/mach-shmobile/platsmp-apmu.c @@ -190,7 +190,7 @@ static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit)) static void __init shmobile_smp_apmu_setup_boot(void) { /* install boot code shared by all CPUs */ - shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); + shmobile_boot_fn = __pa_symbol(shmobile_smp_boot); } void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus, @@ -204,7 +204,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus, int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) { /* For this particular CPU register boot vector */ - shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0); + shmobile_smp_hook(cpu, __pa_symbol(secondary_startup), 0); return apmu_wrap(cpu, apmu_power_on); } @@ -308,7 +308,7 @@ int shmobile_smp_apmu_cpu_kill(unsigned int cpu) #if defined(CONFIG_SUSPEND) static int shmobile_smp_apmu_do_suspend(unsigned long cpu) { - shmobile_smp_hook(cpu, virt_to_phys(cpu_resume), 0); + shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0); shmobile_smp_apmu_cpu_shutdown(cpu); cpu_do_idle(); /* WFI selects Core Standby */ return 1; diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c index d1ecaf37d142..f1a1efde4beb 100644 --- a/arch/arm/mach-shmobile/platsmp-scu.c +++ b/arch/arm/mach-shmobile/platsmp-scu.c @@ -24,7 +24,7 @@ static void __iomem *shmobile_scu_base; static int shmobile_scu_cpu_prepare(unsigned int cpu) { /* For this particular CPU register SCU SMP boot vector */ - shmobile_smp_hook(cpu, 
virt_to_phys(shmobile_boot_scu), + shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu), shmobile_scu_base_phys); return 0; } @@ -33,7 +33,7 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys, unsigned int max_cpus) { /* install boot code shared by all CPUs */ - shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); + shmobile_boot_fn = __pa_symbol(shmobile_smp_boot); /* enable SCU and cache coherency on booting CPU */ shmobile_scu_base_phys = scu_base_phys; diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c index 07945748b571..0ee76772b507 100644 --- a/arch/arm/mach-socfpga/platsmp.c +++ b/arch/arm/mach-socfpga/platsmp.c @@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); - writel(virt_to_phys(secondary_startup), + writel(__pa_symbol(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)); flush_cache_all(); @@ -63,7 +63,7 @@ static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle SOCFPGA_A10_RSTMGR_MODMPURST); memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); - writel(virt_to_phys(secondary_startup), + writel(__pa_symbol(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff)); flush_cache_all(); diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c index 8d1e2d551786..39038a03836a 100644 --- a/arch/arm/mach-spear/platsmp.c +++ b/arch/arm/mach-spear/platsmp.c @@ -117,7 +117,7 @@ static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus) * (presently it is in SRAM). The BootMonitor waits until it receives a * soft interrupt, and then the secondary CPU branches to this address. 
*/ - __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION); + __raw_writel(__pa_symbol(spear13xx_secondary_startup), SYS_LOCATION); } const struct smp_operations spear13xx_smp_ops __initconst = { diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index ea5a2277ee46..231f19e17436 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -103,7 +103,7 @@ static void __init sti_smp_prepare_cpus(unsigned int max_cpus) u32 __iomem *cpu_strt_ptr; u32 release_phys; int cpu; - unsigned long entry_pa = virt_to_phys(sti_secondary_startup); + unsigned long entry_pa = __pa_symbol(sti_secondary_startup); np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); diff --git a/arch/arm/mach-sunxi/platsmp.c b/arch/arm/mach-sunxi/platsmp.c index 6642267812c9..8fb5088464db 100644 --- a/arch/arm/mach-sunxi/platsmp.c +++ b/arch/arm/mach-sunxi/platsmp.c @@ -80,7 +80,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu, spin_lock(&cpu_lock); /* Set CPU boot address */ - writel(virt_to_phys(secondary_startup), + writel(__pa_symbol(secondary_startup), cpucfg_membase + CPUCFG_PRIVATE0_REG); /* Assert the CPU core in reset */ @@ -162,7 +162,7 @@ static int sun8i_smp_boot_secondary(unsigned int cpu, spin_lock(&cpu_lock); /* Set CPU boot address */ - writel(virt_to_phys(secondary_startup), + writel(__pa_symbol(secondary_startup), cpucfg_membase + CPUCFG_PRIVATE0_REG); /* Assert the CPU core in reset */ diff --git a/arch/arm/mach-tango/platsmp.c b/arch/arm/mach-tango/platsmp.c index 98c62a4a8623..2f0c6c050fed 100644 --- a/arch/arm/mach-tango/platsmp.c +++ b/arch/arm/mach-tango/platsmp.c @@ -5,7 +5,7 @@ static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle) { - tango_set_aux_boot_addr(virt_to_phys(secondary_startup)); + tango_set_aux_boot_addr(__pa_symbol(secondary_startup)); tango_start_aux_core(cpu); return 0; } diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index b05c6d6f99d0..406c0814eb6e 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c @@ -5,7 +5,7 @@ static int tango_pm_powerdown(unsigned long arg) { - tango_suspend(virt_to_phys(cpu_resume)); + tango_suspend(__pa_symbol(cpu_resume)); return -EIO; /* tango_suspend has failed */ } diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index 6fd9db54887e..dc558892753c 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c @@ -94,14 +94,14 @@ void __init tegra_cpu_reset_handler_init(void) __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] = *((u32 *)cpu_possible_mask); __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] = - virt_to_phys((void *)secondary_startup); + __pa_symbol((void *)secondary_startup); #endif #ifdef CONFIG_PM_SLEEP __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] = TEGRA_IRAM_LPx_RESUME_AREA; __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] = - virt_to_phys((void *)tegra_resume); + __pa_symbol((void *)tegra_resume); #endif tegra_cpu_reset_handler_enable(); diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile index c2499bff4986..a9a3453548f4 100644 --- a/arch/arm/mach-ux500/Makefile +++ b/arch/arm/mach-ux500/Makefile @@ -5,7 +5,4 @@ obj-y := pm.o obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o obj-$(CONFIG_SMP) += platsmp.o -obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o - -CFLAGS_hotplug.o += -march=armv7-a diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 
24529cf58df6..28083ef72819 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -31,8 +31,6 @@ #include <asm/mach/map.h> #include <asm/mach/arch.h> -#include "setup.h" - #include "db8500-regs.h" static int __init ux500_l2x0_unlock(void) diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c deleted file mode 100644 index 1cbed0331fd3..000000000000 --- a/arch/arm/mach-ux500/hotplug.c +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) STMicroelectronics 2009 - * Copyright (C) ST-Ericsson SA 2010 - * - * License Terms: GNU General Public License v2 - * Based on ARM realview platform - * - * Author: Sundar Iyer <sundar.iyer@stericsson.com> - * - */ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/smp.h> - -#include <asm/smp_plat.h> - -#include "setup.h" - -/* - * platform-specific code to shutdown a CPU - * - * Called with IRQs disabled - */ -void ux500_cpu_die(unsigned int cpu) -{ - /* directly enter low power state, skipping secure registers */ - for (;;) { - __asm__ __volatile__("dsb\n\t" "wfi\n\t" - : : : "memory"); - if (pen_release == cpu_logical_map(cpu)) { - /* - * OK, proper wakeup, we're done - */ - break; - } - } -} diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index e0ee139fdebf..69c2361ca688 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -23,8 +23,6 @@ #include <asm/smp_plat.h> #include <asm/smp_scu.h> -#include "setup.h" - #include "db8500-regs.h" /* Magic triggers in backup RAM */ @@ -79,7 +77,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) * backup ram register at offset 0x1FF0, which is what boot rom code * is waiting for. This will wake up the secondary core from WFE. */ - writel(virt_to_phys(secondary_startup), + writel(__pa_symbol(secondary_startup), backupram + UX500_CPU1_JUMPADDR_OFFSET); writel(0xA1FEED01, backupram + UX500_CPU1_WAKEMAGIC_OFFSET); @@ -90,6 +88,13 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) return 0; } +#ifdef CONFIG_HOTPLUG_CPU +void ux500_cpu_die(unsigned int cpu) +{ + wfi(); +} +#endif + static const struct smp_operations ux500_smp_ops __initconst = { .smp_prepare_cpus = ux500_smp_prepare_cpus, .smp_boot_secondary = ux500_boot_secondary, diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h deleted file mode 100644 index 988e7c77068d..000000000000 --- a/arch/arm/mach-ux500/setup.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (C) 2009 ST-Ericsson. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * These symbols are needed for board-specific files to call their - * own cpu-specific files - */ -#ifndef __ASM_ARCH_SETUP_H -#define __ASM_ARCH_SETUP_H - -extern void ux500_cpu_die(unsigned int cpu); - -#endif /* __ASM_ARCH_SETUP_H */ diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c index 5cedcf572104..ee2a0faafaa1 100644 --- a/arch/arm/mach-vexpress/dcscb.c +++ b/arch/arm/mach-vexpress/dcscb.c @@ -166,7 +166,7 @@ static int __init dcscb_init(void) * Future entries into the kernel can now go * through the cluster entry vectors. 
*/ - vexpress_flags_set(virt_to_phys(mcpm_entry_point)); + vexpress_flags_set(__pa_symbol(mcpm_entry_point)); return 0; } diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c index 98e29dee91e8..742499bac6d0 100644 --- a/arch/arm/mach-vexpress/platsmp.c +++ b/arch/arm/mach-vexpress/platsmp.c @@ -79,7 +79,7 @@ static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus) * until it receives a soft interrupt, and then the * secondary CPU branches to this address. */ - vexpress_flags_set(virt_to_phys(versatile_secondary_startup)); + vexpress_flags_set(__pa_symbol(versatile_secondary_startup)); } const struct smp_operations vexpress_smp_dt_ops __initconst = { diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index 1aa4ccece69f..9b5f3c427086 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c @@ -54,7 +54,7 @@ static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) return -EINVAL; ve_spc_set_resume_addr(cluster, cpu, - virt_to_phys(mcpm_entry_point)); + __pa_symbol(mcpm_entry_point)); ve_spc_cpu_wakeup_irq(cluster, cpu, true); return 0; } @@ -159,7 +159,7 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster) { - ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); + ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point)); } static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster) diff --git a/arch/arm/mach-zx/platsmp.c b/arch/arm/mach-zx/platsmp.c index 0297f92084e0..afb9a82dedc3 100644 --- a/arch/arm/mach-zx/platsmp.c +++ b/arch/arm/mach-zx/platsmp.c @@ -76,7 +76,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus) * until it receives a soft interrupt, and then the * secondary CPU branches to this address. 
*/ - __raw_writel(virt_to_phys(zx_secondary_startup), + __raw_writel(__pa_symbol(zx_secondary_startup), aonsysctrl_base + AON_SYS_CTRL_RESERVED1); iounmap(aonsysctrl_base); @@ -94,7 +94,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus) /* Map the first 4 KB IRAM for suspend usage */ sys_iram = __arm_ioremap_exec(ZX_IRAM_BASE, PAGE_SIZE, false); - zx_secondary_startup_pa = virt_to_phys(zx_secondary_startup); + zx_secondary_startup_pa = __pa_symbol(zx_secondary_startup); fncpy(sys_iram, &zx_resume_jump, zx_suspend_iram_sz); } diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c index 7cd9865bdeb7..caa6d5fe9078 100644 --- a/arch/arm/mach-zynq/platsmp.c +++ b/arch/arm/mach-zynq/platsmp.c @@ -89,7 +89,7 @@ EXPORT_SYMBOL(zynq_cpun_start); static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle) { - return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); + return zynq_cpun_start(__pa_symbol(secondary_startup), cpu); } /* diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 35e3a56e5d86..c6c4c9c8824b 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -29,6 +29,7 @@ config CPU_ARM720T select CPU_COPY_V4WT if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WT if MMU help A 32-bit RISC processor with 8kByte Cache, Write Buffer and @@ -46,6 +47,7 @@ config CPU_ARM740T select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help A 32-bit RISC processor with 8KB cache or 4KB variants, write buffer and MPU(Protection Unit) built around @@ -79,6 +81,7 @@ config CPU_ARM920T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM920T is licensed to be produced by numerous vendors, @@ -97,6 +100,7 @@ config CPU_ARM922T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM922T is a version of the ARM920T, but with smaller @@ -116,6 +120,7 @@ config CPU_ARM925T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM925T is a mix between the ARM920T and ARM926T, but with @@ -134,6 +139,7 @@ config CPU_ARM926T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help This is a variant of the ARM920. It has slightly different @@ -170,6 +176,7 @@ config CPU_ARM940T select CPU_CACHE_VIVT select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help ARM940T is a member of the ARM9TDMI family of general- purpose microprocessors with MPU and separate 4KB @@ -188,6 +195,7 @@ config CPU_ARM946E select CPU_CACHE_VIVT select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help ARM946E-S is a member of the ARM9E-S family of high- performance, 32-bit system-on-chip processor solutions. 
@@ -206,6 +214,7 @@ config CPU_ARM1020 select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1020 is the 32K cached version of the ARM10 processor, @@ -225,6 +234,7 @@ config CPU_ARM1020E select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # ARM1022E @@ -236,6 +246,7 @@ config CPU_ARM1022 select CPU_COPY_V4WB if MMU # can probably do better select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1022E is an implementation of the ARMv5TE architecture @@ -254,6 +265,7 @@ config CPU_ARM1026 select CPU_COPY_V4WB if MMU # can probably do better select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture @@ -302,6 +314,7 @@ config CPU_XSCALE select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # XScale Core Version 3 @@ -312,6 +325,7 @@ config CPU_XSC3 select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU select IO_36 @@ -324,6 +338,7 @@ config CPU_MOHAWK select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # Feroceon @@ -335,6 +350,7 @@ config CPU_FEROCEON select CPU_COPY_FEROCEON if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_FEROCEON if MMU config CPU_FEROCEON_OLD_ID @@ -367,6 +383,7 @@ config CPU_V6 select CPU_CP15_MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V6 + select CPU_THUMB_CAPABLE select CPU_TLB_V6 if MMU # ARMv6k @@ -381,6 +398,7 @@ config CPU_V6K select CPU_CP15_MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V6 + select CPU_THUMB_CAPABLE select CPU_TLB_V6 if MMU # ARMv7 @@ -396,6 +414,7 @@ config CPU_V7 select CPU_CP15_MPU if !MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V7 + select CPU_THUMB_CAPABLE select CPU_TLB_V7 if MMU # ARMv7M @@ -410,11 +429,17 @@ config CPU_V7M config CPU_THUMBONLY bool + select CPU_THUMB_CAPABLE # There are no CPUs available with MMU that don't implement an ARM ISA: depends on !MMU help Select this if your CPU doesn't support the 32 bit ARM instructions. +config CPU_THUMB_CAPABLE + bool + help + Select this if your CPU can support Thumb mode. + # Figure out what processor architecture version we should be using. # This defines the compiler instruction set which depends on the machine type. 
config CPU_32v3 @@ -655,11 +680,7 @@ config ARCH_DMA_ADDR_T_64BIT config ARM_THUMB bool "Support Thumb user binaries" if !CPU_THUMBONLY - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \ - CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \ - CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \ - CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \ - CPU_V7 || CPU_FEROCEON || CPU_V7M + depends on CPU_THUMB_CAPABLE default y help Say Y if you want to include kernel support for running user space diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index e8698241ece9..b3dea80715b4 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -14,6 +14,7 @@ endif obj-$(CONFIG_ARM_PTDUMP) += dump.o obj-$(CONFIG_MODULES) += proc-syms.o +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 7d5f4c736a16..2c96190e018b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -14,12 +14,13 @@ #include <linux/moduleparam.h> #include <linux/compiler.h> #include <linux/kernel.h> +#include <linux/sched/debug.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/cp15.h> diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c index dfe97b409916..f57b080b6fd4 100644 --- a/arch/arm/mm/cache-uniphier.c +++ b/arch/arm/mm/cache-uniphier.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) "uniphier: " fmt +#include <linux/bitops.h> #include <linux/init.h> #include <linux/io.h> #include <linux/log2.h> @@ -71,8 +72,7 @@ * @ctrl_base: virtual base address of control registers * @rev_base: virtual base address of revision registers * @op_base: virtual base address of operation registers - * @way_present_mask: each bit specifies if the way is present - * @way_locked_mask: each bit specifies if the way is locked + * @way_mask: each bit specifies if the way is present * @nsets: number of associativity sets * @line_size: line size in bytes * @range_op_max_size: max size that can be handled by a single range operation @@ -83,8 +83,7 @@ struct uniphier_cache_data { void __iomem *rev_base; void __iomem *op_base; void __iomem *way_ctrl_base; - u32 way_present_mask; - u32 way_locked_mask; + u32 way_mask; u32 nsets; u32 line_size; u32 range_op_max_size; @@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on) writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC); } -static void __init __uniphier_cache_set_locked_ways( - struct uniphier_cache_data *data, - u32 way_mask) +static void __init __uniphier_cache_set_active_ways( + struct uniphier_cache_data *data) { unsigned int cpu; - data->way_locked_mask = way_mask & data->way_present_mask; - for_each_possible_cpu(cpu) - writel_relaxed(~data->way_locked_mask & data->way_present_mask, - data->way_ctrl_base + 4 * cpu); + writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu); } static void uniphier_cache_maint_range(unsigned long start, unsigned long end, @@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void) list_for_each_entry(data, &uniphier_cache_list, list) { __uniphier_cache_enable(data, true); - __uniphier_cache_set_locked_ways(data, 0); + __uniphier_cache_set_active_ways(data); } } @@ -382,8 +377,8 @@ static int __init 
__uniphier_cache_init(struct device_node *np, goto err; } - data->way_present_mask = - ((u32)1 << cache_size / data->nsets / data->line_size) - 1; + data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1, + 0); data->ctrl_base = of_iomap(np, 0); if (!data->ctrl_base) { diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index a134d8a13d00..de78109d002d 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -164,7 +164,7 @@ skip: cmp r3, r10 bgt flush_levels finished: - mov r10, #0 @ swith back to cache level 0 + mov r10, #0 @ switch back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr dsb st isb diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index 816a7e44e6f1..788486e830d3 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S @@ -217,7 +217,7 @@ skip: cmp r3, r10 bgt flush_levels finished: - mov r10, #0 @ swith back to cache level 0 + mov r10, #0 @ switch back to cache level 0 write_csselr r10, r3 @ select current cache level in cssr dsb st isb diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6ffdf17e0d5c..63eabb06f9f1 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -180,7 +180,7 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -struct dma_map_ops arm_dma_ops = { +const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, @@ -204,7 +204,7 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -struct dma_map_ops arm_coherent_dma_ops = { +const struct dma_map_ops arm_coherent_dma_ops = { .alloc = arm_coherent_dma_alloc, .free = arm_coherent_dma_free, .mmap = arm_coherent_dma_mmap, @@ -870,6 +870,9 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_end - vma->vm_start, vma->vm_page_prot); } +#else + ret = vm_iomap_memory(vma, vma->vm_start, + (vma->vm_end - vma->vm_start)); #endif /* CONFIG_MMU */ return ret; @@ -1069,7 +1072,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i, j; @@ -1103,7 +1106,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; @@ -1122,7 +1125,7 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; @@ -1141,7 +1144,7 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; @@ -2101,7 +2104,7 @@ 
static void arm_iommu_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -struct dma_map_ops iommu_ops = { +const struct dma_map_ops iommu_ops = { .alloc = arm_iommu_alloc_attrs, .free = arm_iommu_free_attrs, .mmap = arm_iommu_mmap_attrs, @@ -2121,7 +2124,7 @@ struct dma_map_ops iommu_ops = { .unmap_resource = arm_iommu_unmap_resource, }; -struct dma_map_ops iommu_coherent_ops = { +const struct dma_map_ops iommu_coherent_ops = { .alloc = arm_coherent_iommu_alloc_attrs, .free = arm_coherent_iommu_free_attrs, .mmap = arm_coherent_iommu_mmap_attrs, @@ -2321,7 +2324,7 @@ void arm_iommu_detach_device(struct device *dev) } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); -static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) +static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) { return coherent ? &iommu_coherent_ops : &iommu_ops; } @@ -2376,7 +2379,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } #endif /* CONFIG_ARM_DMA_USE_IOMMU */ -static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) +static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) { return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; } @@ -2384,7 +2387,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; dev->archdata.dma_coherent = coherent; if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 9fe8e241335c..21192d6eda40 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -18,6 +18,7 @@ #include <linux/seq_file.h> #include <asm/fixmap.h> +#include <asm/memory.h> #include <asm/pgtable.h> struct addr_marker { @@ -31,8 +32,8 @@ static struct addr_marker address_markers[] = { { 0, "vmalloc() Area" }, { VMALLOC_END, "vmalloc() End" }, { FIXADDR_START, "Fixmap Area" }, - { CONFIG_VECTORS_BASE, "Vectors" }, - { CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, + { VECTORS_BASE, "Vectors" }, + { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, { -1, NULL }, }; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index c2b5b9892fd1..ff8b0aa2dfde 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -16,7 +16,8 @@ #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/highmem.h> #include <linux/perf_event.h> diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 3cced8455727..f1e6190aa7ea 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -327,6 +327,12 @@ void flush_dcache_page(struct page *page) if (page == ZERO_PAGE(0)) return; + if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) { + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); + return; + } + mapping = page_mapping(page); if (!cache_ops_need_broadcast() && diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index c1a48f88764e..3e511bec69b8 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -1,6 +1,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/mm_types.h> #include <asm/cputype.h> #include <asm/idmap.h> diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4be0bee4c357..1d8558ff9827 100644 --- a/arch/arm/mm/init.c +++ 
b/arch/arm/mm/init.c @@ -13,6 +13,8 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mman.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/initrd.h> @@ -27,6 +29,7 @@ #include <asm/cp15.h> #include <asm/mach-types.h> #include <asm/memblock.h> +#include <asm/memory.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> @@ -227,41 +230,59 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) return phys; } -void __init arm_memblock_init(const struct machine_desc *mdesc) +static void __init arm_initrd_init(void) { - /* Register the kernel text, kernel data and initrd with memblock. */ -#ifdef CONFIG_XIP_KERNEL - memblock_reserve(__pa(_sdata), _end - _sdata); -#else - memblock_reserve(__pa(_stext), _end - _stext); -#endif #ifdef CONFIG_BLK_DEV_INITRD + phys_addr_t start; + unsigned long size; + /* FDT scan will populate initrd_start */ if (initrd_start && !phys_initrd_size) { phys_initrd_start = __virt_to_phys(initrd_start); phys_initrd_size = initrd_end - initrd_start; } + initrd_start = initrd_end = 0; - if (phys_initrd_size && - !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { + + if (!phys_initrd_size) + return; + + /* + * Round the memory region to page boundaries as per free_initrd_mem() + * This allows us to detect whether the pages overlapping the initrd + * are in use, but more importantly, reserves the entire set of pages + * as we don't want these pages allocated for other purposes. + */ + start = round_down(phys_initrd_start, PAGE_SIZE); + size = phys_initrd_size + (phys_initrd_start - start); + size = round_up(size, PAGE_SIZE); + + if (!memblock_is_region_memory(start, size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", - (u64)phys_initrd_start, phys_initrd_size); - phys_initrd_start = phys_initrd_size = 0; + (u64)start, size); + return; } - if (phys_initrd_size && - memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { + + if (memblock_is_region_reserved(start, size)) { pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", - (u64)phys_initrd_start, phys_initrd_size); - phys_initrd_start = phys_initrd_size = 0; + (u64)start, size); + return; } - if (phys_initrd_size) { - memblock_reserve(phys_initrd_start, phys_initrd_size); - /* Now convert initrd to virtual addresses */ - initrd_start = __phys_to_virt(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - } + memblock_reserve(start, size); + + /* Now convert initrd to virtual addresses */ + initrd_start = __phys_to_virt(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; #endif +} + +void __init arm_memblock_init(const struct machine_desc *mdesc) +{ + /* Register the kernel text, kernel data and initrd with memblock. 
*/ + memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); + + arm_initrd_init(); arm_mm_memblock_reserve(); @@ -521,8 +542,7 @@ void __init mem_init(void) " .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", - MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + - (PAGE_SIZE)), + MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE), #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 66353caa35b9..2239fde10b80 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -5,7 +5,8 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/shm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/io.h> #include <linux/personality.h> #include <linux/random.h> diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4001dd15818d..4e016d7f37b3 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc); phys_addr_t arm_lowmem_limit __initdata = 0; -void __init sanity_check_meminfo(void) +void __init adjust_lowmem_bounds(void) { phys_addr_t memblock_limit = 0; - int highmem = 0; u64 vmalloc_limit; struct memblock_region *reg; - bool should_use_highmem = false; + phys_addr_t lowmem_limit = 0; /* * Let's use our own (unoptimized) equivalent of __pa() that is @@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void) for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; - phys_addr_t size_limit = reg->size; - if (reg->base >= vmalloc_limit) - highmem = 1; - else - size_limit = vmalloc_limit - reg->base; - - - if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { - - if (highmem) { - pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", - &block_start, &block_end); - memblock_remove(reg->base, reg->size); - should_use_highmem = true; - continue; - } - - if (reg->size > size_limit) { - phys_addr_t overlap_size = reg->size - size_limit; - - pr_notice("Truncating RAM at %pa-%pa", - &block_start, &block_end); - block_end = vmalloc_limit; - pr_cont(" to -%pa", &block_end); - memblock_remove(vmalloc_limit, overlap_size); - should_use_highmem = true; - } - } - - if (!highmem) { - if (block_end > arm_lowmem_limit) { - if (reg->size > size_limit) - arm_lowmem_limit = vmalloc_limit; - else - arm_lowmem_limit = block_end; - } + if (reg->base < vmalloc_limit) { + if (block_end > lowmem_limit) + /* + * Compare as u64 to ensure vmalloc_limit does + * not get truncated. block_end should always + * fit in phys_addr_t so there should be no + * issue with assignment. 
+ */ + lowmem_limit = min_t(u64, + vmalloc_limit, + block_end); /* * Find the first non-pmd-aligned page, and point @@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void) if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; else if (!IS_ALIGNED(block_end, PMD_SIZE)) - memblock_limit = arm_lowmem_limit; + memblock_limit = lowmem_limit; } } } - if (should_use_highmem) - pr_notice("Consider using a HIGHMEM enabled kernel.\n"); + arm_lowmem_limit = lowmem_limit; high_memory = __va(arm_lowmem_limit - 1) + 1; @@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void) if (!memblock_limit) memblock_limit = arm_lowmem_limit; + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { + if (memblock_end_of_DRAM() > arm_lowmem_limit) { + phys_addr_t end = memblock_end_of_DRAM(); + + pr_notice("Ignoring RAM at %pa-%pa\n", + &memblock_limit, &end); + pr_notice("Consider using a HIGHMEM enabled kernel.\n"); + + memblock_remove(memblock_limit, end - memblock_limit); + } + } + memblock_set_current_limit(memblock_limit); } @@ -1437,11 +1422,7 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; -#ifdef CONFIG_XIP_KERNEL - phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE); -#else - phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); -#endif + phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); /* Map all the lowmem memory banks. */ diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 2740967727e2..3b5c7aaf9c76 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <asm/sections.h> #include <asm/page.h> #include <asm/setup.h> @@ -22,6 +23,8 @@ #include "mm.h" +unsigned long vectors_base; + #ifdef CONFIG_ARM_MPU struct mpu_rgn_info mpu_rgn_info; @@ -85,7 +88,7 @@ static unsigned long irbar_read(void) } /* MPU initialisation functions */ -void __init sanity_check_meminfo_mpu(void) +void __init adjust_lowmem_bounds_mpu(void) { phys_addr_t phys_offset = PHYS_OFFSET; phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; @@ -274,19 +277,64 @@ void __init mpu_setup(void) } } #else -static void sanity_check_meminfo_mpu(void) {} +static void adjust_lowmem_bounds_mpu(void) {} static void __init mpu_setup(void) {} #endif /* CONFIG_ARM_MPU */ +#ifdef CONFIG_CPU_CP15 +#ifdef CONFIG_CPU_HIGH_VECTOR +static unsigned long __init setup_vectors_base(void) +{ + unsigned long reg = get_cr(); + + set_cr(reg | CR_V); + return 0xffff0000; +} +#else /* CONFIG_CPU_HIGH_VECTOR */ +/* Write exception base address to VBAR */ +static inline void set_vbar(unsigned long val) +{ + asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc"); +} + +/* + * Security extensions, bits[7:4], permitted values, + * 0b0000 - not implemented, 0b0001/0b0010 - implemented + */ +static inline bool security_extensions_enabled(void) +{ + return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); +} + +static unsigned long __init setup_vectors_base(void) +{ + unsigned long base = 0, reg = get_cr(); + + set_cr(reg & ~CR_V); + if (security_extensions_enabled()) { + if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) + base = CONFIG_DRAM_BASE; + set_vbar(base); + } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) { + if (CONFIG_DRAM_BASE != 0) + pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will 
be 0x00000000\n"); + } + + return base; +} +#endif /* CONFIG_CPU_HIGH_VECTOR */ +#endif /* CONFIG_CPU_CP15 */ + void __init arm_mm_memblock_reserve(void) { #ifndef CONFIG_CPU_V7M + vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0; /* * Register the exception vector page. * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ - memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); + memblock_reserve(vectors_base, 2 * PAGE_SIZE); #else /* ifndef CONFIG_CPU_V7M */ /* * There is no dedicated vector page on V7-M. So nothing needs to be @@ -295,10 +343,10 @@ void __init arm_mm_memblock_reserve(void) #endif } -void __init sanity_check_meminfo(void) +void __init adjust_lowmem_bounds(void) { phys_addr_t end; - sanity_check_meminfo_mpu(); + adjust_lowmem_bounds_mpu(); end = memblock_end_of_DRAM(); high_memory = __va(end - 1) + 1; memblock_set_current_limit(end); @@ -310,7 +358,7 @@ void __init sanity_check_meminfo(void) */ void __init paging_init(const struct machine_desc *mdesc) { - early_trap_init((void *)CONFIG_VECTORS_BASE); + early_trap_init((void *)vectors_base); mpu_setup(); bootmem_init(); } diff --git a/arch/arm/mm/physaddr.c b/arch/arm/mm/physaddr.c new file mode 100644 index 000000000000..02e60f495608 --- /dev/null +++ b/arch/arm/mm/physaddr.c @@ -0,0 +1,57 @@ +#include <linux/bug.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/mmdebug.h> +#include <linux/mm.h> + +#include <asm/sections.h> +#include <asm/memory.h> +#include <asm/fixmap.h> +#include <asm/dma.h> + +#include "mm.h" + +static inline bool __virt_addr_valid(unsigned long x) +{ + /* + * high_memory does not get immediately defined, and there + * are early callers of __pa() against PAGE_OFFSET + */ + if (!high_memory && x >= PAGE_OFFSET) + return true; + + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) + return true; + + /* + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an + * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS) + * that we just need to work around it and always return true. + */ + if (x == MAX_DMA_ADDRESS) + return true; + + return false; +} + +phys_addr_t __virt_to_phys(unsigned long x) +{ + WARN(!__virt_addr_valid(x), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + (void *)x, (void *)x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. 
+ */ + VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START || + x > (unsigned long)KERNEL_END); + + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c index ec717c190e2c..1365e8650843 100644 --- a/arch/arm/nwfpe/fpmodule.c +++ b/arch/arm/nwfpe/fpmodule.c @@ -31,7 +31,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <asm/thread_notify.h> diff --git a/arch/arm/probes/decode.h b/arch/arm/probes/decode.h index f9b08ba7fe73..548d622a3159 100644 --- a/arch/arm/probes/decode.h +++ b/arch/arm/probes/decode.h @@ -22,6 +22,7 @@ #include <linux/types.h> #include <linux/stddef.h> #include <asm/probes.h> +#include <asm/kprobes.h> void __init arm_probes_decode_init(void); diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index a4ec240ee7ba..b6dc9d838a9a 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/stop_machine.h> +#include <linux/sched/debug.h> #include <linux/stringify.h> #include <asm/traps.h> #include <asm/opcodes.h> diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 9775de22e2ff..c893726aa52d 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -203,6 +203,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/kprobes.h> #include <linux/errno.h> #include <linux/stddef.h> diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 569d5a650a4a..a71a48e71fff 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/uaccess.h> diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index bd62d94f8ac5..ce18c91b50a1 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -182,10 +182,10 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) } EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); -struct dma_map_ops *xen_dma_ops; +const struct dma_map_ops *xen_dma_ops; EXPORT_SYMBOL(xen_dma_ops); -static struct dma_map_ops xen_swiotlb_dma_ops = { +static const struct dma_map_ops xen_swiotlb_dma_ops = { .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile index 0d7bfbf7d922..3f94bce33b7f 100644 --- a/arch/arm64/boot/dts/amlogic/Makefile +++ b/arch/arm64/boot/dts/amlogic/Makefile @@ -5,12 +5,14 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p201.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-pro.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-meta.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-hub.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-play2.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb -dtb-$(CONFIG_ARCH_MESON) += meson-gxl-nexbox-a95x.dtb -dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q200.dtb 
-dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q201.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb always := $(dtb-y) diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 0cbe24b49710..5d995f7724af 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -83,6 +83,7 @@ reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu1: cpu@1 { @@ -91,6 +92,7 @@ reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu2: cpu@2 { @@ -99,6 +101,7 @@ reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu3: cpu@3 { @@ -107,6 +110,7 @@ reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; l2: l2-cache0 { @@ -171,6 +175,28 @@ }; }; + scpi { + compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0"; + mboxes = <&mailbox 1 &mailbox 2>; + shmem = <&cpu_scp_lpri &cpu_scp_hpri>; + + scpi_clocks: clocks { + compatible = "arm,scpi-clocks"; + + scpi_dvfs: scpi_clocks@0 { + compatible = "arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>; + clock-output-names = "vcpu"; + }; + }; + + scpi_sensors: sensors { + compatible = "arm,scpi-sensors"; + #thermal-sensor-cells = <1>; + }; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -229,6 +255,14 @@ status = "disabled"; }; + saradc: adc@8680 { + compatible = "amlogic,meson-saradc"; + reg = <0x0 0x8680 0x0 0x34>; + #io-channel-cells = <1>; + interrupts = <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>; + status = "disabled"; + }; + pwm_ef: pwm@86c0 { compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm"; reg = <0x0 0x086c0 0x0 0x10>; @@ -282,6 +316,25 @@ #address-cells = <0>; }; + sram: sram@c8000000 { + compatible = "amlogic,meson-gxbb-sram", "mmio-sram"; + reg = <0x0 0xc8000000 0x0 0x14000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0xc8000000 0x14000>; + + cpu_scp_lpri: scp-shmem@0 { + compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13000 0x400>; + }; + + cpu_scp_hpri: scp-shmem@200 { + compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13400 0x400>; + }; + }; + aobus: aobus@c8100000 { compatible = "simple-bus"; reg = <0x0 0xc8100000 0x0 0x100000>; @@ -297,6 +350,21 @@ status = "disabled"; }; + uart_AO_B: serial@4e0 { + compatible = "amlogic,meson-uart"; + reg = <0x0 0x004e0 0x0 0x14>; + interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; + clocks = <&xtal>; + status = "disabled"; + }; + + pwm_AO_ab: pwm@550 { + compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm"; + reg = <0x0 0x00550 0x0 0x10>; + #pwm-cells = <3>; + status = "disabled"; + }; + ir: ir@580 { compatible = "amlogic,meson-gxbb-ir"; reg = <0x0 0x00580 0x0 0x40>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts index 03e3d76626dd..fc0e86cb4cde 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts @@ -45,10 +45,55 @@ /dts-v1/; #include "meson-gxbb-p20x.dtsi" +#include <dt-bindings/input/input.h> / { compatible = "amlogic,p200", "amlogic,meson-gxbb"; model = "Amlogic Meson GXBB P200 Development Board"; + + avdd18_usb_adc: regulator-avdd18_usb_adc { + compatible = "regulator-fixed"; + 
regulator-name = "AVDD18_USB_ADC"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + adc_keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + + button-home { + label = "Home"; + linux,code = <KEY_HOME>; + press-threshold-microvolt = <900000>; /* 50% */ + }; + + button-esc { + label = "Esc"; + linux,code = <KEY_ESC>; + press-threshold-microvolt = <684000>; /* 38% */ + }; + + button-up { + label = "Volume Up"; + linux,code = <KEY_VOLUMEUP>; + press-threshold-microvolt = <468000>; /* 26% */ + }; + + button-down { + label = "Volume Down"; + linux,code = <KEY_VOLUMEDOWN>; + press-threshold-microvolt = <252000>; /* 14% */ + }; + + button-menu { + label = "Menu"; + linux,code = <KEY_MENU>; + press-threshold-microvolt = <0>; /* 0% */ + }; + }; }; &i2c_B { @@ -56,3 +101,8 @@ pinctrl-0 = <&i2c_b_pins>; pinctrl-names = "default"; }; + +&saradc { + status = "okay"; + vref-supply = <&avdd18_usb_adc>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index e59ad308192f..86709929fd20 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -53,6 +53,17 @@ stdout-path = "serial0:115200n8"; }; + leds { + compatible = "gpio-leds"; + + blue { + label = "vega-s95:blue:on"; + gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; + usb_vbus: regulator-usb0-vbus { compatible = "regulator-fixed"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts new file mode 100644 index 000000000000..56f855901262 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016 BayLibre, Inc. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include "meson-gxbb-p20x.dtsi" + +/ { + compatible = "wetek,hub", "amlogic,meson-gxbb"; + model = "WeTek Hub"; + + leds { + compatible = "gpio-leds"; + + system { + label = "wetek-play:system-status"; + gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; + + cvbs-connector { + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts new file mode 100644 index 000000000000..ea79fdd2c248 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016 BayLibre, Inc. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/dts-v1/; + +#include "meson-gxbb-p20x.dtsi" +#include <dt-bindings/input/input.h> + +/ { + compatible = "wetek,play2", "amlogic,meson-gxbb"; + model = "WeTek Play 2"; + + leds { + compatible = "gpio-leds"; + + system { + label = "wetek-play:system-status"; + gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + + wifi { + label = "wetek-play:wifi-status"; + gpios = <&gpio GPIODV_26 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + ethernet { + label = "wetek-play:ethernet-status"; + gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <100>; + + button@0 { + label = "reset"; + linux,code = <KEY_RESTART>; + gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&i2c_A { + status = "okay"; + pinctrl-0 = <&i2c_a_pins>; + pinctrl-names = "default"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index b35307321b63..04b3324bc132 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -50,28 +50,6 @@ / { compatible = "amlogic,meson-gxbb"; - scpi { - compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0"; - mboxes = <&mailbox 1 &mailbox 2>; - shmem = <&cpu_scp_lpri &cpu_scp_hpri>; - - scpi_clocks: clocks { - compatible = "arm,scpi-clocks"; - - scpi_dvfs: scpi_clocks@0 { - compatible = "arm,scpi-dvfs-clocks"; - #clock-cells = <1>; - clock-indices = <0>; - clock-output-names = "vcpu"; - }; - }; - - scpi_sensors: sensors { - compatible = "arm,scpi-sensors"; - #thermal-sensor-cells = <1>; - }; - }; - soc { usb0_phy: phy@c0000000 { compatible = "amlogic,meson-gxbb-usb2-phy"; @@ -93,25 +71,6 @@ status = "disabled"; }; - sram: sram@c8000000 { - compatible = "amlogic,meson-gxbb-sram", "mmio-sram"; - reg = <0x0 0xc8000000 0x0 0x14000>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0 0x0 0xc8000000 0x14000>; - - cpu_scp_lpri: scp-shmem@0 { - compatible = "amlogic,meson-gxbb-scp-shmem"; - reg = <0x13000 0x400>; - }; - - cpu_scp_hpri: scp-shmem@200 { - compatible = "amlogic,meson-gxbb-scp-shmem"; - reg = <0x13400 0x400>; - }; - }; - usb0: usb@c9000000 { compatible = "amlogic,meson-gxbb-usb", "snps,dwc2"; reg = <0x0 0xc9000000 0x0 0x40000>; @@ -138,22 +97,6 @@ }; }; -&cpu0 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu1 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu2 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu3 { - clocks = <&scpi_dvfs 0>; -}; - &cbus { spifc: spi@8c80 { compatible = "amlogic,meson-gxbb-spifc"; @@ -195,6 +138,29 @@ }; }; + uart_ao_a_cts_rts_pins: uart_ao_a_cts_rts { + mux { + groups = "uart_cts_ao_a", + "uart_rts_ao_a"; + function = "uart_ao"; + }; + }; + + uart_ao_b_pins: uart_ao_b { + mux { + groups = "uart_tx_ao_b", "uart_rx_ao_b"; + function = "uart_ao_b"; + }; + }; + + uart_ao_b_cts_rts_pins: uart_ao_b_cts_rts { + mux { + groups = "uart_cts_ao_b", + "uart_rts_ao_b"; + function = "uart_ao_b"; + }; + }; + remote_input_ao_pins: remote_input_ao { mux { groups = "remote_input_ao"; @@ -340,6 +306,14 @@ }; }; + uart_a_cts_rts_pins: uart_a_cts_rts { + mux { + groups = "uart_cts_a", + "uart_rts_a"; + function = "uart_a"; + }; + }; + uart_b_pins: uart_b { mux { groups = "uart_tx_b", @@ -348,6 +322,14 @@ }; }; + uart_b_cts_rts_pins: uart_b_cts_rts { + mux { + groups = "uart_cts_b", + "uart_rts_b"; + function = "uart_b"; + }; + }; + uart_c_pins: uart_c { mux { groups = "uart_tx_c", 
@@ -356,6 +338,14 @@ }; }; + uart_c_cts_rts_pins: uart_c_cts_rts { + mux { + groups = "uart_cts_c", + "uart_rts_c"; + function = "uart_c"; + }; + }; + i2c_a_pins: i2c_a { mux { groups = "i2c_sck_a", @@ -463,6 +453,20 @@ function = "pwm_f_y"; }; }; + + hdmi_hpd_pins: hdmi_hpd { + mux { + groups = "hdmi_hpd"; + function = "hdmi_hpd"; + }; + }; + + hdmi_i2c_pins: hdmi_i2c { + mux { + groups = "hdmi_sda", "hdmi_scl"; + function = "hdmi_i2c"; + }; + }; }; }; @@ -486,6 +490,16 @@ clocks = <&clkc CLKID_I2C>; }; +&saradc { + compatible = "amlogic,meson-gxbb-saradc", "amlogic,meson-saradc"; + clocks = <&xtal>, + <&clkc CLKID_SAR_ADC>, + <&clkc CLKID_SANA>, + <&clkc CLKID_SAR_ADC_CLK>, + <&clkc CLKID_SAR_ADC_SEL>; + clock-names = "clkin", "core", "sana", "adc_clk", "adc_sel"; +}; + &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index cea4a3eded9b..cea4a3eded9b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 69216246275d..fe11b5fc61f7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -88,12 +88,42 @@ }; }; + uart_ao_a_cts_rts_pins: uart_ao_a_cts_rts { + mux { + groups = "uart_cts_ao_a", + "uart_rts_ao_a"; + function = "uart_ao"; + }; + }; + + uart_ao_b_pins: uart_ao_b { + mux { + groups = "uart_tx_ao_b", "uart_rx_ao_b"; + function = "uart_ao_b"; + }; + }; + + uart_ao_b_cts_rts_pins: uart_ao_b_cts_rts { + mux { + groups = "uart_cts_ao_b", + "uart_rts_ao_b"; + function = "uart_ao_b"; + }; + }; + remote_input_ao_pins: remote_input_ao { mux { groups = "remote_input_ao"; function = "remote_input_ao"; }; }; + + pwm_ao_b_pins: pwm_ao_b { + mux { + groups = "pwm_ao_b"; + function = "pwm_ao_b"; + }; + }; }; }; @@ -163,6 +193,14 @@ }; }; + uart_a_cts_rts_pins: uart_a_cts_rts { + mux { + groups = "uart_cts_a", + "uart_rts_a"; + function = "uart_a"; + }; + }; + uart_b_pins: uart_b { mux { groups = "uart_tx_b", @@ -171,6 +209,14 @@ }; }; + uart_b_cts_rts_pins: uart_b_cts_rts { + mux { + groups = "uart_cts_b", + "uart_rts_b"; + function = "uart_b"; + }; + }; + uart_c_pins: uart_c { mux { groups = "uart_tx_c", @@ -179,6 +225,14 @@ }; }; + uart_c_cts_rts_pins: uart_c_cts_rts { + mux { + groups = "uart_cts_c", + "uart_rts_c"; + function = "uart_c"; + }; + }; + i2c_a_pins: i2c_a { mux { groups = "i2c_sck_a", @@ -229,6 +283,20 @@ function = "pwm_e"; }; }; + + hdmi_hpd_pins: hdmi_hpd { + mux { + groups = "hdmi_hpd"; + function = "hdmi_hpd"; + }; + }; + + hdmi_i2c_pins: hdmi_i2c { + mux { + groups = "hdmi_sda", "hdmi_scl"; + function = "hdmi_i2c"; + }; + }; }; eth-phy-mux { @@ -279,6 +347,16 @@ clocks = <&clkc CLKID_I2C>; }; +&saradc { + compatible = "amlogic,meson-gxl-saradc", "amlogic,meson-saradc"; + clocks = <&xtal>, + <&clkc CLKID_SAR_ADC>, + <&clkc CLKID_SANA>, + <&clkc CLKID_SAR_ADC_CLK>, + <&clkc CLKID_SAR_ADC_SEL>; + clock-names = "clkin", "core", "sana", "adc_clk", "adc_sel"; +}; + &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts index 5dbc66088355..5dbc66088355 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts diff --git 
a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts index 95e11d7faab8..95e11d7faab8 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index eb2f0c3e5e53..ddea7305c644 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -85,6 +85,7 @@ reg = <0x0 0x100>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu5: cpu@101 { @@ -93,6 +94,7 @@ reg = <0x0 0x101>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu6: cpu@102 { @@ -101,6 +103,7 @@ reg = <0x0 0x102>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu7: cpu@103 { @@ -109,10 +112,21 @@ reg = <0x0 0x103>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; }; }; +&saradc { + compatible = "amlogic,meson-gxm-saradc", "amlogic,meson-saradc"; +}; + +&scpi_dvfs { + clock-indices = <0 1>; + clock-output-names = "vbig", "vlittle"; +}; + &vpu { compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu"; }; + diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 9d799d938d2f..df539e865b90 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -372,12 +372,13 @@ }; }; - coresight-replicator { - /* - * Non-configurable replicators don't show up on the - * AMBA bus. As such no need to add "arm,primecell". - */ - compatible = "arm,coresight-replicator"; + replicator@20120000 { + compatible = "qcom,coresight-replicator1x", "arm,primecell"; + reg = <0 0x20120000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + power-domains = <&scpi_devpd 0>; ports { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 53fd0683d400..098ad557fee3 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -217,18 +217,6 @@ assigned-clock-parents = <&cmu_top CLK_FOUT_AUD_PLL>; }; -&cmu_disp { - assigned-clocks = <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, - <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; - assigned-clock-parents = <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, - <0>, - <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; - assigned-clock-rates = <0>, <400000000>; -}; - &cmu_fsys { assigned-clocks = <&cmu_top CLK_MOUT_SCLK_USBDRD30>, <&cmu_top CLK_MOUT_SCLK_USBHOST30>, diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts index ddba2f889326..dea0a6f5bc18 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts @@ -18,6 +18,40 @@ compatible = "samsung,tm2", "samsung,exynos5433"; }; +&cmu_disp { + /* + * TM2 and TM2e differ only by DISP_PLL rate, but define all assigned + * clocks properties for DISP CMU for each board to keep them together + * for easier review and maintenance. 
+ */ + assigned-clocks = <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, + <&cmu_disp CLK_MOUT_ACLK_DISP_333_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER>, + <&cmu_disp CLK_MOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; + assigned-clock-parents = <0>, <0>, + <&cmu_mif CLK_ACLK_DISP_333>, + <&cmu_mif CLK_SCLK_DSIM0_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY>, + <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, + <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; + assigned-clock-rates = <250000000>, <400000000>; +}; + &hsi2c_9 { status = "okay"; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts index 2fbf3a860316..7891a31adc17 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts @@ -18,6 +18,40 @@ compatible = "samsung,tm2e", "samsung,exynos5433"; }; +&cmu_disp { + /* + * TM2 and TM2e differ only by DISP_PLL rate, but define all assigned + * clocks properties for DISP CMU for each board to keep them together + * for easier review and maintenance. + */ + assigned-clocks = <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, + <&cmu_disp CLK_MOUT_ACLK_DISP_333_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER>, + <&cmu_disp CLK_MOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; + assigned-clock-parents = <0>, <0>, + <&cmu_mif CLK_ACLK_DISP_333>, + <&cmu_mif CLK_SCLK_DSIM0_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY>, + <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, + <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; + assigned-clock-rates = <278000000>, <400000000>; +}; + &ldo31_reg { regulator-name = "TSP_VDD_1.8V_AP"; regulator-min-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index c528dd52ba2d..e5892bb0ae6e 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -13,6 +13,7 @@ #include "exynos7.dtsi" #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/clock/samsung,s2mps11.h> +#include <dt-bindings/gpio/gpio.h> / { model = "Samsung Exynos7 Espresso board based on EXYNOS7"; @@ -32,6 +33,29 @@ device_type = "memory"; reg = <0x0 0x40000000 0x0 0xC0000000>; }; + + usb30_vbus_reg: regulator-usb30 { + compatible = "regulator-fixed"; + 
regulator-name = "VBUS_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gph1 1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb30_vbus_en>; + enable-active-high; + }; + + usb3drd_boost_5v: regulator-usb3drd-boost { + compatible = "regulator-fixed"; + regulator-name = "VUSB_VBUS_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpf4 1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3drd_boost_en>; + enable-active-high; + }; + }; &fin_pll { @@ -328,8 +352,8 @@ &pinctrl_alive { pmic_irq: pmic-irq { samsung,pins = "gpa0-2"; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; }; @@ -365,3 +389,24 @@ vqmmc-supply = <&ldo2_reg>; disable-wp; }; + +&pinctrl_bus1 { + usb30_vbus_en: usb30-vbus-en { + samsung,pins = "gph1-1"; + samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; + }; + + usb3drd_boost_en: usb3drd-boost-en { + samsung,pins = "gpf4-1"; + samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; + }; +}; + +&usbdrd_phy { + vbus-supply = <&usb30_vbus_reg>; + vbus-boost-supply = <&usb3drd_boost_5v>; +}; diff --git a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi index 7ebb93927f13..8f58850cd28c 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi @@ -12,6 +12,8 @@ * published by the Free Software Foundation. */ +#include <dt-bindings/pinctrl/samsung.h> + &pinctrl_alive { gpa0: gpa0 { gpio-controller; @@ -187,163 +189,163 @@ hs_i2c10_bus: hs-i2c10-bus { samsung,pins = "gpb0-1", "gpb0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c11_bus: hs-i2c11-bus { samsung,pins = "gpb0-3", "gpb0-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c2_bus: hs-i2c2-bus { samsung,pins = "gpd0-3", "gpd0-2"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart0_data: uart0-data { samsung,pins = "gpd0-0", "gpd0-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart0_fctl: uart0-fctl { samsung,pins = "gpd0-2", "gpd0-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart2_data: uart2-data { samsung,pins = "gpd1-4", "gpd1-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c3_bus: hs-i2c3-bus { samsung,pins = "gpd1-3", 
"gpd1-2"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart1_data: uart1-data { samsung,pins = "gpd1-0", "gpd1-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart1_fctl: uart1-fctl { samsung,pins = "gpd1-2", "gpd1-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c0_bus: hs-i2c0-bus { samsung,pins = "gpd2-1", "gpd2-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c1_bus: hs-i2c1-bus { samsung,pins = "gpd2-3", "gpd2-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c9_bus: hs-i2c9-bus { samsung,pins = "gpd2-7", "gpd2-6"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm0_out: pwm0-out { samsung,pins = "gpd2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm1_out: pwm1-out { samsung,pins = "gpd2-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm2_out: pwm2-out { samsung,pins = "gpd2-6"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; pwm3_out: pwm3-out { samsung,pins = "gpd2-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c8_bus: hs-i2c8-bus { samsung,pins = "gpd5-3", "gpd5-2"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; uart3_data: uart3-data { samsung,pins = "gpd5-0", "gpd5-1"; - samsung,pin-function = <3>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_3>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi2_bus: spi2-bus { samsung,pins = "gpd5-0", "gpd5-1", "gpd5-2", "gpd5-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi1_bus: spi1-bus { samsung,pins = "gpd6-2", "gpd6-3", 
"gpd6-4", "gpd6-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; spi0_bus: spi0-bus { samsung,pins = "gpd8-0", "gpd8-1", "gpd6-0", "gpd6-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c4_bus: hs-i2c4-bus { samsung,pins = "gpg3-1", "gpg3-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; hs_i2c5_bus: hs-i2c5-bus { samsung,pins = "gpg3-3", "gpg3-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; @@ -358,9 +360,9 @@ hs_i2c6_bus: hs-i2c6-bus { samsung,pins = "gpj0-1", "gpj0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; @@ -375,9 +377,9 @@ hs_i2c7_bus: hs-i2c7-bus { samsung,pins = "gpj1-1", "gpj1-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; @@ -392,9 +394,9 @@ spi3_bus: spi3-bus { samsung,pins = "gpg4-0", "gpg4-1", "gpg4-2", "gpg4-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; @@ -409,9 +411,9 @@ spi4_bus: spi4-bus { samsung,pins = "gpv7-0", "gpv7-1", "gpv7-2", "gpv7-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; @@ -426,37 +428,37 @@ sd2_clk: sd2-clk { samsung,pins = "gpr4-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <3>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_cmd: sd2-cmd { samsung,pins = "gpr4-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <3>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_cd: sd2-cd { samsung,pins = "gpr4-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_bus1: sd2-bus-width1 { samsung,pins = "gpr4-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; sd2_bus4: sd2-bus-width4 { samsung,pins = "gpr4-4", "gpr4-5", "gpr4-6"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = 
<EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>; }; }; @@ -495,107 +497,107 @@ sd0_clk: sd0-clk { samsung,pins = "gpr0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_cmd: sd0-cmd { samsung,pins = "gpr0-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_ds: sd0-ds { samsung,pins = "gpr0-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_qrdy: sd0-qrdy { samsung,pins = "gpr0-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_bus1: sd0-bus-width1 { samsung,pins = "gpr1-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_bus4: sd0-bus-width4 { samsung,pins = "gpr1-1", "gpr1-2", "gpr1-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd0_bus8: sd0-bus-width8 { samsung,pins = "gpr1-4", "gpr1-5", "gpr1-6", "gpr1-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV2>; }; sd1_clk: sd1-clk { samsung,pins = "gpr2-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV3>; }; sd1_cmd: sd1-cmd { samsung,pins = "gpr2-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV3>; }; sd1_ds: sd1-ds { samsung,pins = "gpr2-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV4>; }; sd1_qrdy: sd1-qrdy { samsung,pins = "gpr2-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV4>; }; sd1_int: sd1-int { samsung,pins = "gpr2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV4>; }; sd1_bus1: sd1-bus-width1 { samsung,pins = "gpr3-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = 
<EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV3>; }; sd1_bus4: sd1-bus-width4 { samsung,pins = "gpr3-1", "gpr3-2", "gpr3-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV3>; }; sd1_bus8: sd1-bus-width8 { samsung,pins = "gpr3-4", "gpr3-5", "gpr3-6", "gpr3-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS7_FSYS1_PIN_DRV_LV3>; }; }; @@ -682,22 +684,22 @@ spi5_bus: spi5-bus { samsung,pins = "gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; ufs_refclk_out: ufs-refclk-out { samsung,pins = "gpg2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV2>; }; ufs_rst_n: ufs-rst-n { samsung,pins = "gph1-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = <EXYNOS_PIN_FUNC_2>; + samsung,pin-pud = <EXYNOS_PIN_PULL_UP>; + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 80aa60e38237..9a3fbed1765a 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -603,6 +603,40 @@ #include "exynos7-trip-points.dtsi" }; }; + + usbdrd_phy: phy@15500000 { + compatible = "samsung,exynos7-usbdrd-phy"; + reg = <0x15500000 0x100>; + clocks = <&clock_fsys0 ACLK_USBDRD300>, + <&clock_fsys0 OSCCLK_PHY_CLKOUT_USB30_PHY>, + <&clock_fsys0 PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER>, + <&clock_fsys0 PHYCLK_USBDRD300_UDRD30_PHYCLK_USER>, + <&clock_fsys0 SCLK_USBDRD300_REFCLK>; + clock-names = "phy", "ref", "phy_pipe", + "phy_utmi", "itp"; + samsung,pmu-syscon = <&pmu_system_controller>; + #phy-cells = <1>; + }; + + usbdrd3 { + compatible = "samsung,exynos7-dwusb3"; + clocks = <&clock_fsys0 ACLK_USBDRD300>, + <&clock_fsys0 SCLK_USBDRD300_SUSPENDCLK>, + <&clock_fsys0 ACLK_AXIUS_USBDRD30X_FSYS0X>; + clock-names = "usbdrd30", "usbdrd30_susp_clk", + "usbdrd30_axius_clk"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + dwc3@15400000 { + compatible = "snps,dwc3"; + reg = <0x15400000 0x10000>; + interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usbdrd_phy 0>, <&usbdrd_phy 1>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; }; }; diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index eb8432bb82b8..e39d487bf724 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,6 +23,7 @@ */ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4ce82ed3e7c3..05310ad8c5ab 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -184,16 +184,22 @@ static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg) } static inline int 
__attribute_const__ -cpuid_feature_extract_field(u64 features, int field, bool sign) +cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign) { return (sign) ? - cpuid_feature_extract_signed_field(features, field) : - cpuid_feature_extract_unsigned_field(features, field); + cpuid_feature_extract_signed_field_width(features, field, width) : + cpuid_feature_extract_unsigned_field_width(features, field, width); +} + +static inline int __attribute_const__ +cpuid_feature_extract_field(u64 features, int field, bool sign) +{ + return cpuid_feature_extract_field_width(features, field, 4, sign); } static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) { - return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign); + return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign); } static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 243ef256b8c9..73d5bab015eb 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -17,7 +17,6 @@ #define __ASM_DEVICE_H struct dev_archdata { - struct dma_map_ops *dma_ops; #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index ccea82c2b089..505756cdc67a 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -25,12 +25,12 @@ #include <asm/xen/hypervisor.h> #define DMA_ERROR_CODE (~(dma_addr_t)0) -extern struct dma_map_ops dummy_dma_ops; +extern const struct dma_map_ops dummy_dma_ops; -static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) +static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; + if (dev && dev->dma_ops) + return dev->dma_ops; /* * We expect no ISA devices, and all other DMA masters are expected to @@ -39,12 +39,12 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) return &dummy_dma_ops; } -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { if (xen_initial_domain()) return xen_dma_ops; else - return __generic_dma_ops(dev); + return __generic_dma_ops(NULL); } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 1737aecfcc5e..6deb8d726041 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -16,6 +16,9 @@ #ifndef _ARM_KPROBES_H #define _ARM_KPROBES_H +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> @@ -57,4 +60,5 @@ int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr); void kretprobe_trampoline(void); void __kprobes *trampoline_probe_handler(struct pt_regs *regs); +#endif /* CONFIG_KPROBES */ #endif /* _ARM_KPROBES_H */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 1ef40d82cfd3..3257895a9b5e 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -25,6 +25,8 @@ #include <linux/compiler.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> +#include <linux/mm_types.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> diff --git 
a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 86032a012388..657977e77ec8 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -19,6 +19,7 @@ #include <asm/sysreg.h> #include <asm/system_misc.h> #include <asm/traps.h> +#include <asm/kprobes.h> #include <linux/uaccess.h> #include <asm/cpufeature.h> diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 2bd426448fc1..32913567da08 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -26,6 +26,7 @@ #include <linux/kprobes.h> #include <linux/stat.h> #include <linux/uaccess.h> +#include <linux/sched/task_stack.h> #include <asm/cpufeature.h> #include <asm/cputype.h> diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b883f1f75216..06da8ea16bbe 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -21,7 +21,7 @@ #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/hardirq.h> diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index b6badff5a151..3a63954a8b14 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -31,6 +31,7 @@ #include <asm/debug-monitors.h> #include <asm/fixmap.h> #include <asm/insn.h> +#include <asm/kprobes.h> #define AARCH64_INSN_SF_BIT BIT(31) #define AARCH64_INSN_N_BIT BIT(22) diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index d217c9e95b06..2122cd187f19 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -24,6 +24,8 @@ #include <linux/kdebug.h> #include <linux/kgdb.h> #include <linux/kprobes.h> +#include <linux/sched/task_stack.h> + #include <asm/debug-monitors.h> #include <asm/insn.h> #include <asm/traps.h> diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 3f62b35fb6f1..bd1b74c2436f 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -2,6 +2,7 @@ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/bug.h> +#include <linux/sched/task_stack.h> #include <asm/compat.h> #include <asm/perf_regs.h> diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h index 76d3f315407f..192ab007bacb 100644 --- a/arch/arm64/kernel/probes/decode-insn.h +++ b/arch/arm64/kernel/probes/decode-insn.h @@ -16,6 +16,8 @@ #ifndef _ARM_KERNEL_KPROBES_ARM64_H #define _ARM_KERNEL_KPROBES_ARM64_H +#include <asm/kprobes.h> + /* * ARM strongly recommends a limit of 128 bytes between LoadExcl and * StoreExcl instructions in a single thread of execution. 
So keep the diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f0593c92279b..2a07aae5b8a2 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -22,6 +22,7 @@ #include <linux/extable.h> #include <linux/slab.h> #include <linux/stop_machine.h> +#include <linux/sched/debug.h> #include <linux/stringify.h> #include <asm/traps.h> #include <asm/ptrace.h> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1ad48f93abdd..043d373b8369 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -24,6 +24,9 @@ #include <linux/efi.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index a22161ccf447..c142459a88f3 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -22,7 +22,8 @@ #include <linux/audit.h> #include <linux/compat.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/ptrace.h> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 952e2c0dabd5..42274bda0ccb 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -42,6 +42,7 @@ #include <linux/of_fdt.h> #include <linux/efi.h> #include <linux/psci.h> +#include <linux/sched/task.h> #include <linux/mm.h> #include <asm/acpi.h> diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a8ec5da530af..ef1caae02110 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -21,7 +21,9 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/profile.h> @@ -222,7 +224,7 @@ asmlinkage void secondary_start_kernel(void) * All kernel threads share the same mm context; grab a * reference and switch to it. 
*/ - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; /* diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 8a552a33c6ef..feac80c22f61 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -19,6 +19,8 @@ #include <linux/export.h> #include <linux/ftrace.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <asm/irq.h> diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index abaf582fc7a8..8b8bbd3eaa52 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -21,6 +21,7 @@ #include <linux/compat.h> #include <linux/personality.h> #include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/uaccess.h> diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 565dd69888cc..08243533e5ee 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -20,6 +20,7 @@ #include <linux/nodemask.h> #include <linux/of.h> #include <linux/sched.h> +#include <linux/sched/topology.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/cpufreq.h> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 7d47c2cdfd93..e52be6aa44ee 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -29,8 +29,11 @@ #include <linux/kexec.h> #include <linux/delay.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/syscalls.h> +#include <linux/mm_types.h> #include <asm/atomic.h> #include <asm/bug.h> diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S index 410fbdb8163f..f5b9210f1c83 100644 --- a/arch/arm64/lib/copy_template.S +++ b/arch/arm64/lib/copy_template.S @@ -62,7 +62,7 @@ D_h .req x14 sub count, count, tmp2 /* * Copy the leading memory data from src to dst in an increasing - * address order.By this way,the risk of overwritting the source + * address order.By this way,the risk of overwriting the source * memory data is eliminated when the distance between src and * dst is less than 16. The memory accesses here are alignment. 
*/ diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index aff1d0afeb1e..81cdb2e844ed 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -363,7 +363,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr) return 0; } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_alloc, .free = __dma_free, .mmap = __swiotlb_mmap, @@ -516,7 +516,7 @@ static int __dummy_dma_supported(struct device *hwdev, u64 mask) return 0; } -struct dma_map_ops dummy_dma_ops = { +const struct dma_map_ops dummy_dma_ops = { .alloc = __dummy_alloc, .free = __dummy_free, .mmap = __dummy_mmap, @@ -795,7 +795,7 @@ static void __iommu_unmap_sg_attrs(struct device *dev, iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); } -static struct dma_map_ops iommu_dma_ops = { +static const struct dma_map_ops iommu_dma_ops = { .alloc = __iommu_alloc_attrs, .free = __iommu_free_attrs, .mmap = __iommu_mmap_attrs, @@ -848,7 +848,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, if (iommu_dma_init_domain(domain, dma_base, size, dev)) goto out_err; - dev->archdata.dma_ops = &iommu_dma_ops; + dev->dma_ops = &iommu_dma_ops; } return true; @@ -958,7 +958,7 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_teardown_dma_ops(struct device *dev) { - dev->archdata.dma_ops = NULL; + dev->dma_ops = NULL; } #else @@ -972,8 +972,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - if (!dev->archdata.dma_ops) - dev->archdata.dma_ops = &swiotlb_dma_ops; + if (!dev->dma_ops) + dev->dma_ops = &swiotlb_dma_ops; dev->archdata.dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 81283851c9af..4bf899fb451b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -26,7 +26,8 @@ #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/highmem.h> #include <linux/perf_event.h> #include <linux/preempt.h> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 201d918e7575..55d1e9205543 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "kasan: " fmt #include <linux/kasan.h> #include <linux/kernel.h> +#include <linux/sched/task.h> #include <linux/memblock.h> #include <linux/start_kernel.h> #include <linux/mm.h> diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 01c171723bb3..7b0d55756eb1 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -22,7 +22,8 @@ #include <linux/mman.h> #include <linux/export.h> #include <linux/shm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/io.h> #include <linux/personality.h> #include <linux/random.h> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b805c017f789..d28dbcf596b6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -109,10 +109,8 @@ static bool pgattr_change_is_safe(u64 old, u64 new) static void alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(void), - bool page_mappings_only) + 
phys_addr_t (*pgtable_alloc)(void)) { - pgprot_t __prot = prot; pte_t *pte; BUG_ON(pmd_sect(*pmd)); @@ -130,18 +128,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, do { pte_t old_pte = *pte; - /* - * Set the contiguous bit for the subsequent group of PTEs if - * its size and alignment are appropriate. - */ - if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) { - if (end - addr >= CONT_PTE_SIZE && !page_mappings_only) - __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - else - __prot = prot; - } - - set_pte(pte, pfn_pte(pfn, __prot)); + set_pte(pte, pfn_pte(pfn, prot)); pfn++; /* @@ -160,7 +147,6 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t (*pgtable_alloc)(void), bool page_mappings_only) { - pgprot_t __prot = prot; pmd_t *pmd; unsigned long next; @@ -187,18 +173,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~SECTION_MASK) == 0 && !page_mappings_only) { - /* - * Set the contiguous bit for the subsequent group of - * PMDs if its size and alignment are appropriate. - */ - if (((addr | phys) & ~CONT_PMD_MASK) == 0) { - if (end - addr >= CONT_PMD_SIZE) - __prot = __pgprot(pgprot_val(prot) | - PTE_CONT); - else - __prot = prot; - } - pmd_set_huge(pmd, phys, __prot); + pmd_set_huge(pmd, phys, prot); /* * After the PMD entry has been populated once, we @@ -208,8 +183,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, pmd_val(*pmd))); } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), - prot, pgtable_alloc, - page_mappings_only); + prot, pgtable_alloc); BUG_ON(pmd_val(old_pmd) != 0 && pmd_val(old_pmd) != pmd_val(*pmd)); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index cd4d53d7e458..877d42fb0df6 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -138,7 +138,7 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) - pre_ttbr0_update_workaround x0, x1, x2 + pre_ttbr0_update_workaround x0, x2, x3 mmid x1, x1 // get mm->context.id bfi x0, x1, #48, #16 // set the ASID msr ttbr0_el1, x0 // set TTBR0 diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index 1115f2a645d1..7388451f9905 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -4,9 +4,9 @@ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction); -extern struct dma_map_ops avr32_dma_ops; +extern const struct dma_map_ops avr32_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &avr32_dma_ops; } diff --git a/arch/avr32/include/asm/kprobes.h b/arch/avr32/include/asm/kprobes.h index 45f563ed73fd..28dfc61ad384 100644 --- a/arch/avr32/include/asm/kprobes.h +++ b/arch/avr32/include/asm/kprobes.h @@ -11,10 +11,14 @@ #ifndef __ASM_AVR32_KPROBES_H #define __ASM_AVR32_KPROBES_H +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */ + +#ifdef CONFIG_KPROBES #include <linux/types.h> typedef u16 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */ #define MAX_INSN_SIZE 2 #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ @@ -46,4 +50,5 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, #define flush_insn_slot(p) do { } while (0) +#endif /* CONFIG_KPROBES */ #endif /* __ASM_AVR32_KPROBES_H */ diff 
--git a/arch/avr32/include/asm/mmu_context.h b/arch/avr32/include/asm/mmu_context.h index 27ff23407100..cd87abba8db7 100644 --- a/arch/avr32/include/asm/mmu_context.h +++ b/arch/avr32/include/asm/mmu_context.h @@ -12,6 +12,8 @@ #ifndef __ASM_AVR32_MMU_CONTEXT_H #define __ASM_AVR32_MMU_CONTEXT_H +#include <linux/mm_types.h> + #include <asm/tlbflush.h> #include <asm/sysreg.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/avr32/kernel/nmi_debug.c b/arch/avr32/kernel/nmi_debug.c index 3414b8566c29..25823049bb99 100644 --- a/arch/avr32/kernel/nmi_debug.c +++ b/arch/avr32/kernel/nmi_debug.c @@ -9,6 +9,7 @@ #include <linux/kdebug.h> #include <linux/notifier.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <asm/irq.h> diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 68e5b9dac059..ad0dfccedb79 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -6,6 +6,9 @@ * published by the Free Software Foundation. */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/fs.h> diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c index a89b893279bb..41a14e96a1db 100644 --- a/arch/avr32/kernel/ptrace.c +++ b/arch/avr32/kernel/ptrace.c @@ -8,6 +8,7 @@ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/errno.h> diff --git a/arch/avr32/kernel/stacktrace.c b/arch/avr32/kernel/stacktrace.c index c09f0d8dd679..f8cc995cf0e0 100644 --- a/arch/avr32/kernel/stacktrace.c +++ b/arch/avr32/kernel/stacktrace.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. 
*/ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index eb4a3fcfbaff..50b541325025 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -14,7 +14,7 @@ #include <linux/extable.h> #include <linux/module.h> /* print_modules */ #include <linux/notifier.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/addrspace.h> diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 54534e5d0781..555222d4f414 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c @@ -191,7 +191,7 @@ static void avr32_dma_sync_sg_for_device(struct device *dev, dma_cache_sync(dev, sg_virt(sg), sg->length, direction); } -struct dma_map_ops avr32_dma_ops = { +const struct dma_map_ops avr32_dma_ops = { .alloc = avr32_dma_alloc, .free = avr32_dma_free, .map_page = avr32_dma_map_page, diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index d6fa60b158be..625db8ac815e 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -46,3 +46,4 @@ generic-y += unaligned.h generic-y += user.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index 3490570aaa82..04254ac36bed 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -36,9 +36,9 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) __dma_sync(addr, size, dir); } -extern struct dma_map_ops bfin_dma_ops; +extern const struct dma_map_ops bfin_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &bfin_dma_ops; } diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h index 15b16d3e8de8..0ce6de873b27 100644 --- a/arch/blackfin/include/asm/mmu_context.h +++ b/arch/blackfin/include/asm/mmu_context.h @@ -9,6 +9,8 @@ #include <linux/slab.h> #include <linux/sched.h> +#include <linux/mm_types.h> + #include <asm/setup.h> #include <asm/page.h> #include <asm/pgalloc.h> diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index a27a74a18fb0..477bb29a7987 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -159,7 +159,7 @@ static inline void bfin_dma_sync_single_for_device(struct device *dev, _dma_sync(handle, size, dir); } -struct dma_map_ops bfin_dma_ops = { +const struct dma_map_ops bfin_dma_ops = { .alloc = bfin_dma_alloc, .free = bfin_dma_free, diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c index 95ba6d9e9a3d..3c992c1f8ef2 100644 --- a/arch/blackfin/kernel/dumpstack.c +++ b/arch/blackfin/kernel/dumpstack.c @@ -10,6 +10,8 @@ #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/module.h> +#include <linux/sched/debug.h> + #include <asm/trace.h> /* diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 61fbd2de993d..4b89af9243d3 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -8,6 +8,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/debug.h> #include <linux/init.h> #include <linux/serial_core.h> #include 
<linux/console.h> diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c index a88daddbf074..b5b658449616 100644 --- a/arch/blackfin/kernel/flat.c +++ b/arch/blackfin/kernel/flat.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/flat.h> #define FLAT_BFIN_RELOC_TYPE_16_BIT 0 diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 9919d29287dc..633c37083e87 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c @@ -17,6 +17,7 @@ #include <linux/nmi.h> #include <linux/smp.h> #include <linux/timer.h> +#include <linux/sched/debug.h> #include <asm/blackfin.h> #include <linux/atomic.h> #include <asm/cacheflush.h> diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 4aa5545c4fde..89d5162d4ca6 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -12,6 +12,10 @@ #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/mm_types.h> #include <linux/tick.h> #include <linux/fs.h> #include <linux/err.h> diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 360d99645163..a6827095b99a 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/elf.h> diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index ea570db598e5..5f5172779204 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -12,6 +12,7 @@ #include <linux/binfmts.h> #include <linux/uaccess.h> #include <linux/tracehook.h> +#include <linux/sched/task_stack.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c index 30301e1eace5..17198f3650b6 100644 --- a/arch/blackfin/kernel/stacktrace.c +++ b/arch/blackfin/kernel/stacktrace.c @@ -6,6 +6,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index 719dd796c12c..151f22196ab6 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c @@ -11,7 +11,9 @@ #include <linux/thread_info.h> #include <linux/mm.h> #include <linux/oom.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/kallsyms.h> diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 1ed85ddadc0d..a323a40a46e9 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -9,6 +9,8 @@ #include <linux/bug.h> #include <linux/uaccess.h> #include <linux/module.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <asm/traps.h> #include <asm/cplb.h> #include <asm/blackfin.h> diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 4986b4fbcee9..13e94bf9d8ba 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -16,6 +16,7 @@ #include <linux/seq_file.h> #include <linux/irq.h> #include <linux/sched.h> +#include 
<linux/sched/debug.h> #include <linux/syscore_ops.h> #include <linux/gpio.h> #include <asm/delay.h> diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 23c4ef5f8bdc..b32ddab7966c 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -11,7 +11,8 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/clockchips.h> @@ -307,8 +308,8 @@ void secondary_start_kernel(void) local_irq_disable(); /* Attach the new idle task to the global mm. */ - atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmget(mm); + mmgrab(mm); current->active_mm = mm; preempt_disable(); diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c index 7e2e674ed444..aaa1e64b753b 100644 --- a/arch/blackfin/mm/isram-driver.c +++ b/arch/blackfin/mm/isram-driver.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <asm/blackfin.h> #include <asm/dma.h> diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index 1f3b3ef3e103..d2a96c2c02a3 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -19,6 +19,8 @@ #include <linux/spinlock.h> #include <linux/rtc.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include <asm/blackfin.h> #include <asm/mem_map.h> #include "blackfin_sram.h" diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 4e9f57433f3a..82619c32d25b 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -61,3 +61,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 5717b1e52d96..aca9f755e4f8 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -17,9 +17,9 @@ */ #define DMA_ERROR_CODE ~0 -extern struct dma_map_ops c6x_dma_ops; +extern const struct dma_map_ops c6x_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &c6x_dma_ops; } diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index 6752df32ef06..9fff8be75f58 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c @@ -123,7 +123,7 @@ static void c6x_dma_sync_sg_for_device(struct device *dev, } -struct dma_map_ops c6x_dma_ops = { +const struct dma_map_ops c6x_dma_ops = { .alloc = c6x_dma_alloc, .free = c6x_dma_free, .map_page = c6x_dma_map_page, diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c index 0ee7686a78f3..c4ecb24c2d5c 100644 --- a/arch/c6x/kernel/process.c +++ b/arch/c6x/kernel/process.c @@ -17,6 +17,8 @@ #include <linux/mqueue.h> #include <linux/syscalls.h> #include <linux/reboot.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <asm/syscalls.h> diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c index 3c494e84444d..a27e1f02ce18 100644 --- a/arch/c6x/kernel/ptrace.c +++ b/arch/c6x/kernel/ptrace.c @@ -14,6 +14,7 @@ #include <linux/tracehook.h> #include <linux/regset.h> #include <linux/elf.h> +#include <linux/sched/task_stack.h> #include <asm/cacheflush.h> diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index 
dcc2c2f6d67c..09b8a40d5680 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -10,6 +10,7 @@ */ #include <linux/module.h> #include <linux/ptrace.h> +#include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <linux/bug.h> diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c index 9ac75d68f184..cc62572c1b94 100644 --- a/arch/cris/arch-v10/drivers/sync_serial.c +++ b/arch/cris/arch-v10/drivers/sync_serial.c @@ -16,7 +16,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/major.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/init.h> diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index 96e5afef6b47..e299d30105b5 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c @@ -11,6 +11,9 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c index eca94c7d56e7..c2f2b9b83cc4 100644 --- a/arch/cris/arch-v10/kernel/ptrace.c +++ b/arch/cris/arch-v10/kernel/ptrace.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index db30c98e4926..bab4a8dd6bfd 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c @@ -14,6 +14,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/cris/arch-v10/kernel/traps.c b/arch/cris/arch-v10/kernel/traps.c index 96d004fe9740..c0a501f29bd8 100644 --- a/arch/cris/arch-v10/kernel/traps.c +++ b/arch/cris/arch-v10/kernel/traps.c @@ -10,6 +10,8 @@ #include <linux/ptrace.h> #include <linux/uaccess.h> +#include <linux/sched/debug.h> + #include <arch/sv_addr_ag.h> #include <arch/system.h> diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c index 21d78c599bab..3225d38bdaea 100644 --- a/arch/cris/arch-v10/mm/tlb.c +++ b/arch/cris/arch-v10/mm/tlb.c @@ -10,6 +10,8 @@ * */ +#include <linux/mm_types.h> + #include <asm/tlb.h> #include <asm/mmu_context.h> #include <arch/svinto.h> diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 1f0636793f0c..7072341995ff 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c @@ -69,7 +69,7 @@ static inline int v32_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops v32_dma_ops = { +const struct dma_map_ops v32_dma_ops = { .alloc = v32_dma_alloc, .free = v32_dma_free, .map_page = v32_dma_map_page, diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index ef515af1a377..8efcc1a899a8 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -11,7 +11,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/major.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/poll.h> diff --git a/arch/cris/arch-v32/kernel/process.c 
b/arch/cris/arch-v32/kernel/process.c index 4d1afa9f9fd3..c530a8fa87ce 100644 --- a/arch/cris/arch-v32/kernel/process.c +++ b/arch/cris/arch-v32/kernel/process.c @@ -9,6 +9,9 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c index c366bc05466a..0461e95bbb62 100644 --- a/arch/cris/arch-v32/kernel/ptrace.c +++ b/arch/cris/arch-v32/kernel/ptrace.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index 816bf2ca93ef..ea2e8e1398e8 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c @@ -3,6 +3,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/kernel.h> diff --git a/arch/cris/arch-v32/kernel/traps.c b/arch/cris/arch-v32/kernel/traps.c index ad6174e217c9..a34256515036 100644 --- a/arch/cris/arch-v32/kernel/traps.c +++ b/arch/cris/arch-v32/kernel/traps.c @@ -5,6 +5,8 @@ #include <linux/ptrace.h> #include <linux/extable.h> #include <linux/uaccess.h> +#include <linux/sched/debug.h> + #include <hwregs/supp_reg.h> #include <hwregs/intr_vect_defs.h> #include <asm/irq.h> diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c index c030d020660a..bc3de5b5e27c 100644 --- a/arch/cris/arch-v32/mm/tlb.c +++ b/arch/cris/arch-v32/mm/tlb.c @@ -6,6 +6,7 @@ * Authors: Bjorn Wesen <bjornw@axis.com> * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port. 
*/ +#include <linux/mm_types.h> #include <asm/tlb.h> #include <asm/mmu_context.h> diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 8e4ef321001f..0f5132b08896 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -45,3 +45,4 @@ generic-y += types.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 5a370178a0e9..256169de3743 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -2,14 +2,14 @@ #define _ASM_CRIS_DMA_MAPPING_H #ifdef CONFIG_PCI -extern struct dma_map_ops v32_dma_ops; +extern const struct dma_map_ops v32_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &v32_dma_ops; } #else -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { BUG(); return NULL; diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index ceefc314d64d..2a3210ba4c72 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -9,7 +9,7 @@ #include <asm-generic/pgtable-nopmd.h> #ifndef __ASSEMBLY__ -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <asm/mmu.h> #endif #include <arch/pgtable.h> diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 694850e8f077..09b864f46f8a 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/ptrace.h> #include <linux/irq.h> +#include <linux/sched/debug.h> #include <linux/kernel_stat.h> #include <linux/signal.h> diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 50a7dd451456..0bbd3a0c3d70 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -20,6 +20,7 @@ #include <linux/spinlock.h> #include <linux/init_task.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/fs.h> #include <linux/user.h> #include <linux/elfcore.h> diff --git a/arch/cris/kernel/stacktrace.c b/arch/cris/kernel/stacktrace.c index 99838c74456d..f1cc3aaacd8d 100644 --- a/arch/cris/kernel/stacktrace.c +++ b/arch/cris/kernel/stacktrace.c @@ -1,5 +1,5 @@ #include <linux/sched.h> -#include <linux/stacktrace.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/stacktrace.h> diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index 2dda6da71521..bc562cf511a6 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -29,7 +29,7 @@ #include <linux/timex.h> #include <linux/init.h> #include <linux/profile.h> -#include <linux/sched.h> /* just for sched_clock() - funny that */ +#include <linux/sched/clock.h> #define D(x) diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c index b2a312a7afc6..a01636a12a6e 100644 --- a/arch/cris/kernel/traps.c +++ b/arch/cris/kernel/traps.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/utsname.h> +#include <linux/sched/debug.h> #ifdef CONFIG_KALLSYMS #include <linux/kallsyms.h> #endif diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 94183d3639ef..1fca464f1b9e 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/wait.h> +#include 
<linux/sched/signal.h> #include <linux/uaccess.h> #include <arch/system.h> diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c index b7f8de576777..8413741cfa0f 100644 --- a/arch/cris/mm/tlb.c +++ b/arch/cris/mm/tlb.c @@ -9,6 +9,8 @@ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/mm_types.h> + #include <asm/tlb.h> #define D(x) diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 0f5b0d5d313c..c33b46715f65 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += word-at-a-time.h +generic-y += kprobes.h diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index 9a82bfa4303b..354900917585 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -7,9 +7,9 @@ extern unsigned long __nongprelbss dma_coherent_mem_start; extern unsigned long __nongprelbss dma_coherent_mem_end; -extern struct dma_map_ops frv_dma_ops; +extern const struct dma_map_ops frv_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &frv_dma_ops; } diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index b306241c4ef2..5a4c92abc99e 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -13,6 +13,9 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 31221fb4348e..ce29991e4219 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -9,7 +9,8 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 187688128c65..4a96de7f0af4 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -164,7 +164,7 @@ static int frv_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops frv_dma_ops = { +const struct dma_map_ops frv_dma_ops = { .alloc = frv_dma_alloc, .free = frv_dma_free, .map_page = frv_dma_map_page, diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index dba7df918144..e7130abc0dae 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -106,7 +106,7 @@ static int frv_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops frv_dma_ops = { +const struct dma_map_ops frv_dma_ops = { .alloc = frv_dma_alloc, .free = frv_dma_free, .map_page = frv_dma_map_page, diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707a62..da82c25301e7 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -10,6 +10,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/elf-fdpic.h> diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index 88a159743528..328f0a292316 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -18,6 +18,7 @@ #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <linux/swap.h> diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c index 81757d55a5b5..16946a58f64d 100644 --- a/arch/frv/mm/mmu-context.c +++ b/arch/frv/mm/mmu-context.c @@ -10,6 +10,8 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/mm.h> #include <asm/tlbflush.h> @@ -188,7 +190,7 @@ int cxn_pin_by_pid(pid_t pid) task_lock(tsk); if (tsk->mm) { mm = tsk->mm; - atomic_inc(&mm->mm_users); + mmget(mm); ret = 0; } task_unlock(tsk); diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 5efd0c87f3c0..341740c3581c 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -74,3 +74,4 @@ generic-y += unaligned.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 7ac7fadffed0..847c7562e046 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _H8300_DMA_MAPPING_H #define _H8300_DMA_MAPPING_H -extern struct dma_map_ops h8300_dma_map_ops; +extern const struct dma_map_ops h8300_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &h8300_dma_map_ops; } diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c index 3651da045806..225dd0a188dc 100644 --- a/arch/h8300/kernel/dma.c +++ b/arch/h8300/kernel/dma.c @@ -60,7 +60,7 @@ static int map_sg(struct device *dev, struct scatterlist *sgl, return nents; } -struct dma_map_ops h8300_dma_map_ops = { +const struct dma_map_ops h8300_dma_map_ops = { .alloc = dma_alloc, .free = dma_free, .map_page = map_page, diff --git a/arch/h8300/kernel/process.c 
b/arch/h8300/kernel/process.c index 891974a11704..0f5db5bb561b 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -25,6 +25,9 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/h8300/kernel/ptrace_s.c b/arch/h8300/kernel/ptrace_s.c index ef5a9c13e76d..c0af930052c0 100644 --- a/arch/h8300/kernel/ptrace_s.c +++ b/arch/h8300/kernel/ptrace_s.c @@ -10,7 +10,7 @@ */ #include <linux/linkage.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <asm/ptrace.h> diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index d784f7117f9a..1e8070d08770 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -25,6 +25,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/signal.h> diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 044a36125846..e47a9e0dc278 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -16,6 +16,8 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/mm_types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index a43a7c90e4af..797b64a4b80b 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -59,3 +59,4 @@ generic-y += unaligned.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 7ef58df909fc..d3a87bd9b686 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -32,13 +32,10 @@ struct device; extern int bad_dma_address; #define DMA_ERROR_CODE bad_dma_address -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (unlikely(dev == NULL)) - return NULL; - return dma_ops; } diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h index d423d2e73c30..d8a071afdd1d 100644 --- a/arch/hexagon/include/asm/mmu_context.h +++ b/arch/hexagon/include/asm/mmu_context.h @@ -21,6 +21,8 @@ #ifndef _ASM_MMU_CONTEXT_H #define _ASM_MMU_CONTEXT_H +#include <linux/mm_types.h> + #include <asm/setup.h> #include <asm/page.h> #include <asm/pgalloc.h> diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index dbc4f1003da4..e74b65009587 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <asm/page.h> -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); int bad_dma_address; /* globals are automatically initialized to zero */ @@ -203,7 +203,7 @@ static void hexagon_sync_single_for_device(struct device *dev, dma_sync(dma_addr_to_virt(dma_handle), size, dir); } -struct dma_map_ops hexagon_dma_ops = { +const struct dma_map_ops hexagon_dma_ops = { .alloc = hexagon_dma_alloc_coherent, .free = hexagon_free_coherent, .map_sg = hexagon_map_sg, diff --git a/arch/hexagon/kernel/kgdb.c 
b/arch/hexagon/kernel/kgdb.c index 62dece3ad827..16c24b22d0b2 100644 --- a/arch/hexagon/kernel/kgdb.c +++ b/arch/hexagon/kernel/kgdb.c @@ -20,6 +20,7 @@ #include <linux/irq.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/kdebug.h> #include <linux/kgdb.h> diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index d9edfd3fc52a..de715bab7956 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -19,6 +19,9 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/types.h> #include <linux/module.h> #include <linux/tick.h> diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index 390a9ad14ca1..ecd75e2e8eb3 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index c6b22b9945a7..78aa7304a5c9 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -21,6 +21,8 @@ #include <linux/linkage.h> #include <linux/syscalls.h> #include <linux/tracehook.h> +#include <linux/sched/task_stack.h> + #include <asm/registers.h> #include <asm/thread_info.h> #include <asm/unistd.h> diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index 983bae7d2665..5dbc15549e01 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -25,10 +25,11 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/percpu.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/cpu.h> +#include <linux/mm_types.h> #include <asm/time.h> /* timer_interrupt */ #include <asm/hexagon_vm.h> @@ -162,7 +163,7 @@ void start_secondary(void) ); /* Set the memory struct */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; cpu = smp_processor_id(); diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index f94918b449a8..41866a06adf7 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -19,6 +19,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 110dab152f82..2942a9204a9a 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -19,7 +19,9 @@ */ #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/kdebug.h> diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c index 741aaa917cda..04f57ef22009 100644 --- a/arch/hexagon/kernel/vm_events.c +++ b/arch/hexagon/kernel/vm_events.c @@ -19,6 +19,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/debug.h> #include <asm/registers.h> #include <linux/irq.h> #include <linux/hardirq.h> diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 489875fd2be4..3eec33c5cfd7 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -28,6 +28,7 @@ #include <asm/traps.h> #include <linux/uaccess.h> #include 
<linux/mm.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/extable.h> #include <linux/hardirq.h> diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 1e4cae5ae053..0310078a95f8 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c @@ -18,7 +18,7 @@ #include <linux/export.h> #include <asm/machvec.h> -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; /* swiotlb declarations & definitions: */ extern int swiotlb_late_init_with_default_size (size_t size); @@ -34,7 +34,7 @@ static inline int use_swiotlb(struct device *dev) !sba_dma_ops.dma_supported(dev, *dev->dma_mask); } -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) return &swiotlb_dma_ops; diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 630ee8073899..aec4a3354abe 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2096,7 +2096,7 @@ static int __init acpi_sba_ioc_init_acpi(void) /* This has to run before acpi_scan_init(). */ arch_initcall(acpi_sba_ioc_init_acpi); -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static int __init sba_init(void) @@ -2216,7 +2216,7 @@ sba_page_override(char *str) __setup("sbapagesize=",sba_page_override); -struct dma_map_ops sba_dma_ops = { +const struct dma_map_ops sba_dma_ops = { .alloc = sba_alloc_coherent, .free = sba_free_coherent, .map_page = sba_map_page, diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 21fd50def270..de8cba121013 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index d472805edfa9..73ec3c6f4cfe 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -14,7 +14,7 @@ #define DMA_ERROR_CODE 0 -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); @@ -23,7 +23,10 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define get_dma_ops(dev) platform_dma_get_ops(dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return platform_dma_get_ops(NULL); +} static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h index d5505d6f2382..0302b3664789 100644 --- a/arch/ia64/include/asm/kprobes.h +++ b/arch/ia64/include/asm/kprobes.h @@ -23,14 +23,19 @@ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy * <anil.s.keshavamurthy@intel.com> adapted from i386 */ +#include <asm-generic/kprobes.h> +#include <asm/break.h> + +#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) + +#ifdef CONFIG_KPROBES + #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> -#include <asm/break.h> #define __ARCH_WANT_KPROBES_INSN_SLOT #define MAX_INSN_SIZE 2 /* last half is for 
kprobe-booster */ -#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) #define NOP_M_INST (long)(1<<27) #define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \ (0x1L << 12) | /* many */ \ @@ -124,4 +129,5 @@ extern void invalidate_stacked_regs(void); extern void flush_register_stack(void); extern void arch_remove_kprobe(struct kprobe *p); -#endif /* _ASM_KPROBES_H */ +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_KPROBES_H */ diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index ed7f09089f12..af285c423e1e 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -44,7 +44,7 @@ typedef void ia64_mv_kernel_launch_event_t(void); /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); typedef u64 ia64_mv_dma_get_required_mask (struct device *); -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); /* * WARNING: The legacy I/O space is _architected_. Platforms are @@ -248,7 +248,7 @@ extern void machvec_init_from_cmdline(const char *cmdline); # endif /* CONFIG_IA64_GENERIC */ extern void swiotlb_dma_init(void); -extern struct dma_map_ops *dma_get_ops(struct device *); +extern const struct dma_map_ops *dma_get_ops(struct device *); /* * Define default versions so we can extend machvec for new platforms without having diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h index 7f2a456603cb..9b99368633b5 100644 --- a/arch/ia64/include/asm/mmu_context.h +++ b/arch/ia64/include/asm/mmu_context.h @@ -26,6 +26,7 @@ #include <linux/compiler.h> #include <linux/percpu.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/spinlock.h> #include <asm/processor.h> diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9f3ed9ee8f13..384794e665fc 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -147,7 +147,7 @@ # ifndef __ASSEMBLY__ -#include <linux/sched.h> /* for mm_struct */ +#include <linux/sched/mm.h> /* for mm_struct */ #include <linux/bitops.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 03911a336406..26a63d69c599 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -19,8 +19,6 @@ #include <asm/ptrace.h> #include <asm/ustack.h> -#define ARCH_HAS_PREFETCH_SWITCH_STACK - #define IA64_NUM_PHYS_STACK_REG 96 #define IA64_NUM_DBG_REGS 8 diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 60ef83e6db71..8786c8b4f187 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -6,7 +6,7 @@ #define ASM_OFFSETS_C 1 -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/pid.h> #include <linux/clocksource.h> #include <linux/kbuild.h> diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c index 8682df6263d6..987b11be0021 100644 --- a/arch/ia64/kernel/brl_emu.c +++ b/arch/ia64/kernel/brl_emu.c @@ -8,7 +8,7 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/processor.h> diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index 7f7916238208..e0dd97f4eb69 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -4,7 +4,7 @@ /* Set this to 1 if there is a HW IOMMU in the system */ int 
iommu_detected __read_mostly; -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) @@ -17,7 +17,7 @@ static int __init dma_init(void) } fs_initcall(dma_init); -struct dma_map_ops *dma_get_ops(struct device *dev) +const struct dma_map_ops *dma_get_ops(struct device *dev) { return dma_ops; } diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 6f27a663177c..e7a716b09350 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -455,29 +455,6 @@ GLOBAL_ENTRY(load_switch_stack) br.cond.sptk.many b7 END(load_switch_stack) -GLOBAL_ENTRY(prefetch_stack) - add r14 = -IA64_SWITCH_STACK_SIZE, sp - add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0 - ;; - ld8 r16 = [r15] // load next's stack pointer - lfetch.fault.excl [r14], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault [r16], 128 - br.ret.sptk.many rp -END(prefetch_stack) - /* * Invoke a system call, but do some tracing before and after the call. * We MUST preserve the current register frame throughout this routine diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 9509cc73b9c6..79c7c46d7dc1 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -72,7 +72,9 @@ #include <linux/jiffies.h> #include <linux/types.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bootmem.h> diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index 992c1098c522..9094a73f996f 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -90,11 +90,11 @@ void __init pci_iommu_alloc(void) { dma_ops = &intel_dma_ops; - dma_ops->sync_single_for_cpu = machvec_dma_sync_single; - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; - dma_ops->sync_single_for_device = machvec_dma_sync_single; - dma_ops->sync_sg_for_device = machvec_dma_sync_sg; - dma_ops->dma_supported = iommu_dma_supported; + intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single; + intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg; + intel_dma_ops.sync_single_for_device = machvec_dma_sync_single; + intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg; + intel_dma_ops.dma_supported = iommu_dma_supported; /* * The order of these functions is important for diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 2933208c0285..a14989dacded 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -30,7 +30,7 @@ static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc = ia64_swiotlb_alloc_coherent, .free = ia64_swiotlb_free_coherent, .map_page = swiotlb_map_page, diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 677a86826771..09f86ebfcc7b 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -22,6 +22,8 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> 
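Every architecture touched so far converts its dma_map_ops table into const data and swaps the per-device get_dma_ops() hook for a bus-level get_arch_dma_ops(); the generic dma-mapping code consults a per-device ops pointer first and falls back to this arch hook. A minimal sketch of the header side of that pattern, for a hypothetical "foo" architecture rather than any file in the patch:

/* arch/foo/include/asm/dma-mapping.h -- illustrative only */
extern const struct dma_map_ops foo_dma_ops;    /* defined once in arch/foo/kernel/dma.c */

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &foo_dma_ops;            /* one read-only ops table shared by the whole arch */
}
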
#include <linux/seq_file.h> diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 52deab683ba1..d344d0d691aa 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -20,6 +20,10 @@ #include <linux/notifier.h> #include <linux/personality.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/unistd.h> diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 0b1153e610ea..3f8293378a83 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -11,6 +11,8 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index c483ece3eb84..23e3fd61e335 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -29,9 +29,12 @@ #include <linux/bootmem.h> #include <linux/console.h> #include <linux/delay.h> +#include <linux/cpu.h> #include <linux/kernel.h> #include <linux/reboot.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/clock.h> +#include <linux/sched/task_stack.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/threads.h> @@ -994,7 +997,7 @@ cpu_init (void) */ ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index a09c12230bc5..5ce927c854a6 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -10,6 +10,8 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task_stack.h> #include <linux/shm.h> #include <linux/file.h> /* doh, must come after sched.h... 
*/ #include <linux/smp.h> diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index faa116822c4c..aa7be020a904 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -16,12 +16,13 @@ #include <linux/profile.h> #include <linux/sched.h> #include <linux/time.h> +#include <linux/nmi.h> #include <linux/interrupt.h> #include <linux/efi.h> #include <linux/timex.h> #include <linux/timekeeper_internal.h> #include <linux/platform_device.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/machvec.h> #include <asm/delay.h> diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 8981ce98afb3..7b1fe9462158 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -9,7 +9,8 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/vt_kern.h> /* For unblank_screen() */ #include <linux/export.h> diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 99348d7f2255..a13680ca1e61 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -15,7 +15,7 @@ */ #include <linux/jiffies.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/extable.h> #include <linux/ratelimit.h> diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index f3976da36721..583f7ff6b589 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/string.h> #include <linux/efi.h> +#include <linux/nmi.h> #include <linux/genalloc.h> #include <linux/gfp.h> #include <asm/page.h> diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7f2feb21753c..15f09cfff335 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -4,7 +4,7 @@ * Copyright (C) 1998-2002 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/extable.h> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 06cdaef54b2e..8f3efa682ee8 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -12,6 +12,7 @@ #include <linux/elf.h> #include <linux/memblock.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/personality.h> diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index c98dc965fe82..b73b0ebf8214 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -13,6 +13,7 @@ #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/irq.h> diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 4c3b84d8406a..52704f199dd6 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -525,7 +525,7 @@ static int sn_topology_show(struct seq_file *s, void *d) /* both ends local to this partition */ seq_puts(s, " local"); else if (SN_HWPERF_FOREIGN(p)) - /* both ends of the link in foreign partiton */ + /* both ends of the link in foreign partition */ seq_puts(s, " foreign"); else /* link straddles a partition */ diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index d227a6988d6b..95474460b367 100644 --- 
a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -18,6 +18,7 @@ config M32R select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW select CPU_NO_EFFICIENT_FFS + select DMA_NOOP_OPS config SBUS bool diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 8c24c5e1db66..deb298777df2 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -11,3 +11,4 @@ generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h generic-y += word-at-a-time.h +generic-y += kprobes.h diff --git a/arch/m32r/include/asm/device.h b/arch/m32r/include/asm/device.h index 4a9f35e0973f..5203fc87f080 100644 --- a/arch/m32r/include/asm/device.h +++ b/arch/m32r/include/asm/device.h @@ -4,7 +4,6 @@ * This file is released under the GPLv2 */ struct dev_archdata { - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h index 2c43a77fe942..c01d9f52d228 100644 --- a/arch/m32r/include/asm/dma-mapping.h +++ b/arch/m32r/include/asm/dma-mapping.h @@ -10,10 +10,8 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; return &dma_noop_ops; } diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h index 9fc78fc44445..1230b7050d8e 100644 --- a/arch/m32r/include/asm/mmu_context.h +++ b/arch/m32r/include/asm/mmu_context.h @@ -12,6 +12,8 @@ #ifndef __ASSEMBLY__ #include <linux/atomic.h> +#include <linux/mm_types.h> + #include <asm/pgalloc.h> #include <asm/mmu.h> #include <asm/tlbflush.h> diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index e0568bee60c0..d8ffcfec599c 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -22,6 +22,9 @@ #include <linux/fs.h> #include <linux/slab.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/unistd.h> diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index a68acb9fa515..2d887400e30e 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/smp.h> diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c index 136c69f1fb8a..1a9e977287e6 100644 --- a/arch/m32r/kernel/setup.c +++ b/arch/m32r/kernel/setup.c @@ -11,7 +11,7 @@ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/fs.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/bootmem.h> @@ -403,7 +403,7 @@ void __init cpu_init (void) printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); /* Set up and load the per-CPU TSS and LDT */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; if (current->mm) BUG(); diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index f98d2f6519d6..a7d04684d2c7 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -45,6 +45,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/bootmem.h> diff --git 
a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index c3c5fdfae920..647dd94a0c39 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -14,7 +14,11 @@ #include <linux/kallsyms.h> #include <linux/stddef.h> #include <linux/ptrace.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> +#include <linux/cpu.h> + #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index f108dd121e9a..131b4101ae5d 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -1,19 +1,20 @@ -CONFIG_LOCALVERSION="amcore-001" +CONFIG_LOCALVERSION="amcore-002" CONFIG_DEFAULT_HOSTNAME="amcore" CONFIG_SYSVIPC=y # CONFIG_FHANDLE is not set # CONFIG_USELIB is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_NAMESPACES=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_AIO is not set # CONFIG_ADVISE_SYSCALLS is not set # CONFIG_MEMBARRIER is not set CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_CFQ is not set # CONFIG_MMU is not set CONFIG_M5307=y CONFIG_AMCORE=y @@ -27,13 +28,14 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y +CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_LOADER is not set # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_CONNECTOR=y CONFIG_MTD=y @@ -53,6 +55,7 @@ CONFIG_MTD_UCLINUX=y CONFIG_MTD_PLATRAM=y CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -89,14 +92,12 @@ CONFIG_I2C=y CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set CONFIG_I2C_IMX=y -CONFIG_PPS=y +CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y # CONFIG_RTC_SYSTOHC is not set CONFIG_RTC_DRV_DS1307=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y # CONFIG_FILE_LOCKING is not set # CONFIG_DNOTIFY is not set # CONFIG_INOTIFY_USER is not set @@ -108,6 +109,7 @@ CONFIG_ROMFS_BACKED_BY_BOTH=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S index 6dccda766e22..b865c1a052ba 100644 --- a/arch/m68k/ifpsp060/src/isp.S +++ b/arch/m68k/ifpsp060/src/isp.S @@ -3814,7 +3814,7 @@ CAS2W2_FILLER: # (3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set # # SFC/DFC according to whether exception occurred in user or # # supervisor mode. # -# (4) Use "plpaw" instruction to pre-load ATC with efective # +# (4) Use "plpaw" instruction to pre-load ATC with effective # # address page(s). THIS SHOULD NOT FAULT!!! The relevant # # page(s) should have been made resident prior to entering # # this routine. 
# diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 6c76d6c24b3d..d4f9ccbfa85c 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -33,3 +33,4 @@ generic-y += trace_clock.h generic-y += types.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/m68k/include/asm/a.out-core.h b/arch/m68k/include/asm/a.out-core.h index f6bfc1d63ff6..ae91ea6bb303 100644 --- a/arch/m68k/include/asm/a.out-core.h +++ b/arch/m68k/include/asm/a.out-core.h @@ -16,6 +16,7 @@ #include <linux/user.h> #include <linux/elfcore.h> +#include <linux/mm_types.h> /* * fill in the user structure for an a.out core dump diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 96c536194287..9210e470771b 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _M68K_DMA_MAPPING_H #define _M68K_DMA_MAPPING_H -extern struct dma_map_ops m68k_dma_ops; +extern const struct dma_map_ops m68k_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &m68k_dma_ops; } diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h index dc3be991d634..4a6ae6dffa34 100644 --- a/arch/m68k/include/asm/mmu_context.h +++ b/arch/m68k/include/asm/mmu_context.h @@ -2,6 +2,7 @@ #define __M68K_MMU_CONTEXT_H #include <asm-generic/mm_hooks.h> +#include <linux/mm_types.h> static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index 1e4f386ba31e..87ef73a93856 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -158,7 +158,7 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, return nents; } -struct dma_map_ops m68k_dma_ops = { +const struct dma_map_ops m68k_dma_ops = { .alloc = m68k_dma_alloc, .free = m68k_dma_free, .map_page = m68k_dma_map_page, diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index f0a8e9b332cd..e475c945c8b2 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -13,6 +13,9 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 9cd86d7343a6..748c63bd0081 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 4e5aa2f4f522..87160b4415fb 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -14,6 +14,7 @@ #include <linux/export.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 558f38402737..a926d2c88898 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -19,6 +19,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git 
a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c index b5cd06df71fd..9637dee90dac 100644 --- a/arch/m68k/mac/macints.c +++ b/arch/m68k/mac/macints.c @@ -110,6 +110,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index e9d7fbe4d5ae..7fdc61525e0b 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -15,6 +15,7 @@ #include <linux/bootmem.h> #include <linux/bitops.h> #include <linux/module.h> +#include <linux/sched/mm.h> #include <asm/setup.h> #include <asm/traps.h> diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index d3731f0db73b..f9b9df5d6de9 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -54,3 +54,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index 27af5d479ce6..fad3dc3cb210 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _ASM_METAG_DMA_MAPPING_H #define _ASM_METAG_DMA_MAPPING_H -extern struct dma_map_ops metag_dma_ops; +extern const struct dma_map_ops metag_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &metag_dma_ops; } diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h index ae2a71b5e0be..2e0312748197 100644 --- a/arch/metag/include/asm/mmu_context.h +++ b/arch/metag/include/asm/mmu_context.h @@ -9,6 +9,7 @@ #include <asm/cacheflush.h> #include <linux/io.h> +#include <linux/mm_types.h> static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index 91968d92652b..f0ab3a498328 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -575,7 +575,7 @@ static void metag_dma_sync_sg_for_device(struct device *dev, dma_sync_for_device(sg_virt(sg), sg->length, direction); } -struct dma_map_ops metag_dma_ops = { +const struct dma_map_ops metag_dma_ops = { .alloc = metag_dma_alloc, .free = metag_dma_free, .map_page = metag_dma_map_page, diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c index 35062796edf2..c4606ce743d2 100644 --- a/arch/metag/kernel/process.c +++ b/arch/metag/kernel/process.c @@ -8,6 +8,9 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/unistd.h> diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c index 7563628822bd..5fd16ee5280c 100644 --- a/arch/metag/kernel/ptrace.c +++ b/arch/metag/kernel/ptrace.c @@ -15,6 +15,8 @@ #include <linux/tracehook.h> #include <linux/elf.h> #include <linux/uaccess.h> +#include <linux/sched/task_stack.h> + #include <trace/syscall.h> #define CREATE_TRACE_POINTS diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c index ce49d429c74a..338925d808e6 100644 --- a/arch/metag/kernel/signal.c +++ b/arch/metag/kernel/signal.c @@ -7,6 +7,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include 
<linux/kernel.h> diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index bad13232de51..232a12bf3f99 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -12,7 +12,9 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/profile.h> @@ -344,8 +346,8 @@ asmlinkage void secondary_start_kernel(void) * All kernel threads share the same mm context; grab a * reference and switch to it. */ - atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmget(mm); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); enter_lazy_tlb(mm, current); diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c index 5510361d5bea..91ffc4b75c33 100644 --- a/arch/metag/kernel/stacktrace.c +++ b/arch/metag/kernel/stacktrace.c @@ -1,5 +1,7 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <asm/stacktrace.h> diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 17b2e2e38d5a..444851e510d5 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -10,6 +10,9 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index c765b3621b9b..5055477486b6 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/ptrace.h> +#include <linux/sched/debug.h> #include <linux/interrupt.h> #include <linux/uaccess.h> diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index c0ec116b3993..188d4d9fbed4 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -12,6 +12,7 @@ #include <linux/percpu.h> #include <linux/memblock.h> #include <linux/initrd.h> +#include <linux/sched/task.h> #include <asm/setup.h> #include <asm/page.h> diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 6275eb051801..1732ec13b211 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += preempt.h generic-y += syscalls.h generic-y += trace_clock.h generic-y += word-at-a-time.h +generic-y += kprobes.h diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 1768d4bdc8d3..3fad5e722a66 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -36,9 +36,9 @@ /* * Available generic sets of operations */ -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &dma_direct_ops; } diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h index d68647746448..99472d2ca340 100644 --- a/arch/microblaze/include/asm/mmu_context_mm.h +++ b/arch/microblaze/include/asm/mmu_context_mm.h @@ -12,6 +12,8 @@ #define _ASM_MICROBLAZE_MMU_CONTEXT_H #include <linux/atomic.h> +#include <linux/mm_types.h> + 
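The cpu_init()/secondary_start_kernel()-style hunks above (frv, hexagon, ia64, m32r, metag) also replace open-coded reference counting on struct mm_struct with the helpers from <linux/sched/mm.h>: atomic_inc(&mm->mm_count) becomes mmgrab(mm) and atomic_inc(&mm->mm_users) becomes mmget(mm). A short sketch of the resulting idiom, using a hypothetical secondary_adopt_init_mm() rather than code lifted from the patch:

#include <linux/mm_types.h>   /* init_mm */
#include <linux/sched/mm.h>   /* mmgrab(), mmget() */

/* Illustrative only: a secondary CPU adopting init_mm as its active mm. */
static void secondary_adopt_init_mm(void)
{
	/* Pin the mm structure itself; balanced by mmdrop(), not mmput(). */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	/* mmget(mm) similarly pins the address space and is balanced by mmput(mm). */
}
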
#include <asm/bitops.h> #include <asm/mmu.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 818daf230eb4..12e093a03e60 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -187,7 +187,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, #endif } -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index 42dd12a62ff5..e6f338d0496b 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <asm/exceptions.h> diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c index 4643e3ab9414..2022130139d2 100644 --- a/arch/microblaze/kernel/heartbeat.c +++ b/arch/microblaze/kernel/heartbeat.c @@ -9,6 +9,7 @@ */ #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/io.h> #include <asm/setup.h> diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index b2dd37196b3b..e92a817e645f 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -11,6 +11,9 @@ #include <linux/cpu.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/bitops.h> diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 8cfa98cadf3d..badd286882ae 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/elf.h> diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 1d6fad50fa76..999066192715 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/clk.h> #include <linux/clockchips.h> diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index cb619533a192..45bbba9d919f 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/kallsyms.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/debug_locks.h> #include <asm/exceptions.h> diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 61c04eed14d5..34c270cb11fc 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -17,6 +17,7 @@ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/types.h> #include <linux/errno.h> diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index cc732fe357ad..4c0599239915 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -31,6 +31,7 @@ #include 
<linux/types.h> #include <linux/vmalloc.h> #include <linux/init.h> +#include <linux/mm_types.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 1226965e1e4f..c64bd87f0b6e 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -200,7 +200,7 @@ static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr } struct octeon_dma_map_ops { - struct dma_map_ops dma_map_ops; + const struct dma_map_ops dma_map_ops; dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr); phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr); }; @@ -328,7 +328,7 @@ static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { }, }; -struct dma_map_ops *octeon_pci_dma_map_ops; +const struct dma_map_ops *octeon_pci_dma_map_ops; void __init octeon_pci_dma_init(void) { diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 4355a4cf4d74..4b94b7fbafa3 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> #include <linux/init.h> #include <linux/export.h> diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 940760844e2f..dba7f4b6bebf 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h @@ -9,6 +9,8 @@ #ifndef _ASM_ABI_H #define _ASM_ABI_H +#include <linux/signal_types.h> + #include <asm/signal.h> #include <asm/siginfo.h> #include <asm/vdso.h> diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h index 21c2082a0dfb..6aa796f1081a 100644 --- a/arch/mips/include/asm/device.h +++ b/arch/mips/include/asm/device.h @@ -6,12 +6,7 @@ #ifndef _ASM_MIPS_DEVICE_H #define _ASM_MIPS_DEVICE_H -struct dma_map_ops; - struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - #ifdef CONFIG_DMA_PERDEV_COHERENT /* Non-zero if DMA is coherent with CPU caches */ bool dma_coherent; diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 7aa71b9b0258..aba71385f9d1 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -9,14 +9,11 @@ #include <dma-coherence.h> #endif -extern struct dma_map_ops *mips_dma_map_ops; +extern const struct dma_map_ops *mips_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return mips_dma_map_ops; + return mips_dma_map_ops; } static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 7a6c466e5f2a..0eb1a75be105 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -10,6 +10,8 @@ #include <linux/auxvec.h> #include <linux/fs.h> +#include <linux/mm_types.h> + #include <uapi/linux/elf.h> #include <asm/current.h> diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index f06f97bd62df..321752bcbab6 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -11,6 +11,7 @@ #define _ASM_FPU_H #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/thread_info.h> #include <linux/bitops.h> diff --git a/arch/mips/include/asm/kprobes.h 
b/arch/mips/include/asm/kprobes.h index daba1f9a4f79..291846d9ba83 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -22,6 +22,9 @@ #ifndef _ASM_KPROBES_H #define _ASM_KPROBES_H +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES #include <linux/ptrace.h> #include <linux/types.h> @@ -94,4 +97,5 @@ struct kprobe_ctlblk { extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); -#endif /* _ASM_KPROBES_H */ +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_KPROBES_H */ diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index 460042ee5d6f..9110988b92a1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -65,7 +65,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); struct dma_map_ops; -extern struct dma_map_ops *octeon_pci_dma_map_ops; +extern const struct dma_map_ops *octeon_pci_dma_map_ops; extern char *octeon_swiotlb; #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2abf94f72c0a..da2004cef2d5 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -13,8 +13,10 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/smp.h> #include <linux/slab.h> + #include <asm/cacheflush.h> #include <asm/dsemul.h> #include <asm/hazards.h> diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index be52c2125d71..e0717d10e650 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -88,7 +88,7 @@ extern struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; /* SWIOTLB */ -extern struct dma_map_ops nlm_swiotlb_dma_ops; +extern const struct dma_map_ops nlm_swiotlb_dma_ops; extern unsigned int nlm_threads_per_core; extern cpumask_t nlm_cpumask; diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index ae037a304ee4..b11facd11c9d 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -7,7 +7,7 @@ * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/export.h> #include <asm/branch.h> diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 5a71518be0f1..ca25cd393b1c 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -8,6 +8,7 @@ #include <linux/irq.h> #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> /* This keeps a track of which one is crashing cpu. 
*/ static int crashing_cpu = -1; diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 1a0a3b4ecc3e..8cab633e0e5a 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -9,6 +9,8 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/cred.h> #include <linux/security.h> #include <linux/types.h> #include <linux/uaccess.h> diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index d64056e0bb56..f298eb2ff6c2 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -15,6 +15,7 @@ */ #include <linux/perf_event.h> +#include <linux/sched/task_stack.h> #include <asm/stacktrace.h> diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 803e255b6fc3..fb6b6b650719 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -11,6 +11,9 @@ */ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/tick.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index fdef26382c37..339601267265 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -19,6 +19,7 @@ #include <linux/elf.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 4f0998525626..40e212d6b26b 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -18,6 +18,7 @@ #include <linux/compat.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index c5c4fd54d797..b80dd8b17a76 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -12,6 +12,8 @@ #include <linux/syscalls.h> #include <linux/moduleloader.h> #include <linux/atomic.h> +#include <linux/sched/signal.h> + #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/processor.h> diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index 5e169fc5ca5c..2b3572fb5f1b 100644 --- a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -11,6 +11,7 @@ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/signal.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/abi.h> diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 16e37a28f876..3daa2cae50b0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/smp.h> diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index a2544c2394e4..6d45f05538c8 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -11,7 +11,8 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/irqchip/mips-gic.h> -#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/hotplug.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/types.h> diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 
8c60a296294c..6e71130549ea 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -28,7 +28,7 @@ #include <linux/export.h> #include <linux/time.h> #include <linux/timex.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/cpumask.h> #include <linux/cpu.h> #include <linux/err.h> diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index 506021f62549..7c7c902249f2 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c @@ -4,6 +4,8 @@ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/export.h> #include <asm/stacktrace.h> diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index c86ddbaa4598..f1d17ece4181 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -26,6 +26,7 @@ #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/elf.h> +#include <linux/sched/task_stack.h> #include <asm/asm.h> #include <asm/branch.h> diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index cb479be31a50..c7d17cfb32f6 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -23,7 +23,8 @@ #include <linux/module.h> #include <linux/extable.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> @@ -2232,7 +2233,7 @@ void per_cpu_trap_init(bool is_boot_cpu) if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = asid_first_version(cpu); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index ed81e5ac1426..15a1b1716c2e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -16,8 +16,10 @@ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/bootmem.h> + #include <asm/fpu.h> #include <asm/page.h> #include <asm/cacheflush.h> diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index df7235e33499..178ca17a5667 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -114,7 +114,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return daddr; } -static struct dma_map_ops loongson_dma_map_ops = { +static const struct dma_map_ops loongson_dma_map_ops = { .alloc = loongson_dma_alloc_coherent, .free = loongson_dma_free_coherent, .map_page = loongson_dma_map_page, diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index cfcf240cedbe..64659fc73940 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -17,6 +17,8 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/smp.h> #include <linux/cpufreq.h> #include <asm/processor.h> diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index c4469ff4a996..b6bfd3625369 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -1,5 +1,7 @@ #include <linux/err.h> #include <linux/slab.h> +#include <linux/mm_types.h> +#include <linux/sched/task.h> #include 
<asm/branch.h> #include <asm/cacheflush.h> diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 1895a692efd4..fe8df14b6169 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -417,7 +417,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, EXPORT_SYMBOL(dma_cache_sync); -static struct dma_map_ops mips_default_dma_map_ops = { +static const struct dma_map_ops mips_default_dma_map_ops = { .alloc = mips_dma_alloc_coherent, .free = mips_dma_free_coherent, .mmap = mips_dma_mmap, @@ -433,7 +433,7 @@ static struct dma_map_ops mips_default_dma_map_ops = { .dma_supported = mips_dma_supported }; -struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; +const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; EXPORT_SYMBOL(mips_dma_map_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 1f189627440f..1986e09fb457 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -12,6 +12,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/mm_types.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/tlbflush.h> diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d6d92c02308d..64dd8bdd92c3 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -13,7 +13,8 @@ #include <linux/export.h> #include <linux/personality.h> #include <linux/random.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index 0630693bec2a..0ec9d9da6d51 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -67,7 +67,7 @@ static void nlm_dma_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_handle); } -struct dma_map_ops nlm_swiotlb_dma_ops = { +const struct dma_map_ops nlm_swiotlb_dma_ops = { .alloc = nlm_dma_alloc_coherent, .free = nlm_dma_free_coherent, .map_page = swiotlb_map_page, diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index f8d3e081b2eb..72eb1a56c645 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -10,6 +10,7 @@ #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <asm/mipsregs.h> #include <asm/setup.h> diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 308d051fc45c..9ee01936862e 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig); } - dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops; + dev->dev.dma_ops = octeon_pci_dma_map_ops; return 0; } diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c index 3f6ccd53c15d..ff8e1935c873 100644 --- a/arch/mips/sgi-ip22/ip22-berr.c +++ b/arch/mips/sgi-ip22/ip22-berr.c @@ -6,7 +6,7 @@ #include <linux/init.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <asm/addrspace.h> #include <asm/traps.h> diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c index a36f6b87548a..03a39ac5ead9 100644 --- a/arch/mips/sgi-ip22/ip22-reset.c +++ 
b/arch/mips/sgi-ip22/ip22-reset.c @@ -10,7 +10,7 @@ #include <linux/rtc/ds1286.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/notifier.h> #include <linux/pm.h> #include <linux/timer.h> diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 9960a8302eac..1f2a5bc4779e 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/seq_file.h> #include <asm/addrspace.h> diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c index f8919b6a24c8..d12879eb2b1f 100644 --- a/arch/mips/sgi-ip27/ip27-berr.c +++ b/arch/mips/sgi-ip27/ip27-berr.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/signal.h> /* for SIGBUS */ #include <linux/sched.h> /* schow_regs(), force_sig() */ +#include <linux/sched/debug.h> #include <asm/sn/addrs.h> #include <asm/sn/arch.h> diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index f9ae6a8fa7c7..f5ed45e8f442 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -8,6 +8,7 @@ */ #include <linux/init.h> #include <linux/sched.h> +#include <linux/topology.h> #include <linux/nodemask.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index ba8f46d80ab8..57d8c7486fe6 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <asm/traps.h> #include <linux/uaccess.h> #include <asm/addrspace.h> diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 838d8589a1c0..a6a0ff7f5aed 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index 4c71aea25663..d0e94ffcc1b8 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -21,6 +21,7 @@ #include <linux/smp.h> #include <linux/kernel_stat.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <asm/mmu_context.h> #include <asm/io.h> diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 1cf66f5ff23d..0a4a2c3982d8 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -21,7 +21,7 @@ #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel_stat.h> -#include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <asm/mmu_context.h> #include <asm/io.h> diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index 1dcd44757f32..737ef574b3ea 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -14,9 +14,9 @@ #include <asm/cache.h> #include <asm/io.h> -extern struct dma_map_ops mn10300_dma_ops; +extern const struct dma_map_ops mn10300_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &mn10300_dma_ops; } diff --git a/arch/mn10300/include/asm/kprobes.h 
b/arch/mn10300/include/asm/kprobes.h index c800b590183a..7abea0bdb549 100644 --- a/arch/mn10300/include/asm/kprobes.h +++ b/arch/mn10300/include/asm/kprobes.h @@ -21,13 +21,17 @@ #ifndef _ASM_KPROBES_H #define _ASM_KPROBES_H +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0xff + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> struct kprobe; typedef unsigned char kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xff #define MAX_INSN_SIZE 8 #define MAX_STACK_SIZE 128 @@ -47,4 +51,5 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, extern void arch_remove_kprobe(struct kprobe *p); +#endif /* CONFIG_KPROBES */ #endif /* _ASM_KPROBES_H */ diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h index 75dbe696f830..d2034f5e6eda 100644 --- a/arch/mn10300/include/asm/mmu_context.h +++ b/arch/mn10300/include/asm/mmu_context.h @@ -23,6 +23,8 @@ #define _ASM_MMU_CONTEXT_H #include <linux/atomic.h> +#include <linux/mm_types.h> + #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c index 2578b7ae7dd5..50ce7b447fed 100644 --- a/arch/mn10300/kernel/fpu.c +++ b/arch/mn10300/kernel/fpu.c @@ -9,6 +9,8 @@ * 2 of the Licence, or (at your option) any later version. */ #include <linux/uaccess.h> +#include <linux/sched/signal.h> + #include <asm/fpu.h> #include <asm/elf.h> #include <asm/exceptions.h> diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index e5def2217f72..c9fa42619c6a 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -11,6 +11,9 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c index 976020f469c1..8009876a7ac4 100644 --- a/arch/mn10300/kernel/ptrace.c +++ b/arch/mn10300/kernel/ptrace.c @@ -11,6 +11,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 426173c4b0b9..35d2c3fe6f76 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -21,7 +21,8 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/profile.h> #include <linux/smp.h> #include <linux/cpu.h> @@ -589,7 +590,7 @@ static void __init smp_cpu_init(void) } printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 67c6416a58f8..06b83b17c5f1 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -10,6 +10,7 @@ * 2 of the Licence, or (at your option) any later version. 
*/ #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/time.h> diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index a7a987c7954f..800fd0801969 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -10,6 +10,7 @@ * 2 of the Licence, or (at your option) any later version. */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 4f4b9029f0ea..86108d2496b3 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c @@ -121,7 +121,7 @@ static int mn10300_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops mn10300_dma_ops = { +const struct dma_map_ops mn10300_dma_ops = { .alloc = mn10300_dma_alloc, .free = mn10300_dma_free, .map_page = mn10300_dma_map_page, diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index 9a39ea9031d4..085f2bb691ac 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -20,7 +20,7 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/profile.h> #include <linux/smp.h> #include <asm/tlbflush.h> diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 35b0e883761a..aaa3c218b56c 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -62,3 +62,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index bec8ac8e6ad2..7b3c6f280293 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h @@ -10,9 +10,9 @@ #ifndef _ASM_NIOS2_DMA_MAPPING_H #define _ASM_NIOS2_DMA_MAPPING_H -extern struct dma_map_ops nios2_dma_ops; +extern const struct dma_map_ops nios2_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &nios2_dma_ops; } diff --git a/arch/nios2/include/asm/mmu_context.h b/arch/nios2/include/asm/mmu_context.h index 294b4b1f81d4..78ab3dacf579 100644 --- a/arch/nios2/include/asm/mmu_context.h +++ b/arch/nios2/include/asm/mmu_context.h @@ -13,6 +13,8 @@ #ifndef _ASM_NIOS2_MMU_CONTEXT_H #define _ASM_NIOS2_MMU_CONTEXT_H +#include <linux/mm_types.h> + #include <asm-generic/mm_hooks.h> extern void mmu_context_init(void); diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 2f8c74f93e70..509e7855e8dc 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -14,6 +14,10 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/mm_types.h> #include <linux/tick.h> #include <linux/uaccess.h> diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c index 681dda92eff1..de97bcb7dd44 100644 --- a/arch/nios2/kernel/ptrace.c +++ b/arch/nios2/kernel/ptrace.c @@ -14,6 +14,7 @@ #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/tracehook.h> #include <linux/uaccess.h> #include <linux/user.h> diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index 
a3fa80d1aacc..6e57ffa5db27 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/console.h> #include <linux/bootmem.h> #include <linux/initrd.h> diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 72ed30a93c85..8184e7d6b385 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -11,6 +11,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/export.h> diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index f6a5dcf9d682..7040c1adbb5e 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -192,7 +192,7 @@ static void nios2_dma_sync_sg_for_device(struct device *dev, } -struct dma_map_ops nios2_dma_ops = { +const struct dma_map_ops nios2_dma_ops = { .alloc = nios2_dma_alloc, .free = nios2_dma_free, .map_page = nios2_dma_map_page, diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index e7a14e1e0d6b..b804dd06ea1c 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -13,6 +13,7 @@ #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/errno.h> diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index fb241757f7f0..fb01873a5aad 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -67,3 +67,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 1f260bccb368..0c0075f17145 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -28,9 +28,9 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops or1k_dma_map_ops; +extern const struct dma_map_ops or1k_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &or1k_dma_map_ops; } diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 906998bac957..b10369b7e31b 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -232,7 +232,7 @@ or1k_sync_single_for_device(struct device *dev, mtspr(SPR_DCBFR, cl); } -struct dma_map_ops or1k_dma_map_ops = { +const struct dma_map_ops or1k_dma_map_ops = { .alloc = or1k_dma_alloc, .free = or1k_dma_free, .map_page = or1k_map_page, diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index bc6500860f4d..1b7160c79646 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -319,7 +319,7 @@ EXCEPTION_ENTRY(_timer_handler) l.j _ret_from_intr l.nop -/* ---[ 0x600: Aligment exception ]-------------------------------------- */ +/* ---[ 0x600: Alignment exception ]-------------------------------------- */ EXCEPTION_ENTRY(_alignment_handler) CLEAR_LWA_FLAG(r3) @@ -331,8 +331,8 @@ EXCEPTION_ENTRY(_alignment_handler) l.nop #if 0 -EXCEPTION_ENTRY(_aligment_handler) -// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the efective addres */ +EXCEPTION_ENTRY(_alignment_handler) +// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */ l.addi r2,r4,0 // l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */ l.lwz r5,PT_PC(r1) diff --git 
a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index d01b82eace3e..1e87913576e3 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -325,7 +325,7 @@ _dispatch_do_ipage_fault: .org 0x500 EXCEPTION_HANDLE(_timer_handler) -/* ---[ 0x600: Aligment exception ]-------------------------------------- */ +/* ---[ 0x600: Alignment exception ]-------------------------------------- */ .org 0x600 EXCEPTION_HANDLE(_alignment_handler) @@ -640,8 +640,8 @@ _flush_tlb: /* ========================================[ cache ]=== */ - /* aligment here so we don't change memory offsets with - * memory controler defined + /* alignment here so we don't change memory offsets with + * memory controller defined */ .align 0x2000 diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 6e9d1cb519f2..828a29110459 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -22,6 +22,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 228288887d74..eb97a8e7c8aa 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/string.h> #include <linux/mm.h> diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 7e81ad258bca..803e9e756f77 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -22,6 +22,8 @@ #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/extable.h> #include <linux/kmod.h> diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 552544616b9d..00ddb7804be4 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -19,8 +19,8 @@ /* TODO * - clean up __offset & stuff - * - change all 8192 aligment to PAGE !!! - * - recheck if all aligments are really needed + * - change all 8192 alignment to PAGE !!! 
+ * - recheck if all alignments are really needed */ # define LOAD_OFFSET PAGE_OFFSET diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 53592a639744..e310ab499385 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -18,7 +18,7 @@ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/extable.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/siginfo.h> diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index cc70b4116718..a9909c2d04c5 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -28,3 +28,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 16e024602737..5404c6a726b2 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -21,13 +21,13 @@ */ #ifdef CONFIG_PA11 -extern struct dma_map_ops pcxl_dma_ops; -extern struct dma_map_ops pcx_dma_ops; +extern const struct dma_map_ops pcxl_dma_ops; +extern const struct dma_map_ops pcx_dma_ops; #endif -extern struct dma_map_ops *hppa_dma_ops; +extern const struct dma_map_ops *hppa_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return hppa_dma_ops; } diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 20b013cbd297..0dc72d5de861 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -18,6 +18,7 @@ #include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <asm/pdc.h> #include <asm/cache.h> #include <asm/cacheflush.h> diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 700e2d2da096..fa78419100c8 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -40,7 +40,7 @@ #include <asm/parisc-device.h> /* See comments in include/asm-parisc/pci.h */ -struct dma_map_ops *hppa_dma_ops __read_mostly; +const struct dma_map_ops *hppa_dma_ops __read_mostly; EXPORT_SYMBOL(hppa_dma_ops); static struct device root = { diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c index 8a89780223aa..9b245fc67560 100644 --- a/arch/parisc/kernel/pa7300lc.c +++ b/arch/parisc/kernel/pa7300lc.c @@ -5,6 +5,7 @@ * Copyright (C) 2000 Philipp Rumpf */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/smp.h> #include <linux/kernel.h> #include <asm/io.h> diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 697c53543a4d..5f0067a62738 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -572,7 +572,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist * flush_kernel_vmap_range(sg_virt(sg), sg->length); } -struct dma_map_ops pcxl_dma_ops = { +const struct dma_map_ops pcxl_dma_ops = { .dma_supported = pa11_dma_supported, .alloc = pa11_dma_alloc, .free = pa11_dma_free, @@ -608,7 +608,7 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, return; } -struct dma_map_ops pcx_dma_ops = { +const struct dma_map_ops pcx_dma_ops = { .dma_supported = pa11_dma_supported, .alloc = pcx_dma_alloc, .free = pcx_dma_free, diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ea6603ee8d24..06f7ca7fe70b 100644 --- 
a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -43,6 +43,9 @@ #include <linux/personality.h> #include <linux/ptrace.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/unistd.h> diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 068ed3607bac..dee6f9d6a153 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -37,6 +37,7 @@ #include <linux/proc_fs.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <asm/processor.h> #include <asm/sections.h> diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f6aaca27ac4e..26f12f45b4bb 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -13,6 +13,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 75dab2871346..63365106ea19 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -21,7 +21,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> @@ -279,7 +279,7 @@ smp_cpu_init(int cpunum) set_cpu_online(cpunum, true); /* Initialise the idle task for this CPU */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index bf3294171230..e5288638a1d9 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -30,6 +30,8 @@ #include <linux/linkage.h> #include <linux/mm.h> #include <linux/mman.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/shm.h> #include <linux/syscalls.h> #include <linux/utsname.h> diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 1e22f981cd81..89421df70160 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/rtc.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/kernel.h> #include <linux/param.h> diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 378df9207406..991654c88eec 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -11,6 +11,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 0a21067ac0a3..e36f7b75ab07 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -23,7 +23,8 @@ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/signal.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 09ef4136c693..2fb59d2e2b29 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -27,7 +27,8 @@ * Copyright (C) 2001 Hewlett-Packard <bame@debian.org> */ -#include <linux/sched.h> +#include 
<linux/sched/signal.h> + #include "float.h" #include "math-emu.h" diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index c3f8d34a11cf..deab89a8915a 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -13,6 +13,7 @@ #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/uaccess.h> diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 5d6eea925cf4..aa50ac090e9b 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -8,6 +8,7 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/sysctl.h> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8582121d7a45..494091762bd7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -115,7 +115,7 @@ config PPC select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 + select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select ARCH_WANT_IPC_PARSE_VERSION select SPARSE_IRQ select IRQ_DOMAIN diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts new file mode 100644 index 000000000000..47afa438602e --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts @@ -0,0 +1,303 @@ +/* + * Keymile kmcent2 Device Tree Source, based on T1040RDB DTS + * + * (C) Copyright 2016 + * Valentin Longchamp, Keymile AG, valentin.longchamp@keymile.com + * + * Copyright 2014 - 2015 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +/include/ "t104xsi-pre.dtsi" + +/ { + model = "keymile,kmcent2"; + compatible = "keymile,kmcent2"; + + aliases { + front_phy = &front_phy; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + bman_fbpr: bman-fbpr { + size = <0 0x1000000>; + alignment = <0 0x1000000>; + }; + qman_fqd: qman-fqd { + size = <0 0x400000>; + alignment = <0 0x400000>; + }; + qman_pfdr: qman-pfdr { + size = <0 0x2000000>; + alignment = <0 0x2000000>; + }; + }; + + ifc: localbus@ffe124000 { + reg = <0xf 0xfe124000 0 0x2000>; + ranges = <0 0 0xf 0xe8000000 0x04000000 + 1 0 0xf 0xfa000000 0x00010000 + 2 0 0xf 0xfb000000 0x00010000 + 4 0 0xf 0xc0000000 0x08000000 + 6 0 0xf 0xd0000000 0x08000000 + 7 0 0xf 0xd8000000 0x08000000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x04000000>; + bank-width = <2>; + device-width = <2>; + }; + + nand@1,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,ifc-nand"; + reg = <0x1 0x0 0x10000>; + }; + + board-control@2,0 { + compatible = "keymile,qriox"; + reg = <0x2 0x0 0x80>; + }; + + chassis-mgmt@6,0 { + compatible = "keymile,bfticu"; + reg = <6 0 0x100>; + interrupt-controller; + interrupt-parent = <&mpic>; + interrupts = <11 1 0 0>; + #interrupt-cells = <1>; + }; + + }; + + memory { + device_type = "memory"; + }; + + dcsr: dcsr@f00000000 { + ranges = <0x00000000 0xf 0x00000000 0x01072000>; + }; + + bportals: bman-portals@ff4000000 { + ranges = <0x0 0xf 0xf4000000 0x2000000>; + }; + + qportals: qman-portals@ff6000000 { + ranges = <0x0 0xf 0xf6000000 0x2000000>; + }; + + soc: soc@ffe000000 { + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + spi@110000 { + network-clock@1 { + compatible = "zarlink,zl30364"; + reg = <1>; + spi-max-frequency = <1000000>; + }; + }; + + sdhc@114000 { + status = "disabled"; + }; + + i2c@118000 { + clock-frequency = <100000>; + + mux@70 { + compatible = "nxp,pca9547"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@54 { + compatible = "24c02"; + reg = <0x54>; + pagesize = <2>; + read-only; + label = "ddr3-spd"; + }; + }; + + i2c@7 { + reg = <7>; + #address-cells = <1>; + #size-cells = <0>; + + temp-sensor@48 { + compatible = "national,lm75"; + reg = <0x48>; + label = "SENSOR_0"; + }; + temp-sensor@4a { + compatible = "national,lm75"; + reg = <0x4a>; + label = "SENSOR_2"; + }; + temp-sensor@4b { + compatible = "national,lm75"; + reg = <0x4b>; + label = "SENSOR_3"; + }; + }; + }; + }; + + i2c@118100 { + clock-frequency = <100000>; + + eeprom@50 { + compatible = "atmel,24c08"; + reg = <0x50>; + pagesize = <16>; + }; + + eeprom@54 { + compatible = "atmel,24c08"; + reg = <0x54>; + pagesize = <16>; + }; + }; + + i2c@119000 { + status = "disabled"; + }; + + i2c@119100 { + status = "disabled"; + }; + + serial2: serial@11d500 { + status = "disabled"; + }; + + serial3: serial@11d600 { + status = "disabled"; + }; + + usb0: usb@210000 { + status = "disabled"; + }; + usb1: usb@211000 { + status = "disabled"; + }; + + display@180000 { + status = "disabled"; + }; + + sata@220000 { + status = "disabled"; + }; + sata@221000 { + status = "disabled"; + }; + + fman@400000 { + ethernet@e0000 { + fixed-link = <0 1 1000 0 0>; + phy-connection-type = "sgmii"; + }; + + ethernet@e2000 { + fixed-link = <1 1 1000 0 0>; + phy-connection-type = "sgmii"; + }; + + ethernet@e4000 { + status = "disabled"; + }; 
+ + ethernet@e6000 { + status = "disabled"; + }; + + ethernet@e8000 { + phy-handle = <&front_phy>; + phy-connection-type = "rgmii"; + }; + + mdio0: mdio@fc000 { + front_phy: ethernet-phy@11 { + reg = <0x11>; + }; + }; + }; + }; + + + pci0: pcie@ffe240000 { + reg = <0xf 0xfe240000 0 0x10000>; + ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 + 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; + pcie@0 { + ranges = <0x02000000 0 0xe0000000 + 0x02000000 0 0xe0000000 + 0 0x20000000 + + 0x01000000 0 0x00000000 + 0x01000000 0 0x00000000 + 0 0x00010000>; + }; + }; + + pci1: pcie@ffe250000 { + status = "disabled"; + }; + + pci2: pcie@ffe260000 { + status = "disabled"; + }; + + pci3: pcie@ffe270000 { + status = "disabled"; + }; + + qe: qe@ffe140000 { + ranges = <0x0 0xf 0xfe140000 0x40000>; + reg = <0xf 0xfe140000 0 0x480>; + brg-frequency = <0>; + bus-frequency = <0>; + + si1: si@700 { + compatible = "fsl,t1040-qe-si"; + reg = <0x700 0x80>; + }; + + siram1: siram@1000 { + compatible = "fsl,t1040-qe-siram"; + reg = <0x1000 0x800>; + }; + + ucc_hdlc: ucc@2000 { + device_type = "hdlc"; + compatible = "fsl,ucc-hdlc"; + rx-clock-name = "clk9"; + tx-clock-name = "clk9"; + fsl,tx-timeslot-mask = <0xfffffffe>; + fsl,rx-timeslot-mask = <0xfffffffe>; + fsl,siram-entry-id = <0>; + }; + }; +}; + +#include "t1040si-post.dtsi" diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts index ae70a24094b0..e103c0f3f650 100644 --- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts +++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts @@ -83,6 +83,10 @@ }; }; + sdhc@114000 { + status = "disabled"; + }; + i2c@119000 { status = "disabled"; }; diff --git a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts index 8e94448f296c..76b2bd6f7742 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8569mds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8569mds.dts @@ -55,7 +55,7 @@ label = "kernel"; reg = <0x01c00000 0x002e0000>; }; - partiton@1ee0000 { + partition@1ee0000 { label = "dtb"; reg = <0x01ee0000 0x00020000>; }; diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi index da2894c59479..4908af501098 100644 --- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi @@ -422,7 +422,7 @@ 0x00030001 0x0000000d 0x00030002 0x00000019 0x00030003 0x00000024>; - #thermal-sensor-cells = <0>; + #thermal-sensor-cells = <1>; }; thermal-zones { @@ -430,7 +430,7 @@ polling-delay-passive = <1000>; polling-delay = <5000>; - thermal-sensors = <&tmu>; + thermal-sensors = <&tmu 0>; trips { cpu_alert: cpu-alert { diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 44e399b17f6f..145c7f43b5b6 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -526,7 +526,7 @@ 0x00030000 0x00000012 0x00030001 0x0000001d>; - #thermal-sensor-cells = <0>; + #thermal-sensor-cells = <1>; }; thermal-zones { @@ -534,7 +534,7 @@ polling-delay-passive = <1000>; polling-delay = <5000>; - thermal-sensors = <&tmu>; + thermal-sensors = <&tmu 2>; trips { cpu_alert: cpu-alert { diff --git a/arch/powerpc/configs/85xx/kmp204x_defconfig b/arch/powerpc/configs/85xx/kmp204x_defconfig deleted file mode 100644 index aaaaa609cd24..000000000000 --- a/arch/powerpc/configs/85xx/kmp204x_defconfig +++ /dev/null @@ -1,220 +0,0 @@ -CONFIG_PPC_85xx=y -CONFIG_SMP=y -CONFIG_NR_CPUS=8 -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y 
-CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -CONFIG_CORENET_GENERIC=y -CONFIG_MPIC_MSGR=y -CONFIG_HIGHMEM=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=m -CONFIG_KEXEC=y -CONFIG_FORCE_MAX_ZONEORDER=13 -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -# CONFIG_PCIEASPM is not set -CONFIG_PCI_MSI=y -CONFIG_ADVANCED_OPTIONS=y -CONFIG_LOWMEM_SIZE_BOOL=y -CONFIG_LOWMEM_SIZE=0x20000000 -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=y -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_IPV6=y -CONFIG_IP_SCTP=m -CONFIG_TIPC=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_HFSC=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -CONFIG_NET_SCH_RED=y -CONFIG_NET_SCH_SFQ=y -CONFIG_NET_SCH_TEQL=y -CONFIG_NET_SCH_TBF=y -CONFIG_NET_SCH_GRED=y -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_TCINDEX=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_UEVENT_HELPER_PATH="/sbin/mdev" -CONFIG_DEVTMPFS=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PHRAM=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_ECC_BCH=y -CONFIG_MTD_NAND_FSL_ELBC=y -CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_GLUEBI=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=2 -CONFIG_BLK_DEV_RAM_SIZE=2048 -CONFIG_EEPROM_AT24=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CISCO is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set -CONFIG_FSL_PQ_MDIO=y -CONFIG_FSL_XGMAC_MDIO=y -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set -# CONFIG_NET_VENDOR_RDC is not set -# 
CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -# CONFIG_NET_VENDOR_XILINX is not set -CONFIG_MARVELL_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_FIXED_PHY=y -# CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV is not set -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_PPC_EPAPR_HV_BYTECHAN=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_DETECT_IRQ=y -CONFIG_SERIAL_8250_RSA=y -CONFIG_NVRAM=y -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y -CONFIG_I2C_MUX_PCA954x=y -CONFIG_I2C_MPC=y -CONFIG_SPI=y -CONFIG_SPI_FSL_SPI=y -CONFIG_SPI_FSL_ESPI=y -CONFIG_SPI_SPIDEV=m -CONFIG_PTP_1588_CLOCK=y -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_EDAC=y -CONFIG_EDAC_MM_EDAC=y -CONFIG_EDAC_MPC85XX=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS3232=y -CONFIG_RTC_DRV_CMOS=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_CLK_QORIQ=y -CONFIG_EXT2_FS=y -CONFIG_NTFS_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y -CONFIG_UBIFS_FS=y -CONFIG_CRAMFS=y -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_XZ=y -CONFIG_NFS_FS=y -CONFIG_NFS_V4=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=m -CONFIG_CRC_ITU_T=m -CONFIG_DEBUG_INFO=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_SHIRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SCHEDSTATS=y -CONFIG_RCU_TRACE=y -CONFIG_UPROBE_EVENT=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DEV_FSL_CAAM=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 6d0eb02fefa4..4ff68b752618 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -58,7 +58,6 @@ CONFIG_KEXEC_FILE=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_PPC_64K_PAGES=y diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 59abc620f8e8..73eb794d6163 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -154,6 +154,34 @@ static __inline__ int test_and_change_bit(unsigned long nr, return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; } +#ifdef CONFIG_PPC64 +static __inline__ unsigned long clear_bit_unlock_return_word(int nr, + volatile unsigned long *addr) +{ + unsigned long old, t; + unsigned long *p = (unsigned long *)addr + BIT_WORD(nr); + unsigned long mask = BIT_MASK(nr); + + __asm__ __volatile__ ( + PPC_RELEASE_BARRIER +"1:" PPC_LLARX(%0,0,%3,0) "\n" + "andc %1,%0,%2\n" + PPC405_ERR77(0,%3) + PPC_STLCX "%1,0,%3\n" + "bne- 1b\n" + : "=&r" (old), "=&r" (t) + : "r" (mask), "r" (p) + : "cc", "memory"); + + return old; +} + +/* This is a special function for mm/filemap.c */ +#define clear_bit_unlock_is_negative_byte(nr, addr) \ + (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters)) + +#endif /* CONFIG_PPC64 */ + #include <asm-generic/bitops/non-atomic.h> static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) diff --git 
a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index d73e9dfa5237..1145dc8e726d 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -30,7 +30,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; #ifndef __ASSEMBLY__ /* - * ISA 3.0 partiton and process table entry format + * ISA 3.0 partition and process table entry format */ struct prtb_entry { __be64 prtb0; diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 406c2b1ff82d..0245bfcaac32 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -6,7 +6,6 @@ #ifndef _ASM_POWERPC_DEVICE_H #define _ASM_POWERPC_DEVICE_H -struct dma_map_ops; struct device_node; #ifdef CONFIG_PPC64 struct pci_dn; @@ -20,9 +19,6 @@ struct iommu_table; * drivers/macintosh/macio_asic.c */ struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - /* * These two used to be a union. However, with the hybrid ops we need * both so here we store both a DMA offset for direct mappings and diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 84e3f8dd5e4f..181a095468e4 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -76,24 +76,16 @@ static inline unsigned long device_to_mask(struct device *dev) #ifdef CONFIG_PPC64 extern struct dma_map_ops dma_iommu_ops; #endif -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. The * only ISA DMA device we support is the floppy and we have a hack * in the floppy driver directly to get a device for us. */ - if (unlikely(dev == NULL)) - return NULL; - - return dev->archdata.dma_ops; -} - -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - dev->archdata.dma_ops = ops; + return NULL; } /* diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h index 3abb58394da4..b889d13547fd 100644 --- a/arch/powerpc/include/asm/fsl_hcalls.h +++ b/arch/powerpc/include/asm/fsl_hcalls.h @@ -109,7 +109,7 @@ static inline unsigned int fh_send_nmi(unsigned int vcpu_mask) #define FH_DTPROP_MAX_PROPLEN 32768 /** - * fh_partiton_get_dtprop - get a property from a guest device tree. + * fh_partition_get_dtprop - get a property from a guest device tree. 
* @handle: handle of partition whose device tree is to be accessed * @dtpath_addr: physical address of device tree path to access * @propname_addr: physical address of name of property diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index d821835ade86..0503c98b2117 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -1,5 +1,8 @@ #ifndef _ASM_POWERPC_KPROBES_H #define _ASM_POWERPC_KPROBES_H + +#include <asm-generic/kprobes.h> + #ifdef __KERNEL__ /* * Kernel Probes (KProbes) diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index e9bd6cf0212f..93eded8d3843 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -53,8 +53,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index 696438f09aea..de9681034353 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -57,6 +57,8 @@ struct pnv_php_slot { uint64_t id; char *name; int slot_no; + unsigned int flags; +#define PNV_PHP_FLAG_BROKEN_PDC 0x1 struct kref kref; #define PNV_PHP_STATE_INITIALIZED 0 #define PNV_PHP_STATE_REGISTERED 1 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 025833b8df9f..359c44341761 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -505,7 +505,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define MTMSRD(r) mtmsrd r #define MTMSR_EERI(reg) mtmsrd reg,1 #else -#define FIX_SRR1(ra, rb) #ifndef CONFIG_40x #define RFI rfi #else diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 21e0b52685b5..e0fecbcea2a2 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -225,6 +225,7 @@ struct thread_struct { #ifdef CONFIG_PPC64 unsigned long start_tb; /* Start purr when proc switched in */ unsigned long accum_tb; /* Total accumulated purr for process */ +#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT struct perf_event *ptrace_bps[HBP_NUM]; /* @@ -233,7 +234,6 @@ struct thread_struct { */ struct perf_event *last_hit_ubp; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ -#endif struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ unsigned long trap_nr; /* last trap # on this thread */ u8 load_fp; diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 2c8001cc93b6..4a90634e8322 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -153,6 +153,7 @@ struct of_drconf_cell { #define OV5_XCMO 0x0440 /* Page Coalescing */ #define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */ #define OV5_PRRN 0x0540 /* Platform Resource Reassignment */ +#define OV5_HP_EVT 0x0604 /* Hot Plug Event support */ #define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */ #define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */ #define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */ diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index a19f831a4cc9..17ee719e799f 100644 --- 
a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -435,7 +435,7 @@ static inline void *ps3_system_bus_get_drvdata( return dev_get_drvdata(&dev->core); } -/* These two need global scope for get_dma_ops(). */ +/* These two need global scope for get_arch_dma_ops(). */ extern struct bus_type ps3_system_bus_type; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index cb02d32db147..fc879fd6bdae 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -552,7 +552,9 @@ #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ +#ifndef SPRN_ICTRL #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ +#endif #define ICTRL_EICE 0x08000000 /* enable icache parity errs */ #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ #define ICTRL_EICP 0x00000100 /* enable icache par. check */ diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h index 1f1636124a04..ae16fef7a4d6 100644 --- a/arch/powerpc/include/asm/reg_8xx.h +++ b/arch/powerpc/include/asm/reg_8xx.h @@ -28,6 +28,17 @@ /* Special MSR manipulation registers */ #define SPRN_EIE 80 /* External interrupt enable (EE=1, RI=1) */ #define SPRN_EID 81 /* External interrupt disable (EE=0, RI=1) */ +#define SPRN_NRI 82 /* Non recoverable interrupt (EE=0, RI=0) */ + +/* Debug registers */ +#define SPRN_CMPA 144 +#define SPRN_COUNTA 150 +#define SPRN_CMPE 152 +#define SPRN_CMPF 153 +#define SPRN_LCTRL1 156 +#define SPRN_LCTRL2 157 +#define SPRN_ICTRL 158 +#define SPRN_BAR 159 /* Commands. Only the first few are available to the instruction cache. */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 076b89247ab5..ec9dd79398ee 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -307,6 +307,7 @@ struct pseries_hp_errorlog { union { __be32 drc_index; __be32 drc_count; + struct { __be32 count, index; } ic; char drc_name[1]; } _drc_u; }; @@ -323,6 +324,7 @@ struct pseries_hp_errorlog { #define PSERIES_HP_ELOG_ID_DRC_NAME 1 #define PSERIES_HP_ELOG_ID_DRC_INDEX 2 #define PSERIES_HP_ELOG_ID_DRC_COUNT 3 +#define PSERIES_HP_ELOG_ID_DRC_IC 4 struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, uint16_t section_id); diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index de99d6e29430..01d45a5fd00b 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -13,7 +13,7 @@ #include <linux/swiotlb.h> -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static inline void dma_mark_clean(void *addr, size_t size) {} diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f25239b3a06f..4367e7df51a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -72,205 +72,190 @@ #include <asm/fixmap.h> #endif +#define STACK_PT_REGS_OFFSET(sym, val) \ + DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val)) + int main(void) { - DEFINE(THREAD, offsetof(struct task_struct, thread)); - DEFINE(MM, offsetof(struct task_struct, mm)); - DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); + OFFSET(THREAD, task_struct, thread); + OFFSET(MM, task_struct, mm); + OFFSET(MMCONTEXTID, mm_struct, context.id); #ifdef CONFIG_PPC64 DEFINE(SIGSEGV, SIGSEGV); 
DEFINE(NMI_MASK, NMI_MASK); - DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); + OFFSET(TASKTHREADPPR, task_struct, thread.ppr); #else - DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); + OFFSET(THREAD_INFO, task_struct, stack); DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); - DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); + OFFSET(KSP_LIMIT, thread_struct, ksp_limit); #endif /* CONFIG_PPC64 */ #ifdef CONFIG_LIVEPATCH - DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); + OFFSET(TI_livepatch_sp, thread_info, livepatch_sp); #endif - DEFINE(KSP, offsetof(struct thread_struct, ksp)); - DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); + OFFSET(KSP, thread_struct, ksp); + OFFSET(PT_REGS, thread_struct, regs); #ifdef CONFIG_BOOKE - DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); + OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]); #endif - DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); - DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area)); - DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); - DEFINE(THREAD_LOAD_FP, offsetof(struct thread_struct, load_fp)); + OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode); + OFFSET(THREAD_FPSTATE, thread_struct, fp_state); + OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area); + OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr); + OFFSET(THREAD_LOAD_FP, thread_struct, load_fp); #ifdef CONFIG_ALTIVEC - DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); - DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area)); - DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); - DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); - DEFINE(THREAD_LOAD_VEC, offsetof(struct thread_struct, load_vec)); + OFFSET(THREAD_VRSTATE, thread_struct, vr_state); + OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area); + OFFSET(THREAD_VRSAVE, thread_struct, vrsave); + OFFSET(THREAD_USED_VR, thread_struct, used_vr); + OFFSET(VRSTATE_VSCR, thread_vr_state, vscr); + OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX - DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); + OFFSET(THREAD_USED_VSR, thread_struct, used_vsr); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 - DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); + OFFSET(KSP_VSID, thread_struct, ksp_vsid); #else /* CONFIG_PPC64 */ - DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); + OFFSET(PGDIR, thread_struct, pgdir); #ifdef CONFIG_SPE - DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); - DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); - DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); - DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); + OFFSET(THREAD_EVR0, thread_struct, evr[0]); + OFFSET(THREAD_ACC, thread_struct, acc); + OFFSET(THREAD_SPEFSCR, thread_struct, spefscr); + OFFSET(THREAD_USED_SPE, thread_struct, used_spe); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0)); + OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0); #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER - DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); + 
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu); #endif #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) - DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); + OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); - DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); - DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); - DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); - DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); - DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); - DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); - DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); - DEFINE(THREAD_CKVRSTATE, offsetof(struct thread_struct, - ckvr_state)); - DEFINE(THREAD_CKVRSAVE, offsetof(struct thread_struct, - ckvrsave)); - DEFINE(THREAD_CKFPSTATE, offsetof(struct thread_struct, - ckfp_state)); + OFFSET(PACATMSCRATCH, paca_struct, tm_scratch); + OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar); + OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr); + OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar); + OFFSET(THREAD_TM_TAR, thread_struct, tm_tar); + OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr); + OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr); + OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs); + OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state); + OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); + OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state); /* Local pt_regs on stack for Transactional Memory funcs. */ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); + OFFSET(TI_PREEMPT, thread_info, preempt_count); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_CPU, thread_info, cpu); #ifdef CONFIG_PPC64 - DEFINE(DCACHEL1BLOCKSIZE, offsetof(struct ppc64_caches, l1d.block_size)); - DEFINE(DCACHEL1LOGBLOCKSIZE, offsetof(struct ppc64_caches, l1d.log_block_size)); - DEFINE(DCACHEL1BLOCKSPERPAGE, offsetof(struct ppc64_caches, l1d.blocks_per_page)); - DEFINE(ICACHEL1BLOCKSIZE, offsetof(struct ppc64_caches, l1i.block_size)); - DEFINE(ICACHEL1LOGBLOCKSIZE, offsetof(struct ppc64_caches, l1i.log_block_size)); - DEFINE(ICACHEL1BLOCKSPERPAGE, offsetof(struct ppc64_caches, l1i.blocks_per_page)); + OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size); + OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size); + OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page); + OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size); + OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size); + OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); - DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); - DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); - DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); - DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); - DEFINE(PACASAVEDMSR, offsetof(struct 
paca_struct, saved_msr)); - DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); - DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); - DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); - DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); - DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); - DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); - DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened)); + OFFSET(PACAPACAINDEX, paca_struct, paca_index); + OFFSET(PACAPROCSTART, paca_struct, cpu_start); + OFFSET(PACAKSAVE, paca_struct, kstack); + OFFSET(PACACURRENT, paca_struct, __current); + OFFSET(PACASAVEDMSR, paca_struct, saved_msr); + OFFSET(PACASTABRR, paca_struct, stab_rr); + OFFSET(PACAR1, paca_struct, saved_r1); + OFFSET(PACATOC, paca_struct, kernel_toc); + OFFSET(PACAKBASE, paca_struct, kernelbase); + OFFSET(PACAKMSR, paca_struct, kernel_msr); + OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); + OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); #ifdef CONFIG_PPC_BOOK3S - DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id)); + OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, - mm_ctx_low_slices_psize)); - DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, - mm_ctx_high_slices_psize)); + OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize); + OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize); DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); #endif /* CONFIG_PPC_MM_SLICES */ #endif #ifdef CONFIG_PPC_BOOK3E - DEFINE(PACAPGD, offsetof(struct paca_struct, pgd)); - DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd)); - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit)); - DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg)); - DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); - DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); - DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); - DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); - - DEFINE(TCD_ESEL_NEXT, - offsetof(struct tlb_core_data, esel_next)); - DEFINE(TCD_ESEL_MAX, - offsetof(struct tlb_core_data, esel_max)); - DEFINE(TCD_ESEL_FIRST, - offsetof(struct tlb_core_data, esel_first)); + OFFSET(PACAPGD, paca_struct, pgd); + OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXTLB, paca_struct, extlb); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXCRIT, paca_struct, excrit); + OFFSET(PACA_EXDBG, paca_struct, exdbg); + OFFSET(PACA_MC_STACK, paca_struct, mc_kstack); + OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack); + OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack); + OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr); + + OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next); + OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max); + OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); #endif /* CONFIG_PPC_BOOK3E */ #ifdef CONFIG_PPC_STD_MMU_64 - DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); - DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); - DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); + OFFSET(PACASLBCACHE, paca_struct, slb_cache); + OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr); + 
OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp); #ifdef CONFIG_PPC_MM_SLICES - DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); + OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp); #else - DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp)); + OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp); #endif /* CONFIG_PPC_MM_SLICES */ - DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); - DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); - DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); - DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); - DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); - DEFINE(SLBSHADOW_STACKVSID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); - DEFINE(SLBSHADOW_STACKESID, - offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); - DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); - DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); - DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); - DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); - DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); + OFFSET(PACA_EXGEN, paca_struct, exgen); + OFFSET(PACA_EXMC, paca_struct, exmc); + OFFSET(PACA_EXSLB, paca_struct, exslb); + OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr); + OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr); + OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid); + OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid); + OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area); + OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use); + OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx); + OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count); + OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx); #endif /* CONFIG_PPC_STD_MMU_64 */ - DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); + OFFSET(PACAEMERGSP, paca_struct, emergency_sp); #ifdef CONFIG_PPC_BOOK3S_64 - DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); - DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); -#endif - DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); - DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); - DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default)); - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct paca_struct, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct paca_struct, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct paca_struct, accounting.utime)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct paca_struct, accounting.stime)); - DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); - DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); - DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso)); + OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp); + OFFSET(PACA_IN_MCE, paca_struct, in_mce); +#endif + OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); + OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); + OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default); + OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime); + OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); + OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost); + OFFSET(PACA_SPRG_VDSO, 
paca_struct, sprg_vdso); #else /* CONFIG_PPC64 */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - DEFINE(ACCOUNT_STARTTIME, - offsetof(struct thread_info, accounting.starttime)); - DEFINE(ACCOUNT_STARTTIME_USER, - offsetof(struct thread_info, accounting.starttime_user)); - DEFINE(ACCOUNT_USER_TIME, - offsetof(struct thread_info, accounting.utime)); - DEFINE(ACCOUNT_SYSTEM_TIME, - offsetof(struct thread_info, accounting.stime)); + OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime); + OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user); + OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime); + OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime); #endif #endif /* CONFIG_PPC64 */ /* RTAS */ - DEFINE(RTASBASE, offsetof(struct rtas_t, base)); - DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); + OFFSET(RTASBASE, rtas_t, base); + OFFSET(RTASENTRY, rtas_t, entry); /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); @@ -280,38 +265,38 @@ int main(void) DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); #endif /* CONFIG_PPC64 */ - DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); - DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); - DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); - DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); - DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); - DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); - DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); - DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); - DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); - DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); - DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); - DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); - DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); - DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); + STACK_PT_REGS_OFFSET(GPR0, gpr[0]); + STACK_PT_REGS_OFFSET(GPR1, gpr[1]); + STACK_PT_REGS_OFFSET(GPR2, gpr[2]); + STACK_PT_REGS_OFFSET(GPR3, gpr[3]); + STACK_PT_REGS_OFFSET(GPR4, gpr[4]); + STACK_PT_REGS_OFFSET(GPR5, gpr[5]); + STACK_PT_REGS_OFFSET(GPR6, gpr[6]); + STACK_PT_REGS_OFFSET(GPR7, gpr[7]); + STACK_PT_REGS_OFFSET(GPR8, gpr[8]); + STACK_PT_REGS_OFFSET(GPR9, gpr[9]); + STACK_PT_REGS_OFFSET(GPR10, gpr[10]); + STACK_PT_REGS_OFFSET(GPR11, gpr[11]); + STACK_PT_REGS_OFFSET(GPR12, gpr[12]); + STACK_PT_REGS_OFFSET(GPR13, gpr[13]); #ifndef CONFIG_PPC64 - DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); + STACK_PT_REGS_OFFSET(GPR14, gpr[14]); #endif /* CONFIG_PPC64 */ /* * Note: these symbols include _ because they overlap with special * register names */ - DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); - DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); - DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); - DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); - DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); - DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); - DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); - 
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); - DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); - DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); + STACK_PT_REGS_OFFSET(_NIP, nip); + STACK_PT_REGS_OFFSET(_MSR, msr); + STACK_PT_REGS_OFFSET(_CTR, ctr); + STACK_PT_REGS_OFFSET(_LINK, link); + STACK_PT_REGS_OFFSET(_CCR, ccr); + STACK_PT_REGS_OFFSET(_XER, xer); + STACK_PT_REGS_OFFSET(_DAR, dar); + STACK_PT_REGS_OFFSET(_DSISR, dsisr); + STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3); + STACK_PT_REGS_OFFSET(RESULT, result); + STACK_PT_REGS_OFFSET(_TRAP, trap); #ifndef CONFIG_PPC64 /* * The PowerPC 400-class & Book-E processors have neither the DAR @@ -319,10 +304,10 @@ int main(void) * DEAR and ESR SPRs for such processors. For critical interrupts * we use them to hold SRR0 and SRR1. */ - DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); + STACK_PT_REGS_OFFSET(_DEAR, dar); + STACK_PT_REGS_OFFSET(_ESR, dsisr); #else /* CONFIG_PPC64 */ - DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); + STACK_PT_REGS_OFFSET(SOFTE, softe); /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); @@ -351,17 +336,17 @@ int main(void) #endif #ifndef CONFIG_PPC64 - DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); + OFFSET(MM_PGD, mm_struct, pgd); #endif /* ! CONFIG_PPC64 */ /* About the CPU features table */ - DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); - DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); - DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); + OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features); + OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup); + OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore); - DEFINE(pbe_address, offsetof(struct pbe, address)); - DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); - DEFINE(pbe_next, offsetof(struct pbe, next)); + OFFSET(pbe_address, pbe, address); + OFFSET(pbe_orig_address, pbe, orig_address); + OFFSET(pbe_next, pbe, next); #ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); @@ -369,40 +354,40 @@ int main(void) #endif /* ! 
CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); - DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); - DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); - DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); - DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); - DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); - DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); - DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); - DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); - DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction)); - DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); - DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); - DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); - DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size)); + OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp); + OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec); + OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs); + OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count); + OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest); + OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime); + OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32); + OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec); + OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec); + OFFSET(STAMP_XTIME, vdso_data, stamp_xtime); + OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); + OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); + OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); + OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size); #ifdef CONFIG_PPC64 - DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); - DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); - DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); + OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64); + OFFSET(TVAL64_TV_SEC, timeval, tv_sec); + OFFSET(TVAL64_TV_USEC, timeval, tv_usec); + OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec); + OFFSET(TSPC64_TV_SEC, timespec, tv_sec); + OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec); + OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec); #else - DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); - DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); + OFFSET(TVAL32_TV_SEC, timeval, tv_sec); + OFFSET(TVAL32_TV_USEC, timeval, tv_usec); + OFFSET(TSPC32_TV_SEC, timespec, tv_sec); + OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec); #endif /* timeval/timezone offsets for use by vdso */ - DEFINE(TZONE_TZ_MINWEST, 
offsetof(struct timezone, tz_minuteswest)); - DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); + OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest); + OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime); /* Other bits used by the vdso */ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); @@ -422,170 +407,170 @@ int main(void) DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM - DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); - DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); - DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); - DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); - DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); + OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); + OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); + OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); + OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr); + OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); + OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); #ifdef CONFIG_ALTIVEC - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); + OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); #endif - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); + OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); - DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); - DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); - DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); - DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); - DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); - DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); + OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); + OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); + OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1); + OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0); + OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1); + OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2); + OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3); #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry)); - DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr)); - DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit)); - DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time)); - DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time)); - DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity)); - DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start)); - DEFINE(TAS_SEQCOUNT, offsetof(struct kvmhv_tb_accumulator, seqcount)); - DEFINE(TAS_TOTAL, offsetof(struct kvmhv_tb_accumulator, tb_total)); - DEFINE(TAS_MIN, offsetof(struct kvmhv_tb_accumulator, tb_min)); - DEFINE(TAS_MAX, offsetof(struct kvmhv_tb_accumulator, tb_max)); -#endif - DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3)); - DEFINE(VCPU_SHARED_SPRG4, 
offsetof(struct kvm_vcpu_arch_shared, sprg4)); - DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5)); - DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6)); - DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7)); - DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); - DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); - DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); - DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry); + OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr); + OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit); + OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time); + OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time); + OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity); + OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start); + OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount); + OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total); + OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min); + OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max); +#endif + OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3); + OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4); + OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5); + OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6); + OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7); + OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid); + OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1); + OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared); + OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr); #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) - DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); + OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian); #endif - DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); - DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); - DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2)); - DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3)); - DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); - DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); + OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0); + OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1); + OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2); + OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3); + OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4); + OFFSET(VCPU_SHARED_MAS6, kvm_vcpu_arch_shared, mas6); - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); + OFFSET(VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_LPID, kvm, arch.lpid); /* book3s */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets)); - DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); - DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); - DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); - DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); - DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); - DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls)); - DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); - DEFINE(KVM_RADIX, offsetof(struct kvm, 
arch.radix)); - DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); - DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); - DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); - DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); - DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst)); - DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu)); - DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu)); + OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets); + OFFSET(KVM_SDR1, kvm, arch.sdr1); + OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid); + OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr); + OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1); + OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits); + OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls); + OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); + OFFSET(KVM_RADIX, kvm, arch.radix); + OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); + OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); + OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); + OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); + OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); + OFFSET(VCPU_CPU, kvm_vcpu, cpu); + OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif #ifdef CONFIG_PPC_BOOK3S - DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); - DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); - DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); - DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); - DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); - DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); - DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); - DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); - DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); - DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); - DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); - DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); - DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); - DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); - DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); - DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); - DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); - DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); - DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); - DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); - DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); - DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); - DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); - DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); - DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier)); - DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); - DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); - DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); - DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); - DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); - DEFINE(VCPU_FAULT_GPA, offsetof(struct kvm_vcpu, arch.fault_gpa)); - DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); - DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); - DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); - DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); - 
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); - DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); - DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); - DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); - DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); - DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); - DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); - DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); - DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); - DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid)); - DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr)); - DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map)); - DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); - DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); - DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm)); - DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); - DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); - DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); - DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes)); - DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb)); - DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); - DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); + OFFSET(VCPU_PURR, kvm_vcpu, arch.purr); + OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr); + OFFSET(VCPU_IC, kvm_vcpu, arch.ic); + OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr); + OFFSET(VCPU_AMR, kvm_vcpu, arch.amr); + OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor); + OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr); + OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); + OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); + OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); + OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); + OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); + OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); + OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); + OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); + OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires); + OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); + OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); + OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); + OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); + OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); + OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); + OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); + OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar); + OFFSET(VCPU_SIER, kvm_vcpu, arch.sier); + OFFSET(VCPU_SLB, kvm_vcpu, arch.slb); + OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max); + OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr); + OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr); + OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar); + OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa); + OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap); + OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar); + OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr); + OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr); + OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb); + OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr); + OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr); + OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr); + OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr); + OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr); + OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr); + OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop); + OFFSET(VCPU_WORT, kvm_vcpu, arch.wort); + OFFSET(VCPU_TID, kvm_vcpu, arch.tid); + OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr); + OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map); + 
OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest); + OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); + OFFSET(VCORE_KVM, kvmppc_vcore, kvm); + OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); + OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); + OFFSET(VCORE_PCR, kvmppc_vcore, pcr); + OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); + OFFSET(VCORE_VTB, kvmppc_vcore, vtb); + OFFSET(VCPU_SLB_E, kvmppc_slb, orige); + OFFSET(VCPU_SLB_V, kvmppc_slb, origv); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); - DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); - DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); - DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); - DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); - DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); - DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); - DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); - DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm)); - DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); - DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); - DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); - DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); - DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); - DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); + OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar); + OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar); + OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr); + OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm); + OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr); + OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr); + OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm); + OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm); + OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm); + OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm); + OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm); + OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm); + OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm); + OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm); + OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm); #endif #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE - DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); + OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu); # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) #else # define SVCPU_FIELD(x, f) @@ -668,11 +653,11 @@ int main(void) HSTATE_FIELD(HSTATE_DECEXP, dec_expires); HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode); DEFINE(IPI_PRIORITY, IPI_PRIORITY); - DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr)); - DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar)); - DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar)); - DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap)); - DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped)); + OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr); + OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar); + OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); + OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap); + OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 @@ -682,32 +667,27 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); - DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); - DEFINE(VCPU_LR, offsetof(struct 
kvm_vcpu, arch.lr)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); - DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); - DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); - DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save)); + OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_XER, kvm_vcpu, arch.xer); + OFFSET(VCPU_LR, kvm_vcpu, arch.lr); + OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); + OFFSET(VCPU_PC, kvm_vcpu, arch.pc); + OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); + OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); + OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); + OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr); + OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save); #endif /* CONFIG_PPC_BOOK3S */ #endif /* CONFIG_KVM */ #ifdef CONFIG_KVM_GUEST - DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, - scratch1)); - DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, - scratch2)); - DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, - scratch3)); - DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, - int_pending)); - DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); - DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, - critical)); - DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); + OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1); + OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2); + OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3); + OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending); + OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr); + OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical); + OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr); #endif #ifdef CONFIG_44x @@ -716,45 +696,37 @@ int main(void) #endif #ifdef CONFIG_PPC_FSL_BOOK3E DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); - DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); - DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); - DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); - DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); - DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); + OFFSET(TLBCAM_MAS0, tlbcam, MAS0); + OFFSET(TLBCAM_MAS1, tlbcam, MAS1); + OFFSET(TLBCAM_MAS2, tlbcam, MAS2); + OFFSET(TLBCAM_MAS3, tlbcam, MAS3); + OFFSET(TLBCAM_MAS7, tlbcam, MAS7); #endif #if defined(CONFIG_KVM) && defined(CONFIG_SPE) - DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); - DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); - DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); - DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); + OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]); + OFFSET(VCPU_ACC, kvm_vcpu, arch.acc); + OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr); + OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr); #endif #ifdef CONFIG_KVM_BOOKE_HV - DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); - DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); + OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4); + OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); #endif #ifdef CONFIG_KVM_EXIT_TIMING - DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbu)); - DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, - arch.timing_exit.tv32.tbl)); - 
DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbu)); - DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, - arch.timing_last_enter.tv32.tbl)); + OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); + OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); + OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu); + OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl); #endif #ifdef CONFIG_PPC_POWERNV - DEFINE(PACA_CORE_IDLE_STATE_PTR, - offsetof(struct paca_struct, core_idle_state_ptr)); - DEFINE(PACA_THREAD_IDLE_STATE, - offsetof(struct paca_struct, thread_idle_state)); - DEFINE(PACA_THREAD_MASK, - offsetof(struct paca_struct, thread_mask)); - DEFINE(PACA_SUBCORE_SIBLING_MASK, - offsetof(struct paca_struct, subcore_sibling_mask)); + OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr); + OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state); + OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask); + OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask); #endif DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 917188615bf5..7fe8c79e6937 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -101,6 +101,8 @@ _GLOBAL(__setup_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 @@ -122,6 +124,8 @@ _GLOBAL(__restore_cpu_power9) mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) or r3, r3, r4 + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) + andc r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 6a82ef039c50..bb7a1890aeb7 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -386,6 +386,23 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, + { /* 3.00-compliant processor, i.e. Power9 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000005, + .cpu_name = "POWER9 (architected)", + .cpu_features = CPU_FTRS_POWER9, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .oprofile_type = PPC_OPROFILE_INVALID, + .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .flush_tlb = __flush_tlb_power9, + .platform = "power9", + }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index c6689f658b50..d0ea7860e02b 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. 
*/ -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_direct_alloc_coherent, .free = __dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 6877e3fa95bb..41c749586bd2 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev) struct dev_archdata __maybe_unused *sd = &dev->archdata; #ifdef CONFIG_SWIOTLB - if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops) + if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops) pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); #endif @@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev, } #endif -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, @@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask); int __dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask); u64 __dma_get_required_mask(struct device *dev) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return 0; diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3841d749a430..a38600949f3a 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -205,6 +205,9 @@ transfer_to_handler_cont: mflr r9 lwz r11,0(r9) /* virtual address of handler */ lwz r9,4(r9) /* where to go when done */ +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif #ifdef CONFIG_TRACE_IRQFLAGS lis r12,reenable_mmu@h ori r12,r12,reenable_mmu@l @@ -292,7 +295,9 @@ stack_ovf: lis r9,StackOverflow@ha addi r9,r9,StackOverflow@l LOAD_MSR_KERNEL(r10,MSR_KERNEL) - FIX_SRR1(r10,r12) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r10 SYNC @@ -417,9 +422,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtlr r4 mtcr r5 lwz r7,_NIP(r1) - FIX_SRR1(r8, r0) lwz r2,GPR2(r1) lwz r1,GPR1(r1) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 SYNC @@ -699,6 +706,9 @@ fast_exception_return: lwz r10,_LINK(r11) mtlr r10 REST_GPR(10, r11) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR1,r9 mtspr SPRN_SRR0,r12 REST_GPR(9, r11) @@ -947,7 +957,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) .globl exc_exit_restart exc_exit_restart: lwz r12,_NIP(r1) - FIX_SRR1(r9,r10) +#ifdef CONFIG_PPC_8xx_PERF_EVENT + mtspr SPRN_NRI, r0 +#endif mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r9 REST_4GPRS(9, r1) @@ -1290,7 +1302,6 @@ _GLOBAL(enter_rtas) 1: tophys(r9,r1) lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r9,8(r9) /* original msr value */ - FIX_SRR1(r9,r0) addi r1,r1,INT_FRAME_SIZE li r0,0 mtspr SPRN_SPRG_RTAS,r0 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 9d963547d243..1607be7c0ef2 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -869,7 +869,6 @@ __secondary_start: /* enable MMU and jump to start_secondary */ li r4,MSR_KERNEL - 
FIX_SRR1(r4,r5) lis r3,start_secondary@h ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 @@ -977,7 +976,6 @@ start_here: ori r4,r4,2f@l tophys(r4,r4) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) - FIX_SRR1(r3,r5) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 SYNC @@ -1001,7 +999,6 @@ start_here: /* Now turn on the MMU for real! */ li r4,MSR_KERNEL - FIX_SRR1(r4,r5) lis r3,start_kernel@h ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1a9c99d3e5d8..c032fe8c2d26 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -329,6 +329,12 @@ InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 #endif EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -429,6 +435,12 @@ InstructionTLBMiss: DataStoreTLBMiss: mtspr SPRN_SPRG_SCRATCH2, r3 EXCEPTION_PROLOG_0 +#ifdef CONFIG_PPC_8xx_PERF_EVENT + lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha + lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, 1 + stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) +#endif mfcr r3 /* If we are faulting a kernel address, we have to use the @@ -561,6 +573,7 @@ InstructionTLBError: andis. r10,r5,0x4000 beq+ 1f tlbie r4 +itlbie: /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ 1: EXC_XFER_LITE(0x400, handle_page_fault) @@ -585,6 +598,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */ andis. r10,r5,0x4000 beq+ 1f tlbie r4 +dtlbie: 1: li r10,RPN_PATTERN mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */ @@ -602,8 +616,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */ * support of breakpoints and such. Someday I will get around to * using them. */ - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) + . = 0x1c00 +DataBreakpoint: + EXCEPTION_PROLOG_0 + mfcr r10 + mfspr r11, SPRN_SRR0 + cmplwi cr0, r11, (dtlbie - PAGE_OFFSET)@l + cmplwi cr7, r11, (itlbie - PAGE_OFFSET)@l + beq- cr0, 11f + beq- cr7, 11f + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 + addi r3,r1,STACK_FRAME_OVERHEAD + mfspr r4,SPRN_BAR + stw r4,_DAR(r11) + mfspr r5,SPRN_DSISR + EXC_XFER_EE(0x1c00, do_break) +11: + mtcr r10 + EXCEPTION_EPILOG_0 + rfi + +#ifdef CONFIG_PPC_8xx_PERF_EVENT + . 
= 0x1d00 +InstructionBreakpoint: + EXCEPTION_PROLOG_0 + lis r10, (instruction_counter - PAGE_OFFSET)@ha + lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10) + addi r11, r11, -1 + stw r11, (instruction_counter - PAGE_OFFSET)@l(r10) + lis r10, 0xffff + ori r10, r10, 0x01 + mtspr SPRN_COUNTA, r10 + EXCEPTION_EPILOG_0 + rfi +#else EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) +#endif EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) @@ -977,6 +1026,14 @@ initial_mmu: lis r8, IDC_ENABLE@h mtspr SPRN_DC_CST, r8 #endif + /* Disable debug mode entry on breakpoints */ + mfspr r8, SPRN_DER +#ifdef CONFIG_PPC_8xx_PERF_EVENT + rlwinm r8, r8, 0, ~0xc +#else + rlwinm r8, r8, 0, ~0x8 +#endif + mtspr SPRN_DER, r8 blr @@ -1010,3 +1067,16 @@ cpu6_errata_word: .space 16 #endif +#ifdef CONFIG_PPC_8xx_PERF_EVENT + .globl itlb_miss_counter +itlb_miss_counter: + .space 4 + + .globl dtlb_miss_counter +dtlb_miss_counter: + .space 4 + + .globl instruction_counter +instruction_counter: + .space 4 +#endif diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 53cc9270aac8..53b9c1dfd7d9 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -211,9 +211,11 @@ int hw_breakpoint_handler(struct die_args *args) int rc = NOTIFY_STOP; struct perf_event *bp; struct pt_regs *regs = args->regs; +#ifndef CONFIG_PPC_8xx int stepped = 1; - struct arch_hw_breakpoint *info; unsigned int instr; +#endif + struct arch_hw_breakpoint *info; unsigned long dar = regs->dar; /* Disable breakpoints during exception handling */ @@ -257,6 +259,7 @@ int hw_breakpoint_handler(struct die_args *args) (dar - bp->attr.bp_addr < bp->attr.bp_len))) info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; +#ifndef CONFIG_PPC_8xx /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { current->thread.last_hit_ubp = bp; @@ -280,6 +283,7 @@ int hw_breakpoint_handler(struct die_args *args) perf_event_disable_inatomic(bp); goto out; } +#endif /* * As a policy, the callback is invoked in a 'trigger-after-execute' * fashion diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 5f8613ceb97f..a582e0d42525 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -12,7 +12,7 @@ #undef DEBUG #include <linux/kernel.h> -#include <linux/sched.h> /* for init_mm */ +#include <linux/sched/mm.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 53e429b5a29d..4937bef7652f 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -65,6 +65,13 @@ optprobe_template_entry: mfdsisr r5 std r5,_DSISR(r1) + /* + * We may get here from a module, so load the kernel TOC in r2. + * The original TOC gets restored when pt_regs is restored + * further below. 
+ */ + ld r2,PACATOC(r13) + .global optprobe_template_op_address optprobe_template_op_address: /* diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index fa20060ff7a5..dfc479df9634 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/export.h> #include <linux/memblock.h> +#include <linux/sched/task.h> #include <asm/lppaca.h> #include <asm/paca.h> diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a3f5334f5d8c..ffda24a38dda 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -60,14 +60,14 @@ resource_size_t isa_mem_base; EXPORT_SYMBOL(isa_mem_base); -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } @@ -1560,16 +1560,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; - if (!res->flags) { - if (i == 0) - printk(KERN_ERR "PCI: Memory resource 0 not set for " - "host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + if (!res->flags) continue; - } - offset = hose->mem_offset[i]; - + offset = hose->mem_offset[i]; pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i, res, (unsigned long long)offset); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 5dd056df0baa..d645da302bf2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -16,6 +16,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> @@ -730,6 +733,28 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) mtspr(SPRN_DABRX, dabrx); return 0; } +#elif defined(CONFIG_PPC_8xx) +static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) +{ + unsigned long addr = dabr & ~HW_BRK_TYPE_DABR; + unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */ + unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */ + + if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ) + lctrl1 |= 0xa0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE) + lctrl1 |= 0xf0000; + else if ((dabr & HW_BRK_TYPE_RDWR) == 0) + lctrl2 = 0; + + mtspr(SPRN_LCTRL2, 0); + mtspr(SPRN_CMPE, addr); + mtspr(SPRN_CMPF, addr + 4); + mtspr(SPRN_LCTRL1, lctrl1); + mtspr(SPRN_LCTRL2, lctrl2); + + return 0; +} #else static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 616de028f7f8..a3944540fe0d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -839,7 +839,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { 0, #endif .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), - .bin_opts = OV5_FEAT(OV5_RESIZE_HPT), + .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), .micro_checkpoint = 0, .reserved0 = 0, .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ diff --git 
a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b9855f1b290a..adf2084f214b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -113,14 +113,12 @@ void __init setup_tlb_core_data(void) * If we have threads, we need either tlbsrx. * or e6500 tablewalk mode, or else TLB handlers * will be racy and could produce duplicate entries. + * Should we panic instead? */ - if (smt_enabled_at_boot >= 2 && - !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && - book3e_htw_mode != PPC_HTW_E6500) { - /* Should we panic instead? */ - WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", - __func__); - } + WARN_ONCE(smt_enabled_at_boot >= 2 && + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && + book3e_htw_mode != PPC_HTW_E6500, + "%s: unsupported MMU configuration\n", __func__); } } #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 893bd7f79be6..46f89e66a273 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -19,7 +19,8 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/topology.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -707,7 +708,7 @@ void start_secondary(void *unused) unsigned int cpu = smp_processor_id(); int i, base; - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; smp_store_cpu_info(cpu); @@ -795,7 +796,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * se we pin us down to CPU 0 for a short while */ alloc_cpumask_var(&old_mask, GFP_NOWAIT); - cpumask_copy(old_mask, tsk_cpus_allowed(current)); + cpumask_copy(old_mask, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 4f24606afc3f..66711958493c 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -12,6 +12,7 @@ #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/ptrace.h> #include <asm/processor.h> diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 0e899e47c325..51db012808f5 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -10,6 +10,7 @@ #include <linux/irq.h> #include <linux/sched.h> #include <linux/interrupt.h> +#include <linux/nmi.h> void do_after_copyback(void) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 14e485525e31..07b90725855e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -34,6 +34,7 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> @@ -57,7 +58,7 @@ #include <linux/clk-provider.h> #include <linux/suspend.h> #include <linux/rtc.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/trace.h> #include <asm/io.h> @@ -709,7 +710,7 @@ unsigned long long running_clock(void) * time and on a host which doesn't do any virtualisation TB *should* equal * VTB so it makes no difference anyway. 
*/ - return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]); + return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL]; } #endif diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index e6cc56b61d01..ff365f9de27a 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index ab9d14c0e460..3e26cd4979f9 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -24,6 +24,7 @@ #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/hugetlb.h> #include <linux/list.h> #include <linux/anon_inodes.h> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1e107ece4e37..1ec86d9e2a82 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -22,7 +22,8 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/preempt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/stat.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/fs.h> diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index 5a1ab1250a05..905a934c1ef4 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c @@ -21,6 +21,7 @@ #include <linux/kvm_host.h> #include <linux/hash.h> #include <linux/slab.h> +#include <linux/rculist.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index b0333cc737dd..0fda4230f6c0 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -25,7 +25,7 @@ #include <linux/highmem.h> #include <linux/log2.h> #include <linux/uaccess.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2b38d824e9e5..95c91a9de351 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -23,6 +23,7 @@ #include <linux/kvm_host.h> #include <linux/vmalloc.h> #include <linux/hrtimer.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 0899315e1434..0d3002b7e2b4 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -14,6 +14,7 @@ #include <asm/page.h> #include <asm/code-patching.h> #include <linux/uaccess.h> +#include <linux/kprobes.h> int patch_instruction(unsigned int *addr, unsigned int instr) diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 043415f0bdb1..f3917705c686 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> +#include <linux/sched/mm.h> #include <asm/cputable.h> #include <asm/code-patching.h> #include <asm/page.h> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8dc758658972..51def8a515be 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -17,6 +17,7 @@ #include <linux/signal.h> 
#include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 12d679df50bd..c554768b1fa2 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -23,7 +23,7 @@ #include <linux/spinlock.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/sysctl.h> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 2f1e44362198..a5d9ef59debe 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -25,7 +25,8 @@ #include <linux/personality.h> #include <linux/mm.h> #include <linux/random.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/elf-randomize.h> #include <linux/security.h> #include <linux/mman.h> diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 7de7124ac91b..497130c5c742 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -10,7 +10,7 @@ * */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/rculist.h> #include <linux/vmalloc.h> diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index b798ff674fab..5fcb3dd74c13 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -8,6 +8,8 @@ */ #include <linux/sched.h> +#include <linux/mm_types.h> + #include <asm/pgalloc.h> #include <asm/tlb.h> diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index c23e286a6b8f..8b85a14b08ea 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -10,6 +10,8 @@ */ #include <linux/sched.h> +#include <linux/mm_types.h> + #include <asm/pgalloc.h> #include <asm/tlb.h> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index feeda90cd06d..2a590a98e652 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/memblock.h> #include <linux/of_fdt.h> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index cb39c8bd2436..a03ff3d99e0c 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -193,9 +193,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, */ VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep)); - /* - * Add the pte bit when tryint set a pte - */ + /* Add the pte bit when trying to set a pte */ pte = __pte(pte_val(pte) | _PAGE_PTE); /* Note: mm->context.id might not yet have been assigned as diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 48fc28bab544..5e01b2ece1d0 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -22,6 +22,8 @@ #include <asm/cacheflush.h> #include <asm/smp.h> #include <linux/compiler.h> +#include <linux/mm_types.h> + #include <asm/udbg.h> #include <asm/code-patching.h> diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index e2974fcd20f1..a85e06ea6c20 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -71,9 +71,9 @@ slb_miss_kernel_load_linear: BEGIN_FTR_SECTION - b slb_finish_load + b .Lslb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) - b slb_finish_load_1T + b .Lslb_finish_load_1T 1: #ifdef CONFIG_SPARSEMEM_VMEMMAP @@ -109,9 +109,9 @@ slb_miss_kernel_load_io: addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l BEGIN_FTR_SECTION - b slb_finish_load + b .Lslb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) - b slb_finish_load_1T + b .Lslb_finish_load_1T 0: /* * For userspace addresses, make sure this is region 0. @@ -174,9 +174,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 - bge slb_finish_load_1T + bge .Lslb_finish_load_1T END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - b slb_finish_load + b .Lslb_finish_load 8: /* invalid EA - return an error indication */ crset 4*cr0+eq /* indicate failure */ @@ -187,7 +187,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) * * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET */ -slb_finish_load: +.Lslb_finish_load: rldimi r10,r9,ESID_BITS,0 ASM_VSID_SCRAMBLE(r10,r9,256M) /* @@ -256,7 +256,7 @@ slb_compare_rr_to_size: * * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 */ -slb_finish_load_1T: +.Lslb_finish_load_1T: srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ rldimi r10,r9,ESID_BITS_1T,0 ASM_VSID_SCRAMBLE(r10,r9,1T) @@ -272,3 +272,11 @@ slb_finish_load_1T: clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */ b 7b + +_ASM_NOKPROBE_SYMBOL(slb_allocate_realmode) +_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear) +_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io) +_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size) +#ifdef CONFIG_SPARSEMEM_VMEMMAP +_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap) +#endif diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c new file mode 100644 index 000000000000..3c39f05f0af3 --- /dev/null +++ b/arch/powerpc/perf/8xx-pmu.c @@ -0,0 +1,173 @@ +/* + * Performance event support - PPC 8xx + * + * Copyright 2016 Christophe Leroy, CS Systemes d'Information + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/perf_event.h> +#include <linux/percpu.h> +#include <linux/hardirq.h> +#include <asm/pmc.h> +#include <asm/machdep.h> +#include <asm/firmware.h> +#include <asm/ptrace.h> + +#define PERF_8xx_ID_CPU_CYCLES 1 +#define PERF_8xx_ID_HW_INSTRUCTIONS 2 +#define PERF_8xx_ID_ITLB_LOAD_MISS 3 +#define PERF_8xx_ID_DTLB_LOAD_MISS 4 + +#define C(x) PERF_COUNT_HW_CACHE_##x +#define DTLB_LOAD_MISS (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)) +#define ITLB_LOAD_MISS (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)) + +extern unsigned long itlb_miss_counter, dtlb_miss_counter; +extern atomic_t instruction_counter; + +static atomic_t insn_ctr_ref; + +static s64 get_insn_ctr(void) +{ + int ctr; + unsigned long counta; + + do { + ctr = atomic_read(&instruction_counter); + counta = mfspr(SPRN_COUNTA); + } while (ctr != atomic_read(&instruction_counter)); + + return ((s64)ctr << 16) | (counta >> 16); +} + +static int event_type(struct perf_event *event) +{ + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) + return PERF_8xx_ID_CPU_CYCLES; + if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) + return PERF_8xx_ID_HW_INSTRUCTIONS; + break; + case PERF_TYPE_HW_CACHE: + if (event->attr.config == ITLB_LOAD_MISS) + return PERF_8xx_ID_ITLB_LOAD_MISS; + if (event->attr.config == DTLB_LOAD_MISS) + return PERF_8xx_ID_DTLB_LOAD_MISS; + break; + case PERF_TYPE_RAW: + break; + default: + return -ENOENT; + } + return -EOPNOTSUPP; +} + +static int mpc8xx_pmu_event_init(struct perf_event *event) +{ + int type = event_type(event); + + if (type < 0) + return type; + return 0; +} + +static int mpc8xx_pmu_add(struct perf_event *event, int flags) +{ + int type = event_type(event); + s64 val = 0; + + if (type < 0) + return type; + + switch (type) { + case PERF_8xx_ID_CPU_CYCLES: + val = get_tb(); + break; + case PERF_8xx_ID_HW_INSTRUCTIONS: + if (atomic_inc_return(&insn_ctr_ref) == 1) + mtspr(SPRN_ICTRL, 0xc0080007); + val = get_insn_ctr(); + break; + case PERF_8xx_ID_ITLB_LOAD_MISS: + val = itlb_miss_counter; + break; + case PERF_8xx_ID_DTLB_LOAD_MISS: + val = dtlb_miss_counter; + break; + } + local64_set(&event->hw.prev_count, val); + return 0; +} + +static void mpc8xx_pmu_read(struct perf_event *event) +{ + int type = event_type(event); + s64 prev, val = 0, delta = 0; + + if (type < 0) + return; + + do { + prev = local64_read(&event->hw.prev_count); + switch (type) { + case PERF_8xx_ID_CPU_CYCLES: + val = get_tb(); + delta = 16 * (val - prev); + break; + case PERF_8xx_ID_HW_INSTRUCTIONS: + val = get_insn_ctr(); + delta = prev - val; + if (delta < 0) + delta += 0x1000000000000LL; + break; + case PERF_8xx_ID_ITLB_LOAD_MISS: + val = itlb_miss_counter; + delta = (s64)((s32)val - (s32)prev); + break; + case PERF_8xx_ID_DTLB_LOAD_MISS: + val = dtlb_miss_counter; + delta = (s64)((s32)val - (s32)prev); + break; + } + } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); + + local64_add(delta, &event->count); +} + +static void mpc8xx_pmu_del(struct perf_event *event, int flags) +{ + mpc8xx_pmu_read(event); + if (event_type(event) != PERF_8xx_ID_HW_INSTRUCTIONS) + return; + + /* If it was the last user, stop counting to avoid useless overhead */ + if (atomic_dec_return(&insn_ctr_ref) == 0) + mtspr(SPRN_ICTRL, 7); +} + +static struct pmu mpc8xx_pmu = { + .event_init = mpc8xx_pmu_event_init, + .add = mpc8xx_pmu_add, + .del = mpc8xx_pmu_del, + .read = mpc8xx_pmu_read, + 
.capabilities = PERF_PMU_CAP_NO_INTERRUPT | + PERF_PMU_CAP_NO_NMI, +}; + +static int init_mpc8xx_pmu(void) +{ + mtspr(SPRN_ICTRL, 7); + mtspr(SPRN_CMPA, 0); + mtspr(SPRN_COUNTA, 0xffff); + + return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW); +} + +early_initcall(init_mpc8xx_pmu); diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index f102d5370101..4d606b99a5cb 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -13,5 +13,7 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o +obj-$(CONFIG_PPC_8xx_PERF_EVENT) += 8xx-pmu.o + obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC32) += $(obj32-y) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 270eb9b74e2e..595dd718ea87 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -57,6 +57,7 @@ struct cpu_hw_events { void *bhrb_context; struct perf_branch_stack bhrb_stack; struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES]; + u64 ic_init; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); @@ -127,6 +128,10 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {} static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {} static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } +static bool use_ic(u64 event) +{ + return false; +} #endif /* CONFIG_PPC32 */ static bool regs_use_siar(struct pt_regs *regs) @@ -243,7 +248,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs) */ if (ppmu->flags & PPMU_NO_SIPR) { unsigned long siar = mfspr(SPRN_SIAR); - if (siar >= PAGE_OFFSET) + if (is_kernel_addr(siar)) return PERF_RECORD_MISC_KERNEL; return PERF_RECORD_MISC_USER; } @@ -688,6 +693,15 @@ static void pmao_restore_workaround(bool ebb) mtspr(SPRN_PMC5, pmcs[4]); mtspr(SPRN_PMC6, pmcs[5]); } + +static bool use_ic(u64 event) +{ + if (cpu_has_feature(CPU_FTR_POWER9_DD1) && + (event == 0x200f2 || event == 0x300f2)) + return true; + + return false; +} #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); @@ -1007,6 +1021,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val) static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; + struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); if (event->hw.state & PERF_HES_STOPPED) return; @@ -1016,6 +1031,13 @@ static void power_pmu_read(struct perf_event *event) if (is_ebb_event(event)) { val = read_pmc(event->hw.idx); + if (use_ic(event->attr.config)) { + val = mfspr(SPRN_IC); + if (val > cpuhw->ic_init) + val = val - cpuhw->ic_init; + else + val = val + (0 - cpuhw->ic_init); + } local64_set(&event->hw.prev_count, val); return; } @@ -1029,6 +1051,13 @@ static void power_pmu_read(struct perf_event *event) prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); + if (use_ic(event->attr.config)) { + val = mfspr(SPRN_IC); + if (val > cpuhw->ic_init) + val = val - cpuhw->ic_init; + else + val = val + (0 - cpuhw->ic_init); + } delta = check_and_compute_delta(prev, val); if (!delta) return; @@ -1466,6 +1495,13 @@ nocheck: event->attr.branch_sample_type); } + /* + * Workaround for POWER9 DD1 to use the Instruction Counter + * register value for instruction counting + */ + if (use_ic(event->attr.config)) + cpuhw->ic_init = mfspr(SPRN_IC); + perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; diff --git 
a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 50e598cf644b..e79fb5fb817d 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -97,6 +97,28 @@ static unsigned long combine_shift(unsigned long pmc) return MMCR1_COMBINE_SHIFT(pmc); } +static inline bool event_is_threshold(u64 event) +{ + return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; +} + +static bool is_thresh_cmp_valid(u64 event) +{ + unsigned int cmp, exp; + + /* + * Check the mantissa upper two bits are not zero, unless the + * exponent is also zero. See the THRESH_CMP_MANTISSA doc. + */ + cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; + exp = cmp >> 7; + + if (exp && (cmp & 0x60) == 0) + return false; + + return true; +} + int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { unsigned int unit, pmc, cache, ebb; @@ -163,28 +185,26 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); } - /* - * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, - * the threshold control bits are used for the match value. - */ - if (event_is_fab_match(event)) { - mask |= CNST_FAB_MATCH_MASK; - value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { + mask |= CNST_THRESH_MASK; + value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); + } } else { /* - * Check the mantissa upper two bits are not zero, unless the - * exponent is also zero. See the THRESH_CMP_MANTISSA doc. + * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, + * the threshold control bits are used for the match value. */ - unsigned int cmp, exp; - - cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; - exp = cmp >> 7; - - if (exp && (cmp & 0x60) == 0) - return -1; + if (event_is_fab_match(event)) { + mask |= CNST_FAB_MATCH_MASK; + value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); + } else { + if (!is_thresh_cmp_valid(event)) + return -1; - mask |= CNST_THRESH_MASK; - value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); + mask |= CNST_THRESH_MASK; + value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); + } } if (!pmc && ebb) @@ -279,7 +299,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, * the threshold bits are used for the match value. 
*/ - if (event_is_fab_match(event[i])) { + if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) { mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT; } else { @@ -338,3 +358,39 @@ void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]) if (pmc <= 3) mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); } + +static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size) +{ + int i, j; + + for (i = 0; i < size; ++i) { + if (event < ev_alt[i][0]) + break; + + for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j) + if (event == ev_alt[i][j]) + return i; + } + + return -1; +} + +int isa207_get_alternatives(u64 event, u64 alt[], + const unsigned int ev_alt[][MAX_ALT], int size) +{ + int i, j, num_alt = 0; + u64 alt_event; + + alt[num_alt++] = event; + i = find_alternative(event, ev_alt, size); + if (i >= 0) { + /* Filter out the original event, it's already in alt[0] */ + for (j = 0; j < MAX_ALT; ++j) { + alt_event = ev_alt[i][j]; + if (alt_event && alt_event != event) + alt[num_alt++] = alt_event; + } + } + + return num_alt; +} diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 90495f1580c7..cf9bd8990159 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -222,6 +222,10 @@ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL +/* + * Lets restrict use of PMC5 for instruction counting. + */ +#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5)) /* Bits in MMCR1 for PowerISA v2.07 */ #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) @@ -260,5 +264,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]); void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]); +int isa207_get_alternatives(u64 event, u64 alt[], + const unsigned int ev_alt[][MAX_ALT], int size); + #endif diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index d24a8a3668fa..cbd82fde5770 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/perf_event.h> #include <linux/bug.h> #include <linux/stddef.h> diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index d07186382f3a..ce15b19a7962 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -48,43 +48,12 @@ static const unsigned int event_alternatives[][MAX_ALT] = { { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, }; -/* - * Scan the alternatives table for a match and return the - * index into the alternatives table if found, else -1. 
- */ -static int find_alternative(u64 event) -{ - int i, j; - - for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { - if (event < event_alternatives[i][0]) - break; - - for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) - if (event == event_alternatives[i][j]) - return i; - } - - return -1; -} - static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, num_alt = 0; - u64 alt_event; - - alt[num_alt++] = event; - - i = find_alternative(event); - if (i >= 0) { - /* Filter out the original event, it's already in alt[0] */ - for (j = 0; j < MAX_ALT; ++j) { - alt_event = event_alternatives[i][j]; - if (alt_event && alt_event != event) - alt[num_alt++] = alt_event; - } - } + num_alt = isa207_get_alternatives(event, alt, event_alternatives, + (int)ARRAY_SIZE(event_alternatives)); if (flags & PPMU_ONLY_COUNT_RUN) { /* * We're only counting in RUN state, so PM_CYC is equivalent to diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index 929b56d47ad9..71a6bfee5c02 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -53,3 +53,6 @@ EVENT(PM_ITLB_MISS, 0x400fc) EVENT(PM_RUN_INST_CMPL, 0x500fa) /* Run_cycles */ EVENT(PM_RUN_CYC, 0x600f4) +/* Instruction Dispatched */ +EVENT(PM_INST_DISP, 0x200f2) +EVENT(PM_INST_DISP_ALT, 0x300f2) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 7332634e18c9..7f6582708e06 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -22,7 +22,7 @@ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | * | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ] * | | | | | - * | | *- IFM (Linux) | thresh start/stop OR FAB match -* + * | | *- IFM (Linux) | thresh start/stop -* * | *- BHRB (Linux) *sm * *- EBB (Linux) * @@ -50,11 +50,9 @@ * MMCR1[31] = pmc4combine[1] * * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 - * # PM_MRK_FAB_RSP_MATCH - * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * MMCR1[20:27] = thresh_ctl * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 - * # PM_MRK_FAB_RSP_MATCH_CYC - * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * MMCR1[20:27] = thresh_ctl * else * MMCRA[48:55] = thresh_ctl (THRESH START/END) * @@ -106,6 +104,21 @@ enum { /* PowerISA v2.07 format attribute structure*/ extern struct attribute_group isa207_pmu_format_group; +/* Table of alternatives, sorted by column 0 */ +static const unsigned int power9_event_alternatives[][MAX_ALT] = { + { PM_INST_DISP, PM_INST_DISP_ALT }, +}; + +static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[]) +{ + int num_alt = 0; + + num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives, + (int)ARRAY_SIZE(power9_event_alternatives)); + + return num_alt; +} + GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC); GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); @@ -213,6 +226,17 @@ static const struct attribute_group *power9_pmu_attr_groups[] = { NULL, }; +static int power9_generic_events_dd1[] = { + [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL, + [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL, + [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + 
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN, +}; + static int power9_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC, @@ -383,10 +407,11 @@ static struct power_pmu power9_isa207_pmu = { .config_bhrb = power9_config_bhrb, .bhrb_filter_map = power9_bhrb_filter_map, .get_constraint = isa207_get_constraint, + .get_alternatives = power9_get_alternatives, .disable_pmc = isa207_disable_pmc, .flags = PPMU_NO_SIAR | PPMU_ARCH_207S, - .n_generic = ARRAY_SIZE(power9_generic_events), - .generic_events = power9_generic_events, + .n_generic = ARRAY_SIZE(power9_generic_events_dd1), + .generic_events = power9_generic_events_dd1, .cache_events = &power9_cache_events, .attr_groups = power9_isa207_pmu_attr_groups, .bhrb_nr = 32, @@ -396,11 +421,12 @@ static struct power_pmu power9_pmu = { .name = "POWER9", .n_counter = MAX_PMU_COUNTERS, .add_fields = ISA207_ADD_FIELDS, - .test_adder = ISA207_TEST_ADDER, + .test_adder = P9_DD1_TEST_ADDER, .compute_mmcr = isa207_compute_mmcr, .config_bhrb = power9_config_bhrb, .bhrb_filter_map = power9_bhrb_filter_map, .get_constraint = isa207_get_constraint, + .get_alternatives = power9_get_alternatives, .disable_pmc = isa207_disable_pmc, .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power9_generic_events), @@ -420,6 +446,11 @@ static int __init init_power9_pmu(void) return -ENODEV; if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + /* + * Since PM_INST_CMPL may not provide right counts in all + * sampling scenarios in power9 DD1, instead use PM_INST_DISP. + */ + EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP; rc = register_power_pmu(&power9_isa207_pmu); } else { rc = register_power_pmu(&power9_pmu); diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 08f92f6ed228..978b85bb3233 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -15,6 +15,7 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/suspend.h> diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index 7bc86dae9517..fe19dad568e2 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_P1022_RDK) += p1022_rdk.o obj-$(CONFIG_P1023_RDB) += p1023_rdb.o obj-$(CONFIG_TWR_P102x) += twr_p102x.o obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o +obj-$(CONFIG_FB_FSL_DIU) += t1042rdb_diu.o obj-$(CONFIG_STX_GP3) += stx_gp3.o obj-$(CONFIG_TQM85xx) += tqm85xx.o obj-$(CONFIG_SBC8548) += sbc8548.o diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 6c0ba75fb256..ac191a7a1337 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -157,6 +157,7 @@ static const char * const boards[] __initconst = { "fsl,T1040RDB", "fsl,T1042RDB", "fsl,T1042RDB_PI", + "keymile,kmcent2", "keymile,kmcoge4", "varisys,CYRUS", NULL diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index a83a6d26090d..078097a0b09d 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -12,6 +12,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> +#include <linux/sched/hotplug.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/of.h> diff --git 
a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c new file mode 100644 index 000000000000..58fa3d319f1c --- /dev/null +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c @@ -0,0 +1,152 @@ +/* + * T1042 platform DIU operation + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include <sysdev/fsl_soc.h> + +/*DIU Pixel ClockCR offset in scfg*/ +#define CCSR_SCFG_PIXCLKCR 0x28 + +/* DIU Pixel Clock bits of the PIXCLKCR */ +#define PIXCLKCR_PXCKEN 0x80000000 +#define PIXCLKCR_PXCKINV 0x40000000 +#define PIXCLKCR_PXCKDLY 0x0000FF00 +#define PIXCLKCR_PXCLK_MASK 0x00FF0000 + +/* Some CPLD register definitions */ +#define CPLD_DIUCSR 0x16 +#define CPLD_DIUCSR_DVIEN 0x80 +#define CPLD_DIUCSR_BACKLIGHT 0x0f + +struct device_node *cpld_node; + +/** + * t1042rdb_set_monitor_port: switch the output to a different monitor port + */ +static void t1042rdb_set_monitor_port(enum fsl_diu_monitor_port port) +{ + static void __iomem *cpld_base; + + cpld_base = of_iomap(cpld_node, 0); + if (!cpld_base) { + pr_err("%s: Could not map cpld registers\n", __func__); + goto exit; + } + + switch (port) { + case FSL_DIU_PORT_DVI: + /* Enable the DVI(HDMI) port, disable the DFP and + * the backlight + */ + clrbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_DVIEN); + break; + case FSL_DIU_PORT_LVDS: + /* + * LVDS also needs backlight enabled, otherwise the display + * will be blank. + */ + /* Enable the DFP port, disable the DVI*/ + setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 8); + setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 4); + setbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_BACKLIGHT); + break; + default: + pr_err("%s: Unsupported monitor port %i\n", __func__, port); + } + + iounmap(cpld_base); +exit: + of_node_put(cpld_node); +} + +/** + * t1042rdb_set_pixel_clock: program the DIU's clock + * @pixclock: pixel clock in ps (pico seconds) + */ +static void t1042rdb_set_pixel_clock(unsigned int pixclock) +{ + struct device_node *scfg_np; + void __iomem *scfg; + unsigned long freq; + u64 temp; + u32 pxclk; + + scfg_np = of_find_compatible_node(NULL, NULL, "fsl,t1040-scfg"); + if (!scfg_np) { + pr_err("%s: Missing scfg node. Can not display video.\n", + __func__); + return; + } + + scfg = of_iomap(scfg_np, 0); + of_node_put(scfg_np); + if (!scfg) { + pr_err("%s: Could not map device. Can not display video.\n", + __func__); + return; + } + + /* Convert pixclock into frequency */ + temp = 1000000000000ULL; + do_div(temp, pixclock); + freq = temp; + + /* + * 'pxclk' is the ratio of the platform clock to the pixel clock. + * This number is programmed into the PIXCLKCR register, and the valid + * range of values is 2-255. 
+ */ + pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); + pxclk = clamp_t(u32, pxclk, 2, 255); + + /* Disable the pixel clock, and set it to non-inverted and no delay */ + clrbits32(scfg + CCSR_SCFG_PIXCLKCR, + PIXCLKCR_PXCKEN | PIXCLKCR_PXCKDLY | PIXCLKCR_PXCLK_MASK); + + /* Enable the clock and set the pxclk */ + setbits32(scfg + CCSR_SCFG_PIXCLKCR, PIXCLKCR_PXCKEN | (pxclk << 16)); + + iounmap(scfg); +} + +/** + * t1042rdb_valid_monitor_port: set the monitor port for sysfs + */ +static enum fsl_diu_monitor_port +t1042rdb_valid_monitor_port(enum fsl_diu_monitor_port port) +{ + switch (port) { + case FSL_DIU_PORT_DVI: + case FSL_DIU_PORT_LVDS: + return port; + default: + return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */ + } +} + +static int __init t1042rdb_diu_init(void) +{ + cpld_node = of_find_compatible_node(NULL, NULL, "fsl,t1042rdb-cpld"); + if (!cpld_node) + return 0; + + diu_ops.set_monitor_port = t1042rdb_set_monitor_port; + diu_ops.set_pixel_clock = t1042rdb_set_pixel_clock; + diu_ops.valid_monitor_port = t1042rdb_valid_monitor_port; + + return 0; +} + +early_initcall(t1042rdb_diu_init); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 6e89e5a8d4fb..99b0ae8acb78 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -172,6 +172,13 @@ config PPC_FPU bool default y if PPC64 +config PPC_8xx_PERF_EVENT + bool "PPC 8xx perf events" + depends on PPC_8xx && PERF_EVENTS + help + This is Performance Events support for PPC 8xx. The 8xx doesn't + have a PMU but some events are emulated using 8xx features. + config FSL_EMB_PERFMON bool "Freescale Embedded Perfmon" depends on E500 || PPC_83xx diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 88301e53f085..882944c36ef5 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -22,6 +22,7 @@ #include <linux/cpufreq.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 7ff51f96a00e..71b995bbcae0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); -static struct dma_map_ops dma_iommu_fixed_ops = { +static const struct dma_map_ops dma_iommu_fixed_ops = { .alloc = dma_fixed_alloc_coherent, .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, @@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, return 0; /* We use the PCI DMA ops */ - dev->archdata.dma_ops = get_pci_dma_ops(); + dev->dma_ops = get_pci_dma_ops(); cell_dma_dev_setup(dev); @@ -1172,7 +1172,7 @@ __setup("iommu_fixed=", setup_iommu_fixed); static u64 cell_dma_get_required_mask(struct device *dev) { - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; if (!dev->dma_mask) return 0; diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 3b4152faeb1f..b500b17254a0 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -25,6 +25,8 @@ #include <linux/slab.h> #include <linux/atomic.h> #include <linux/sched.h> 
+#include <linux/sched/mm.h> + #include <asm/spu.h> #include <asm/spu_csa.h> #include "spufs.h" diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index e29e4d5afa2d..870c0a82d560 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -19,7 +19,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <asm/spu.h> diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index e5ec1368f0cd..ae2f740a82f1 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -683,23 +683,13 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data) return ctx->ops->ibox_read(ctx, data); } -static int spufs_ibox_fasync(int fd, struct file *file, int on) -{ - struct spu_context *ctx = file->private_data; - - return fasync_helper(fd, file, on, &ctx->ibox_fasync); -} - /* interrupt-level ibox callback function. */ void spufs_ibox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; - if (!ctx) - return; - - wake_up_all(&ctx->ibox_wq); - kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); + if (ctx) + wake_up_all(&ctx->ibox_wq); } /* @@ -794,7 +784,6 @@ static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, - .fasync = spufs_ibox_fasync, .llseek = no_llseek, }; @@ -832,26 +821,13 @@ size_t spu_wbox_write(struct spu_context *ctx, u32 data) return ctx->ops->wbox_write(ctx, data); } -static int spufs_wbox_fasync(int fd, struct file *file, int on) -{ - struct spu_context *ctx = file->private_data; - int ret; - - ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); - - return ret; -} - /* interrupt-level wbox callback function. 
*/ void spufs_wbox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; - if (!ctx) - return; - - wake_up_all(&ctx->wbox_wq); - kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); + if (ctx) + wake_up_all(&ctx->wbox_wq); } /* @@ -944,7 +920,6 @@ static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, - .fasync = spufs_wbox_fasync, .llseek = no_llseek, }; @@ -1520,28 +1495,8 @@ void spufs_mfc_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; - if (!ctx) - return; - - wake_up_all(&ctx->mfc_wq); - - pr_debug("%s %s\n", __func__, spu->name); - if (ctx->mfc_fasync) { - u32 free_elements, tagstatus; - unsigned int mask; - - /* no need for spu_acquire in interrupt context */ - free_elements = ctx->ops->get_mfc_free_elements(ctx); - tagstatus = ctx->ops->read_mfc_tagstatus(ctx); - - mask = 0; - if (free_elements & 0xffff) - mask |= POLLOUT; - if (tagstatus & ctx->tagwait) - mask |= POLLIN; - - kill_fasync(&ctx->mfc_fasync, SIGIO, mask); - } + if (ctx) + wake_up_all(&ctx->mfc_wq); } static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) @@ -1803,13 +1758,6 @@ static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int data return err; } -static int spufs_mfc_fasync(int fd, struct file *file, int on) -{ - struct spu_context *ctx = file->private_data; - - return fasync_helper(fd, file, on, &ctx->mfc_fasync); -} - static const struct file_operations spufs_mfc_fops = { .open = spufs_mfc_open, .release = spufs_mfc_release, @@ -1818,7 +1766,6 @@ static const struct file_operations spufs_mfc_fops = { .poll = spufs_mfc_poll, .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, - .fasync = spufs_mfc_fasync, .mmap = spufs_mfc_mmap, .llseek = no_llseek, }; diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 460f5f31d5cb..1fbb5da17dd2 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -23,7 +23,8 @@ #undef DEBUG #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/loadavg.h> #include <linux/sched/rt.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -140,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) * runqueue. The context will be rescheduled on the proper node * if it is timesliced or preempted. */ - cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&ctx->cpus_allowed, ¤t->cpus_allowed); /* Save the current cpu id for spu interrupt routing. 
*/ ctx->last_ran = raw_smp_processor_id(); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index bcfd6f063efa..5e59f80e95db 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -27,6 +27,7 @@ #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/cpumask.h> +#include <linux/sched/signal.h> #include <asm/spu.h> #include <asm/spu_csa.h> @@ -102,9 +103,6 @@ struct spu_context { wait_queue_head_t stop_wq; wait_queue_head_t mfc_wq; wait_queue_head_t run_wq; - struct fasync_struct *ibox_fasync; - struct fasync_struct *wbox_fasync; - struct fasync_struct *mfc_fasync; u32 tagwait; struct spu_context_ops *ops; struct work_struct reap_work; diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index e74adc4e7fd8..7fec04de27fc 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) */ if (dev->vendor == 0x1959 && dev->device == 0xa007 && !firmware_has_feature(FW_FEATURE_LPAR)) { - dev->dev.archdata.dma_ops = &dma_direct_ops; + dev->dev.dma_ops = &dma_direct_ops; /* * Set the coherent DMA mask to prevent the iommu * being used unnecessarily diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 3182400cf48f..c4a3e93dc324 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action, return 0; /* We use the direct ops for localbus */ - dev->archdata.dma_ops = &dma_direct_ops; + dev->dma_ops = &dma_direct_ops; return 0; } diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c9eb7d6540ea..746ca7321b03 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -23,6 +23,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 604190cab522..3a07e4dcf97c 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -5,7 +5,8 @@ config PPC_POWERNV select PPC_XICS select PPC_ICP_NATIVE select PPC_P7_NAP - select PPC_PCI_CHOICE if EMBEDDED + select PCI + select PCI_MSI select EPAPR_BOOT select PPC_INDIRECT_PIO select PPC_UDBG_16550 diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 73b155fd4481..1c383f38031d 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev) return 0; } -static struct dma_map_ops dma_npu_ops = { +static const struct dma_map_ops dma_npu_ops = { .map_page = dma_npu_map_page, .map_sg = dma_npu_map_sg, .alloc = dma_npu_alloc, diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 8278f43ad4b8..6901a06da2f9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1468,14 +1468,12 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev) struct pnv_phb *phb; struct pnv_ioda_pe *pe; struct pci_dn *pdn; - struct pci_sriov *iov; u16 num_vfs, i; bus = pdev->bus; hose = 
pci_bus_to_host(bus); phb = hose->private_data; pdn = pci_get_pdn(pdev); - iov = pdev->sriov; num_vfs = pdn->num_vfs; /* Release VF PEs */ @@ -3034,7 +3032,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, /* * This function is supposed to be called on basis of PE from top * to bottom style. So the the I/O or MMIO segment assigned to - * parent PE could be overrided by its child PEs if necessary. + * parent PE could be overridden by its child PEs if necessary. */ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) { diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index e39e6c428af1..8b67e1eefb5c 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/hotplug.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 8af1c15aef85..2d2e5f80a3d3 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -701,7 +701,7 @@ static u64 ps3_dma_get_required_mask(struct device *_dev) return DMA_BIT_MASK(32); } -static struct dma_map_ops ps3_sb_dma_ops = { +static const struct dma_map_ops ps3_sb_dma_ops = { .alloc = ps3_alloc_coherent, .free = ps3_free_coherent, .map_sg = ps3_sb_map_sg, @@ -712,7 +712,7 @@ static struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, }; -static struct dma_map_ops ps3_ioc0_dma_ops = { +static const struct dma_map_ops ps3_ioc0_dma_ops = { .alloc = ps3_alloc_coherent, .free = ps3_free_coherent, .map_sg = ps3_ioc0_map_sg, @@ -756,11 +756,11 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) switch (dev->dev_type) { case PS3_DEVICE_TYPE_IOC0: - dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops; + dev->core.dma_ops = &ps3_ioc0_dma_ops; dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count); break; case PS3_DEVICE_TYPE_SB: - dev->core.archdata.dma_ops = &ps3_sb_dma_ops; + dev->core.dma_ops = &ps3_sb_dma_ops; dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count); break; diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index d3a81e746fc4..193e052fa0dd 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -354,11 +354,17 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: hp_elog->_drc_u.drc_count = - be32_to_cpu(hp_elog->_drc_u.drc_count); + be32_to_cpu(hp_elog->_drc_u.drc_count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: hp_elog->_drc_u.drc_index = - be32_to_cpu(hp_elog->_drc_u.drc_index); + be32_to_cpu(hp_elog->_drc_u.drc_index); + break; + case PSERIES_HP_ELOG_ID_DRC_IC: + hp_elog->_drc_u.ic.count = + be32_to_cpu(hp_elog->_drc_u.ic.count); + hp_elog->_drc_u.ic.index = + be32_to_cpu(hp_elog->_drc_u.ic.index); } switch (hp_elog->resource) { @@ -467,7 +473,33 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog) if (!arg) return -EINVAL; - if (sysfs_streq(arg, "index")) { + if (sysfs_streq(arg, "indexed-count")) { + hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC; + arg = strsep(cmd, " "); + if (!arg) { + pr_err("No DRC count specified.\n"); + return -EINVAL; + } + + if (kstrtou32(arg, 0, &count)) { + pr_err("Invalid DRC count specified.\n"); + return -EINVAL; + } + + arg = 
strsep(cmd, " "); + if (!arg) { + pr_err("No DRC Index specified.\n"); + return -EINVAL; + } + + if (kstrtou32(arg, 0, &index)) { + pr_err("Invalid DRC Index specified.\n"); + return -EINVAL; + } + + hp_elog->_drc_u.ic.count = cpu_to_be32(count); + hp_elog->_drc_u.ic.index = cpu_to_be32(index); + } else if (sysfs_streq(arg, "index")) { hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; arg = strsep(cmd, " "); if (!arg) { diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index a1b63e00b2f7..7bc0e91f8715 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/sched.h> /* for idle_task_exit */ +#include <linux/sched/hotplug.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/slab.h> diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 3381c20edbc0..e104c71ea44a 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -320,6 +320,19 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb) return dlpar_update_device_tree_lmb(lmb); } +static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) +{ + unsigned long section_nr; + struct mem_section *mem_sect; + struct memory_block *mem_block; + + section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); + mem_sect = __nr_to_section(section_nr); + + mem_block = find_memory_block(mem_sect); + return mem_block; +} + #ifdef CONFIG_MEMORY_HOTREMOVE static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { @@ -407,19 +420,6 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) static int dlpar_add_lmb(struct of_drconf_cell *); -static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) -{ - unsigned long section_nr; - struct mem_section *mem_sect; - struct memory_block *mem_block; - - section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); - mem_sect = __nr_to_section(section_nr); - - mem_block = find_memory_block(mem_sect); - return mem_block; -} - static int dlpar_remove_lmb(struct of_drconf_cell *lmb) { struct memory_block *mem_block; @@ -601,6 +601,94 @@ static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop) return rc; } + +static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index, + struct property *prop) +{ + struct of_drconf_cell *lmbs; + u32 num_lmbs, *p; + int i, rc, start_lmb_found; + int lmbs_available = 0, start_index = 0, end_index; + + pr_info("Attempting to hot-remove %u LMB(s) at %x\n", + lmbs_to_remove, drc_index); + + if (lmbs_to_remove == 0) + return -EINVAL; + + p = prop->value; + num_lmbs = *p++; + lmbs = (struct of_drconf_cell *)p; + start_lmb_found = 0; + + /* Navigate to drc_index */ + while (start_index < num_lmbs) { + if (lmbs[start_index].drc_index == drc_index) { + start_lmb_found = 1; + break; + } + + start_index++; + } + + if (!start_lmb_found) + return -EINVAL; + + end_index = start_index + lmbs_to_remove; + + /* Validate that there are enough LMBs to satisfy the request */ + for (i = start_index; i < end_index; i++) { + if (lmbs[i].flags & DRCONF_MEM_RESERVED) + break; + + lmbs_available++; + } + + if (lmbs_available < lmbs_to_remove) + return -EINVAL; + + for (i = start_index; i < end_index; i++) { + if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED)) + continue; + + rc = dlpar_remove_lmb(&lmbs[i]); 
+ if (rc) + break; + + lmbs[i].reserved = 1; + } + + if (rc) { + pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n"); + + for (i = start_index; i < end_index; i++) { + if (!lmbs[i].reserved) + continue; + + rc = dlpar_add_lmb(&lmbs[i]); + if (rc) + pr_err("Failed to add LMB, drc index %x\n", + be32_to_cpu(lmbs[i].drc_index)); + + lmbs[i].reserved = 0; + } + rc = -EINVAL; + } else { + for (i = start_index; i < end_index; i++) { + if (!lmbs[i].reserved) + continue; + + dlpar_release_drc(lmbs[i].drc_index); + pr_info("Memory at %llx (drc index %x) was hot-removed\n", + lmbs[i].base_addr, lmbs[i].drc_index); + + lmbs[i].reserved = 0; + } + } + + return rc; +} + #else static inline int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) @@ -628,9 +716,32 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop) { return -EOPNOTSUPP; } +static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop) +{ + return -EOPNOTSUPP; +} +static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index, + struct property *prop) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_MEMORY_HOTREMOVE */ +static int dlpar_online_lmb(struct of_drconf_cell *lmb) +{ + struct memory_block *mem_block; + int rc; + + mem_block = lmb_to_memblock(lmb); + if (!mem_block) + return -EINVAL; + + rc = device_online(&mem_block->dev); + put_device(&mem_block->dev); + return rc; +} + static int dlpar_add_lmb(struct of_drconf_cell *lmb) { unsigned long block_sz; @@ -654,10 +765,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) /* Add the memory */ rc = add_memory(nid, lmb->base_addr, block_sz); - if (rc) + if (rc) { dlpar_remove_device_tree_lmb(lmb); - else + return rc; + } + + rc = dlpar_online_lmb(lmb); + if (rc) { + remove_memory(nid, lmb->base_addr, block_sz); + dlpar_remove_device_tree_lmb(lmb); + } else { lmb->flags |= DRCONF_MEM_ASSIGNED; + } return rc; } @@ -776,6 +895,97 @@ static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop) return rc; } +static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index, + struct property *prop) +{ + struct of_drconf_cell *lmbs; + u32 num_lmbs, *p; + int i, rc, start_lmb_found; + int lmbs_available = 0, start_index = 0, end_index; + + pr_info("Attempting to hot-add %u LMB(s) at index %x\n", + lmbs_to_add, drc_index); + + if (lmbs_to_add == 0) + return -EINVAL; + + p = prop->value; + num_lmbs = *p++; + lmbs = (struct of_drconf_cell *)p; + start_lmb_found = 0; + + /* Navigate to drc_index */ + while (start_index < num_lmbs) { + if (lmbs[start_index].drc_index == drc_index) { + start_lmb_found = 1; + break; + } + + start_index++; + } + + if (!start_lmb_found) + return -EINVAL; + + end_index = start_index + lmbs_to_add; + + /* Validate that the LMBs in this range are not reserved */ + for (i = start_index; i < end_index; i++) { + if (lmbs[i].flags & DRCONF_MEM_RESERVED) + break; + + lmbs_available++; + } + + if (lmbs_available < lmbs_to_add) + return -EINVAL; + + for (i = start_index; i < end_index; i++) { + if (lmbs[i].flags & DRCONF_MEM_ASSIGNED) + continue; + + rc = dlpar_acquire_drc(lmbs[i].drc_index); + if (rc) + break; + + rc = dlpar_add_lmb(&lmbs[i]); + if (rc) { + dlpar_release_drc(lmbs[i].drc_index); + break; + } + + lmbs[i].reserved = 1; + } + + if (rc) { + pr_err("Memory indexed-count-add failed, removing any added LMBs\n"); + + for (i = start_index; i < end_index; i++) { + if (!lmbs[i].reserved) + continue; + + rc = dlpar_remove_lmb(&lmbs[i]); + if (rc) + pr_err("Failed to 
remove LMB, drc index %x\n", + be32_to_cpu(lmbs[i].drc_index)); + else + dlpar_release_drc(lmbs[i].drc_index); + } + rc = -EINVAL; + } else { + for (i = start_index; i < end_index; i++) { + if (!lmbs[i].reserved) + continue; + + pr_info("Memory at %llx (drc index %x) was hot-added\n", + lmbs[i].base_addr, lmbs[i].drc_index); + lmbs[i].reserved = 0; + } + } + + return rc; +} + int dlpar_memory(struct pseries_hp_errorlog *hp_elog) { struct device_node *dn; @@ -783,9 +993,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) u32 count, drc_index; int rc; - count = hp_elog->_drc_u.drc_count; - drc_index = hp_elog->_drc_u.drc_index; - lock_device_hotplug(); dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); @@ -802,22 +1009,39 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) switch (hp_elog->action) { case PSERIES_HP_ELOG_ACTION_ADD: - if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) + if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) { + count = hp_elog->_drc_u.drc_count; rc = dlpar_memory_add_by_count(count, prop); - else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) + } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) { + drc_index = hp_elog->_drc_u.drc_index; rc = dlpar_memory_add_by_index(drc_index, prop); - else + } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) { + count = hp_elog->_drc_u.ic.count; + drc_index = hp_elog->_drc_u.ic.index; + rc = dlpar_memory_add_by_ic(count, drc_index, prop); + } else { rc = -EINVAL; + } + break; case PSERIES_HP_ELOG_ACTION_REMOVE: - if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) + if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) { + count = hp_elog->_drc_u.drc_count; rc = dlpar_memory_remove_by_count(count, prop); - else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) + } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) { + drc_index = hp_elog->_drc_u.drc_index; rc = dlpar_memory_remove_by_index(drc_index, prop); - else + } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) { + count = hp_elog->_drc_u.ic.count; + drc_index = hp_elog->_drc_u.ic.index; + rc = dlpar_memory_remove_by_ic(count, drc_index, prop); + } else { rc = -EINVAL; + } + break; case PSERIES_HP_ELOG_ACTION_READD: + drc_index = hp_elog->_drc_u.drc_index; rc = dlpar_memory_readd_by_index(drc_index, prop); break; default: diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 614c28537141..99a6bf7f3bcf 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -136,7 +136,7 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev) return DMA_BIT_MASK(64); } -static struct dma_map_ops ibmebus_dma_ops = { +static const struct dma_map_ops ibmebus_dma_ops = { .alloc = ibmebus_alloc_coherent, .free = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, @@ -169,7 +169,7 @@ static int ibmebus_create_device(struct device_node *dn) return -ENOMEM; dev->dev.bus = &ibmebus_bus_type; - dev->dev.archdata.dma_ops = &ibmebus_dma_ops; + dev->dev.dma_ops = &ibmebus_dma_ops; ret = of_device_add(dev); if (ret) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 0024e451bb36..4d757eaa46bf 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -1020,7 +1020,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) /* check largest block * page size > max memory hotplug addr */ max_addr = memory_hotplug_max(); if 
(query.largest_available_block < (max_addr >> page_shift)) { - dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " + dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u " "%llu-sized pages\n", max_addr, query.largest_available_block, 1ULL << page_shift); goto out_failed; diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 2c8fb3ec989e..720493932486 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -615,7 +615,7 @@ static u64 vio_dma_get_required_mask(struct device *dev) return dma_iommu_ops.get_required_mask(dev); } -static struct dma_map_ops vio_dma_mapping_ops = { +static const struct dma_map_ops vio_dma_mapping_ops = { .alloc = vio_dma_iommu_alloc_coherent, .free = vio_dma_iommu_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index ee9891734149..31db8c072acd 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -1,6 +1,5 @@ /* ppc-dis.c -- Disassemble PowerPC instructions - Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006 - Free Software Foundation, Inc. + Copyright (C) 1994-2016 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. @@ -26,57 +25,94 @@ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, US #include "ppc.h" #include "dis-asm.h" -/* Print a PowerPC or POWER instruction. */ +/* This file provides several disassembler functions, all of which use + the disassembler interface defined in dis-asm.h. Several functions + are provided because this file handles disassembly for the PowerPC + in both big and little endian mode and also for the POWER (RS/6000) + chip. */ + +/* Extract the operand value from the PowerPC or POWER instruction. */ -int -print_insn_powerpc (unsigned long insn, unsigned long memaddr) +static long +operand_value_powerpc (const struct powerpc_operand *operand, + unsigned long insn, ppc_cpu_t dialect) { - const struct powerpc_opcode *opcode; - const struct powerpc_opcode *opcode_end; - unsigned long op; - int dialect; + long value; + int invalid; + /* Extract the value from the instruction. */ + if (operand->extract) + value = (*operand->extract) (insn, dialect, &invalid); + else + { + if (operand->shift >= 0) + value = (insn >> operand->shift) & operand->bitm; + else + value = (insn << -operand->shift) & operand->bitm; + if ((operand->flags & PPC_OPERAND_SIGNED) != 0) + { + /* BITM is always some number of zeros followed by some + number of ones, followed by some number of zeros. */ + unsigned long top = operand->bitm; + /* top & -top gives the rightmost 1 bit, so this + fills in any trailing zeros. */ + top |= (top & -top) - 1; + top &= ~(top >> 1); + value = (value ^ top) - top; + } + } - dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON - | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; + return value; +} - if (cpu_has_feature(CPU_FTRS_POWER5)) - dialect |= PPC_OPCODE_POWER5; +/* Determine whether the optional operand(s) should be printed. 
*/ - if (cpu_has_feature(CPU_FTRS_CELL)) - dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC; +static int +skip_optional_operands (const unsigned char *opindex, + unsigned long insn, ppc_cpu_t dialect) +{ + const struct powerpc_operand *operand; - if (cpu_has_feature(CPU_FTRS_POWER6)) - dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC; + for (; *opindex != 0; opindex++) + { + operand = &powerpc_operands[*opindex]; + if ((operand->flags & PPC_OPERAND_NEXT) != 0 + || ((operand->flags & PPC_OPERAND_OPTIONAL) != 0 + && operand_value_powerpc (operand, insn, dialect) != + ppc_optional_operand_value (operand))) + return 0; + } + + return 1; +} + +/* Find a match for INSN in the opcode table, given machine DIALECT. + A DIALECT of -1 is special, matching all machine opcode variations. */ + +static const struct powerpc_opcode * +lookup_powerpc (unsigned long insn, ppc_cpu_t dialect) +{ + const struct powerpc_opcode *opcode; + const struct powerpc_opcode *opcode_end; + unsigned long op; /* Get the major opcode of the instruction. */ op = PPC_OP (insn); - /* Find the first match in the opcode table. We could speed this up - a bit by doing a binary search on the major opcode. */ opcode_end = powerpc_opcodes + powerpc_num_opcodes; - again: - for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++) + /* Find the first match in the opcode table for this major opcode. */ + for (opcode = powerpc_opcodes; opcode < opcode_end; ++opcode) { - unsigned long table_op; const unsigned char *opindex; const struct powerpc_operand *operand; int invalid; - int need_comma; - int need_paren; - - table_op = PPC_OP (opcode->opcode); - if (op < table_op) - break; - if (op > table_op) - continue; if ((insn & opcode->mask) != opcode->opcode - || (opcode->flags & dialect) == 0) + || (dialect != (ppc_cpu_t) -1 + && ((opcode->flags & dialect) == 0 + || (opcode->deprecated & dialect) != 0))) continue; - /* Make two passes over the operands. First see if any of them - have extraction functions, and, if they do, make sure the - instruction is valid. */ + /* Check validity of operands. */ invalid = 0; for (opindex = opcode->operands; *opindex != 0; opindex++) { @@ -87,14 +123,77 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr) if (invalid) continue; - /* The instruction is valid. */ - printf("%s", opcode->name); + return opcode; + } + + return NULL; +} + +/* Print a PowerPC or POWER instruction. 
*/ + +int print_insn_powerpc (unsigned long insn, unsigned long memaddr) +{ + const struct powerpc_opcode *opcode; + bool insn_is_short; + ppc_cpu_t dialect; + + dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON + | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; + + if (cpu_has_feature(CPU_FTRS_POWER5)) + dialect |= PPC_OPCODE_POWER5; + + if (cpu_has_feature(CPU_FTRS_CELL)) + dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC); + + if (cpu_has_feature(CPU_FTRS_POWER6)) + dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC); + + if (cpu_has_feature(CPU_FTRS_POWER7)) + dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 + | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX); + + if (cpu_has_feature(CPU_FTRS_POWER8)) + dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 + | PPC_OPCODE_POWER8 | PPC_OPCODE_HTM + | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX); + + if (cpu_has_feature(CPU_FTRS_POWER9)) + dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 + | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM + | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 + | PPC_OPCODE_VSX | PPC_OPCODE_VSX3), + + /* Get the major opcode of the insn. */ + opcode = NULL; + insn_is_short = false; + + if (opcode == NULL) + opcode = lookup_powerpc (insn, dialect); + if (opcode == NULL && (dialect & PPC_OPCODE_ANY) != 0) + opcode = lookup_powerpc (insn, (ppc_cpu_t) -1); + + if (opcode != NULL) + { + const unsigned char *opindex; + const struct powerpc_operand *operand; + int need_comma; + int need_paren; + int skip_optional; + if (opcode->operands[0] != 0) - printf("\t"); + printf("%-7s ", opcode->name); + else + printf("%s", opcode->name); + + if (insn_is_short) + /* The operands will be fetched out of the 16-bit instruction. */ + insn >>= 16; /* Now extract and print the operands. */ need_comma = 0; need_paren = 0; + skip_optional = -1; for (opindex = opcode->operands; *opindex != 0; opindex++) { long value; @@ -107,23 +206,18 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr) if ((operand->flags & PPC_OPERAND_FAKE) != 0) continue; - /* Extract the value from the instruction. */ - if (operand->extract) - value = (*operand->extract) (insn, dialect, &invalid); - else + /* If all of the optional operands have the value zero, + then don't print any of them. */ + if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0) { - value = (insn >> operand->shift) & ((1 << operand->bits) - 1); - if ((operand->flags & PPC_OPERAND_SIGNED) != 0 - && (value & (1 << (operand->bits - 1))) != 0) - value -= 1 << operand->bits; + if (skip_optional < 0) + skip_optional = skip_optional_operands (opindex, insn, + dialect); + if (skip_optional) + continue; } - /* If the operand is optional, and the value is zero, don't - print anything. 
*/ - if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0 - && (operand->flags & PPC_OPERAND_NEXT) == 0 - && value == 0) - continue; + value = operand_value_powerpc (operand, insn, dialect); if (need_comma) { @@ -139,30 +233,38 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr) printf("f%ld", value); else if ((operand->flags & PPC_OPERAND_VR) != 0) printf("v%ld", value); + else if ((operand->flags & PPC_OPERAND_VSR) != 0) + printf("vs%ld", value); else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0) - print_address (memaddr + value); + print_address(memaddr + value); else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0) - print_address (value & 0xffffffff); - else if ((operand->flags & PPC_OPERAND_CR) == 0 - || (dialect & PPC_OPCODE_PPC) == 0) + print_address(value & 0xffffffff); + else if ((operand->flags & PPC_OPERAND_FSL) != 0) + printf("fsl%ld", value); + else if ((operand->flags & PPC_OPERAND_FCR) != 0) + printf("fcr%ld", value); + else if ((operand->flags & PPC_OPERAND_UDI) != 0) printf("%ld", value); - else + else if ((operand->flags & PPC_OPERAND_CR_REG) != 0 + && (((dialect & PPC_OPCODE_PPC) != 0) + || ((dialect & PPC_OPCODE_VLE) != 0))) + printf("cr%ld", value); + else if (((operand->flags & PPC_OPERAND_CR_BIT) != 0) + && (((dialect & PPC_OPCODE_PPC) != 0) + || ((dialect & PPC_OPCODE_VLE) != 0))) { - if (operand->bits == 3) - printf("cr%ld", value); - else - { - static const char *cbnames[4] = { "lt", "gt", "eq", "so" }; - int cr; - int cc; - - cr = value >> 2; - if (cr != 0) - printf("4*cr%d+", cr); - cc = value & 3; - printf("%s", cbnames[cc]); - } + static const char *cbnames[4] = { "lt", "gt", "eq", "so" }; + int cr; + int cc; + + cr = value >> 2; + if (cr != 0) + printf("4*cr%d+", cr); + cc = value & 3; + printf("%s", cbnames[cc]); } + else + printf("%d", (int) value); if (need_paren) { @@ -179,14 +281,16 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr) } } - /* We have found and printed an instruction; return. */ - return 4; - } - - if ((dialect & PPC_OPCODE_ANY) != 0) - { - dialect = ~PPC_OPCODE_ANY; - goto again; + /* We have found and printed an instruction. + If it was a short VLE instruction we have more to do. */ + if (insn_is_short) + { + memaddr += 2; + return 2; + } + else + /* Otherwise, return. */ + return 4; } /* We could not find a match. */ diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c index 6845e91ba04a..ac2b55b1332e 100644 --- a/arch/powerpc/xmon/ppc-opc.c +++ b/arch/powerpc/xmon/ppc-opc.c @@ -1,6 +1,5 @@ /* ppc-opc.c -- PowerPC opcode list - Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004, - 2005 Free Software Foundation, Inc. + Copyright (C) 1994-2016 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. @@ -42,66 +41,97 @@ /* Local insertion and extraction functions. 
*/ -static unsigned long insert_bat (unsigned long, long, int, const char **); -static long extract_bat (unsigned long, int, int *); -static unsigned long insert_bba (unsigned long, long, int, const char **); -static long extract_bba (unsigned long, int, int *); -static unsigned long insert_bd (unsigned long, long, int, const char **); -static long extract_bd (unsigned long, int, int *); -static unsigned long insert_bdm (unsigned long, long, int, const char **); -static long extract_bdm (unsigned long, int, int *); -static unsigned long insert_bdp (unsigned long, long, int, const char **); -static long extract_bdp (unsigned long, int, int *); -static unsigned long insert_bo (unsigned long, long, int, const char **); -static long extract_bo (unsigned long, int, int *); -static unsigned long insert_boe (unsigned long, long, int, const char **); -static long extract_boe (unsigned long, int, int *); -static unsigned long insert_dq (unsigned long, long, int, const char **); -static long extract_dq (unsigned long, int, int *); -static unsigned long insert_ds (unsigned long, long, int, const char **); -static long extract_ds (unsigned long, int, int *); -static unsigned long insert_de (unsigned long, long, int, const char **); -static long extract_de (unsigned long, int, int *); -static unsigned long insert_des (unsigned long, long, int, const char **); -static long extract_des (unsigned long, int, int *); -static unsigned long insert_fxm (unsigned long, long, int, const char **); -static long extract_fxm (unsigned long, int, int *); -static unsigned long insert_li (unsigned long, long, int, const char **); -static long extract_li (unsigned long, int, int *); -static unsigned long insert_mbe (unsigned long, long, int, const char **); -static long extract_mbe (unsigned long, int, int *); -static unsigned long insert_mb6 (unsigned long, long, int, const char **); -static long extract_mb6 (unsigned long, int, int *); -static unsigned long insert_nb (unsigned long, long, int, const char **); -static long extract_nb (unsigned long, int, int *); -static unsigned long insert_nsi (unsigned long, long, int, const char **); -static long extract_nsi (unsigned long, int, int *); -static unsigned long insert_ral (unsigned long, long, int, const char **); -static unsigned long insert_ram (unsigned long, long, int, const char **); -static unsigned long insert_raq (unsigned long, long, int, const char **); -static unsigned long insert_ras (unsigned long, long, int, const char **); -static unsigned long insert_rbs (unsigned long, long, int, const char **); -static long extract_rbs (unsigned long, int, int *); -static unsigned long insert_rsq (unsigned long, long, int, const char **); -static unsigned long insert_rtq (unsigned long, long, int, const char **); -static unsigned long insert_sh6 (unsigned long, long, int, const char **); -static long extract_sh6 (unsigned long, int, int *); -static unsigned long insert_spr (unsigned long, long, int, const char **); -static long extract_spr (unsigned long, int, int *); -static unsigned long insert_sprg (unsigned long, long, int, const char **); -static long extract_sprg (unsigned long, int, int *); -static unsigned long insert_tbr (unsigned long, long, int, const char **); -static long extract_tbr (unsigned long, int, int *); -static unsigned long insert_ev2 (unsigned long, long, int, const char **); -static long extract_ev2 (unsigned long, int, int *); -static unsigned long insert_ev4 (unsigned long, long, int, const char **); -static long extract_ev4 (unsigned long, 
int, int *); -static unsigned long insert_ev8 (unsigned long, long, int, const char **); -static long extract_ev8 (unsigned long, int, int *); +static unsigned long insert_arx (unsigned long, long, ppc_cpu_t, const char **); +static long extract_arx (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_ary (unsigned long, long, ppc_cpu_t, const char **); +static long extract_ary (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_bat (unsigned long, long, ppc_cpu_t, const char **); +static long extract_bat (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_bba (unsigned long, long, ppc_cpu_t, const char **); +static long extract_bba (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_bdm (unsigned long, long, ppc_cpu_t, const char **); +static long extract_bdm (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_bdp (unsigned long, long, ppc_cpu_t, const char **); +static long extract_bdp (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_bo (unsigned long, long, ppc_cpu_t, const char **); +static long extract_bo (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_boe (unsigned long, long, ppc_cpu_t, const char **); +static long extract_boe (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_esync (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_dcmxs (unsigned long, long, ppc_cpu_t, const char **); +static long extract_dcmxs (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_dxd (unsigned long, long, ppc_cpu_t, const char **); +static long extract_dxd (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_dxdn (unsigned long, long, ppc_cpu_t, const char **); +static long extract_dxdn (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_fxm (unsigned long, long, ppc_cpu_t, const char **); +static long extract_fxm (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_li20 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_li20 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_ls (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_mbe (unsigned long, long, ppc_cpu_t, const char **); +static long extract_mbe (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_mb6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_mb6 (unsigned long, ppc_cpu_t, int *); +static long extract_nb (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_nbi (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_nsi (unsigned long, long, ppc_cpu_t, const char **); +static long extract_nsi (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_oimm (unsigned long, long, ppc_cpu_t, const char **); +static long extract_oimm (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_ral (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_ram (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_raq (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_ras (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_rbs (unsigned long, long, ppc_cpu_t, const char **); +static long extract_rbs (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_rbx (unsigned long, long, ppc_cpu_t, const char **); +static unsigned long insert_rx (unsigned long, long, ppc_cpu_t, const char **); +static long extract_rx (unsigned long, ppc_cpu_t, 
int *); +static unsigned long insert_ry (unsigned long, long, ppc_cpu_t, const char **); +static long extract_ry (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sh6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sh6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sci8 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sci8 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sci8n (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sci8n (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sd4h (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sd4h (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sd4w (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sd4w (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_spr (unsigned long, long, ppc_cpu_t, const char **); +static long extract_spr (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_sprg (unsigned long, long, ppc_cpu_t, const char **); +static long extract_sprg (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_tbr (unsigned long, long, ppc_cpu_t, const char **); +static long extract_tbr (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xt6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xt6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xtq6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xtq6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xa6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xa6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xb6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xb6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xb6s (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xb6s (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_xc6 (unsigned long, long, ppc_cpu_t, const char **); +static long extract_xc6 (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_dm (unsigned long, long, ppc_cpu_t, const char **); +static long extract_dm (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_vlesi (unsigned long, long, ppc_cpu_t, const char **); +static long extract_vlesi (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_vlensi (unsigned long, long, ppc_cpu_t, const char **); +static long extract_vlensi (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_vleui (unsigned long, long, ppc_cpu_t, const char **); +static long extract_vleui (unsigned long, ppc_cpu_t, int *); +static unsigned long insert_vleil (unsigned long, long, ppc_cpu_t, const char **); +static long extract_vleil (unsigned long, ppc_cpu_t, int *); /* The operands table. - The fields are bits, shift, insert, extract, flags. + The fields are bitm, shift, insert, extract, flags. We used to put parens around the various additions, like the one for BA just below. However, that caused trouble with feeble @@ -119,493 +149,934 @@ const struct powerpc_operand powerpc_operands[] = /* The BA field in an XL form instruction. */ #define BA UNUSED + 1 -#define BA_MASK (0x1f << 16) - { 5, 16, NULL, NULL, PPC_OPERAND_CR }, + /* The BI field in a B form or XL form instruction. 
*/ +#define BI BA +#define BI_MASK (0x1f << 16) + { 0x1f, 16, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BA field in an XL form instruction when it must be the same as the BT field in the same instruction. */ #define BAT BA + 1 - { 5, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE }, + { 0x1f, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE }, /* The BB field in an XL form instruction. */ #define BB BAT + 1 #define BB_MASK (0x1f << 11) - { 5, 11, NULL, NULL, PPC_OPERAND_CR }, + { 0x1f, 11, NULL, NULL, PPC_OPERAND_CR_BIT }, /* The BB field in an XL form instruction when it must be the same as the BA field in the same instruction. */ #define BBA BB + 1 - { 5, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE }, + /* The VB field in a VX form instruction when it must be the same + as the VA field in the same instruction. */ +#define VBA BBA + { 0x1f, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE }, /* The BD field in a B form instruction. The lower two bits are forced to zero. */ #define BD BBA + 1 - { 16, 0, insert_bd, extract_bd, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when absolute addressing is used. */ #define BDA BD + 1 - { 16, 0, insert_bd, extract_bd, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the - modifier is used. This sets the y bit of the BO field appropriately. */ #define BDM BDA + 1 - { 16, 0, insert_bdm, extract_bdm, - PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, insert_bdm, extract_bdm, + PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the - modifier is used and absolute address is used. */ #define BDMA BDM + 1 - { 16, 0, insert_bdm, extract_bdm, - PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, insert_bdm, extract_bdm, + PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the + modifier is used. This sets the y bit of the BO field appropriately. */ #define BDP BDMA + 1 - { 16, 0, insert_bdp, extract_bdp, - PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, insert_bdp, extract_bdp, + PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The BD field in a B form instruction when the + modifier is used and absolute addressing is used. */ #define BDPA BDP + 1 - { 16, 0, insert_bdp, extract_bdp, - PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, + { 0xfffc, 0, insert_bdp, extract_bdp, + PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, /* The BF field in an X or XL form instruction. */ #define BF BDPA + 1 - { 3, 23, NULL, NULL, PPC_OPERAND_CR }, + /* The CRFD field in an X form instruction. */ +#define CRFD BF + /* The CRD field in an XL form instruction. */ +#define CRD BF + { 0x7, 23, NULL, NULL, PPC_OPERAND_CR_REG }, + + /* The BF field in an X or XL form instruction. */ +#define BFF BF + 1 + { 0x7, 23, NULL, NULL, 0 }, /* An optional BF field. This is used for comparison instructions, in which an omitted BF field is taken as zero. */ -#define OBF BF + 1 - { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, +#define OBF BFF + 1 + { 0x7, 23, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The BFA field in an X or XL form instruction. */ #define BFA OBF + 1 - { 3, 18, NULL, NULL, PPC_OPERAND_CR }, - - /* The BI field in a B form or XL form instruction. 
*/ -#define BI BFA + 1 -#define BI_MASK (0x1f << 16) - { 5, 16, NULL, NULL, PPC_OPERAND_CR }, + { 0x7, 18, NULL, NULL, PPC_OPERAND_CR_REG }, /* The BO field in a B form instruction. Certain values are illegal. */ -#define BO BI + 1 +#define BO BFA + 1 #define BO_MASK (0x1f << 21) - { 5, 21, insert_bo, extract_bo, 0 }, + { 0x1f, 21, insert_bo, extract_bo, 0 }, /* The BO field in a B form instruction when the + or - modifier is used. This is like the BO field, but it must be even. */ #define BOE BO + 1 - { 5, 21, insert_boe, extract_boe, 0 }, + { 0x1e, 21, insert_boe, extract_boe, 0 }, + + /* The RM field in an X form instruction. */ +#define RM BOE + 1 + { 0x3, 11, NULL, NULL, 0 }, -#define BH BOE + 1 - { 2, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, +#define BH RM + 1 + { 0x3, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The BT field in an X or XL form instruction. */ #define BT BH + 1 - { 5, 21, NULL, NULL, PPC_OPERAND_CR }, + { 0x1f, 21, NULL, NULL, PPC_OPERAND_CR_BIT }, + + /* The BI16 field in a BD8 form instruction. */ +#define BI16 BT + 1 + { 0x3, 8, NULL, NULL, PPC_OPERAND_CR_BIT }, + + /* The BI32 field in a BD15 form instruction. */ +#define BI32 BI16 + 1 + { 0xf, 16, NULL, NULL, PPC_OPERAND_CR_BIT }, + + /* The BO32 field in a BD15 form instruction. */ +#define BO32 BI32 + 1 + { 0x3, 20, NULL, NULL, 0 }, + + /* The B8 field in a BD8 form instruction. */ +#define B8 BO32 + 1 + { 0x1fe, -1, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + + /* The B15 field in a BD15 form instruction. The lowest bit is + forced to zero. */ +#define B15 B8 + 1 + { 0xfffe, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + + /* The B24 field in a BD24 form instruction. The lowest bit is + forced to zero. */ +#define B24 B15 + 1 + { 0x1fffffe, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The condition register number portion of the BI field in a B form or XL form instruction. This is used for the extended conditional branch mnemonics, which set the lower two bits of the BI field. This field is optional. */ -#define CR BT + 1 - { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, +#define CR B24 + 1 + { 0x7, 18, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The CRB field in an X form instruction. */ #define CRB CR + 1 - { 5, 6, NULL, NULL, 0 }, + /* The MB field in an M form instruction. */ +#define MB CRB +#define MB_MASK (0x1f << 6) + { 0x1f, 6, NULL, NULL, 0 }, - /* The CRFD field in an X form instruction. */ -#define CRFD CRB + 1 - { 3, 23, NULL, NULL, PPC_OPERAND_CR }, + /* The CRD32 field in an XL form instruction. */ +#define CRD32 CRB + 1 + { 0x3, 21, NULL, NULL, PPC_OPERAND_CR_REG }, /* The CRFS field in an X form instruction. */ -#define CRFS CRFD + 1 - { 3, 0, NULL, NULL, PPC_OPERAND_CR }, +#define CRFS CRD32 + 1 + { 0x7, 0, NULL, NULL, PPC_OPERAND_CR_REG }, + +#define CRS CRFS + 1 + { 0x3, 18, NULL, NULL, PPC_OPERAND_CR_REG | PPC_OPERAND_OPTIONAL }, /* The CT field in an X form instruction. */ -#define CT CRFS + 1 - { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, +#define CT CRS + 1 + /* The MO field in an mbar instruction. */ +#define MO CT + { 0x1f, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The D field in a D form instruction. This is a displacement off a register, and implies that the next operand is a register in parentheses. */ #define D CT + 1 - { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, + { 0xffff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, - /* The DE field in a DE form instruction. 
This is like D, but is 12 - bits only. */ -#define DE D + 1 - { 14, 0, insert_de, extract_de, PPC_OPERAND_PARENS }, + /* The D8 field in a D form instruction. This is a displacement off + a register, and implies that the next operand is a register in + parentheses. */ +#define D8 D + 1 + { 0xff, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, + + /* The DCMX field in an X form instruction. */ +#define DCMX D8 + 1 + { 0x7f, 16, NULL, NULL, 0 }, - /* The DES field in a DES form instruction. This is like DS, but is 14 - bits only (12 stored.) */ -#define DES DE + 1 - { 14, 0, insert_des, extract_des, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, + /* The split DCMX field in an X form instruction. */ +#define DCMXS DCMX + 1 + { 0x7f, PPC_OPSHIFT_INV, insert_dcmxs, extract_dcmxs, 0 }, /* The DQ field in a DQ form instruction. This is like D, but the lower four bits are forced to zero. */ -#define DQ DES + 1 - { 16, 0, insert_dq, extract_dq, - PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ }, +#define DQ DCMXS + 1 + { 0xfff0, 0, NULL, NULL, + PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ }, /* The DS field in a DS form instruction. This is like D, but the lower two bits are forced to zero. */ #define DS DQ + 1 - { 16, 0, insert_ds, extract_ds, - PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS }, + { 0xfffc, 0, NULL, NULL, + PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS }, + + /* The DUIS or BHRBE fields in a XFX form instruction, 10 bits + unsigned imediate */ +#define DUIS DS + 1 +#define BHRBE DUIS + { 0x3ff, 11, NULL, NULL, 0 }, + + /* The split D field in a DX form instruction. */ +#define DXD DUIS + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_dxd, extract_dxd, + PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT}, + + /* The split ND field in a DX form instruction. + This is the same as the DX field, only negated. */ +#define NDXD DXD + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_dxdn, extract_dxdn, + PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT}, /* The E field in a wrteei instruction. */ -#define E DS + 1 - { 1, 15, NULL, NULL, 0 }, + /* And the W bit in the pair singles instructions. */ + /* And the ST field in a VX form instruction. */ +#define E NDXD + 1 +#define PSW E +#define ST E + { 0x1, 15, NULL, NULL, 0 }, /* The FL1 field in a POWER SC form instruction. */ #define FL1 E + 1 - { 4, 12, NULL, NULL, 0 }, + /* The U field in an X form instruction. */ +#define U FL1 + { 0xf, 12, NULL, NULL, 0 }, /* The FL2 field in a POWER SC form instruction. */ #define FL2 FL1 + 1 - { 3, 2, NULL, NULL, 0 }, + { 0x7, 2, NULL, NULL, 0 }, /* The FLM field in an XFL form instruction. */ #define FLM FL2 + 1 - { 8, 17, NULL, NULL, 0 }, + { 0xff, 17, NULL, NULL, 0 }, /* The FRA field in an X or A form instruction. */ #define FRA FLM + 1 #define FRA_MASK (0x1f << 16) - { 5, 16, NULL, NULL, PPC_OPERAND_FPR }, + { 0x1f, 16, NULL, NULL, PPC_OPERAND_FPR }, + + /* The FRAp field of DFP instructions. */ +#define FRAp FRA + 1 + { 0x1e, 16, NULL, NULL, PPC_OPERAND_FPR }, /* The FRB field in an X or A form instruction. */ -#define FRB FRA + 1 +#define FRB FRAp + 1 #define FRB_MASK (0x1f << 11) - { 5, 11, NULL, NULL, PPC_OPERAND_FPR }, + { 0x1f, 11, NULL, NULL, PPC_OPERAND_FPR }, + + /* The FRBp field of DFP instructions. */ +#define FRBp FRB + 1 + { 0x1e, 11, NULL, NULL, PPC_OPERAND_FPR }, /* The FRC field in an A form instruction. 
*/ -#define FRC FRB + 1 +#define FRC FRBp + 1 #define FRC_MASK (0x1f << 6) - { 5, 6, NULL, NULL, PPC_OPERAND_FPR }, + { 0x1f, 6, NULL, NULL, PPC_OPERAND_FPR }, /* The FRS field in an X form instruction or the FRT field in a D, X or A form instruction. */ #define FRS FRC + 1 #define FRT FRS - { 5, 21, NULL, NULL, PPC_OPERAND_FPR }, + { 0x1f, 21, NULL, NULL, PPC_OPERAND_FPR }, + + /* The FRSp field of stfdp or the FRTp field of lfdp and DFP + instructions. */ +#define FRSp FRS + 1 +#define FRTp FRSp + { 0x1e, 21, NULL, NULL, PPC_OPERAND_FPR }, /* The FXM field in an XFX instruction. */ -#define FXM FRS + 1 -#define FXM_MASK (0xff << 12) - { 8, 12, insert_fxm, extract_fxm, 0 }, +#define FXM FRSp + 1 + { 0xff, 12, insert_fxm, extract_fxm, 0 }, /* Power4 version for mfcr. */ #define FXM4 FXM + 1 - { 8, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL }, + { 0xff, 12, insert_fxm, extract_fxm, + PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, + /* If the FXM4 operand is ommitted, use the sentinel value -1. */ + { -1, -1, NULL, NULL, 0}, + + /* The IMM20 field in an LI instruction. */ +#define IMM20 FXM4 + 2 + { 0xfffff, PPC_OPSHIFT_INV, insert_li20, extract_li20, PPC_OPERAND_SIGNED}, /* The L field in a D or X form instruction. */ -#define L FXM4 + 1 - { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, +#define L IMM20 + 1 + { 0x1, 21, NULL, NULL, 0 }, + + /* The optional L field in tlbie and tlbiel instructions. */ +#define LOPT L + 1 + /* The R field in a HTM X form instruction. */ +#define HTM_R LOPT + { 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, + + /* The optional (for 32-bit) L field in cmp[l][i] instructions. */ +#define L32OPT LOPT + 1 + { 0x1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL32 }, + + /* The L field in dcbf instruction. */ +#define L2OPT L32OPT + 1 + { 0x3, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The LEV field in a POWER SVC form instruction. */ -#define SVC_LEV L + 1 - { 7, 5, NULL, NULL, 0 }, +#define SVC_LEV L2OPT + 1 + { 0x7f, 5, NULL, NULL, 0 }, /* The LEV field in an SC form instruction. */ #define LEV SVC_LEV + 1 - { 7, 5, NULL, NULL, PPC_OPERAND_OPTIONAL }, + { 0x7f, 5, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The LI field in an I form instruction. The lower two bits are forced to zero. */ #define LI LEV + 1 - { 26, 0, insert_li, extract_li, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, + { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED }, /* The LI field in an I form instruction when used as an absolute address. */ #define LIA LI + 1 - { 26, 0, insert_li, extract_li, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, + { 0x3fffffc, 0, NULL, NULL, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED }, - /* The LS field in an X (sync) form instruction. */ + /* The LS or WC field in an X (sync or wait) form instruction. */ #define LS LIA + 1 - { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, - - /* The MB field in an M form instruction. */ -#define MB LS + 1 -#define MB_MASK (0x1f << 6) - { 5, 6, NULL, NULL, 0 }, +#define WC LS + { 0x3, 21, insert_ls, NULL, PPC_OPERAND_OPTIONAL }, /* The ME field in an M form instruction. */ -#define ME MB + 1 +#define ME LS + 1 #define ME_MASK (0x1f << 1) - { 5, 1, NULL, NULL, 0 }, + { 0x1f, 1, NULL, NULL, 0 }, /* The MB and ME fields in an M form instruction expressed a single operand which is a bitmask indicating which bits to select. This is a two operand form using PPC_OPERAND_NEXT. See the description in opcode/ppc.h for what this means. 
*/ #define MBE ME + 1 - { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, - { 32, 0, insert_mbe, extract_mbe, 0 }, + { 0x1f, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, + { -1, 0, insert_mbe, extract_mbe, 0 }, /* The MB or ME field in an MD or MDS form instruction. The high bit is wrapped to the low end. */ #define MB6 MBE + 2 #define ME6 MB6 #define MB6_MASK (0x3f << 5) - { 6, 5, insert_mb6, extract_mb6, 0 }, - - /* The MO field in an mbar instruction. */ -#define MO MB6 + 1 - { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, + { 0x3f, 5, insert_mb6, extract_mb6, 0 }, /* The NB field in an X form instruction. The value 32 is stored as 0. */ -#define NB MO + 1 - { 6, 11, insert_nb, extract_nb, 0 }, +#define NB MB6 + 1 + { 0x1f, 11, NULL, extract_nb, PPC_OPERAND_PLUS1 }, + + /* The NBI field in an lswi instruction, which has special value + restrictions. The value 32 is stored as 0. */ +#define NBI NB + 1 + { 0x1f, 11, insert_nbi, extract_nb, PPC_OPERAND_PLUS1 }, /* The NSI field in a D form instruction. This is the same as the SI field, only negated. */ -#define NSI NB + 1 - { 16, 0, insert_nsi, extract_nsi, - PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, +#define NSI NBI + 1 + { 0xffff, 0, insert_nsi, extract_nsi, + PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, + + /* The NSI field in a D form instruction when we accept a wide range + of positive values. */ +#define NSISIGNOPT NSI + 1 + { 0xffff, 0, insert_nsi, extract_nsi, + PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */ -#define RA NSI + 1 +#define RA NSISIGNOPT + 1 #define RA_MASK (0x1f << 16) - { 5, 16, NULL, NULL, PPC_OPERAND_GPR }, + { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR }, /* As above, but 0 in the RA field means zero, not r0. */ #define RA0 RA + 1 - { 5, 16, NULL, NULL, PPC_OPERAND_GPR_0 }, + { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR_0 }, - /* The RA field in the DQ form lq instruction, which has special + /* The RA field in the DQ form lq or an lswx instruction, which have special value restrictions. */ #define RAQ RA0 + 1 - { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 }, +#define RAX RAQ + { 0x1f, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in a D or X form instruction which is an updating load, which means that the RA field may not be zero and may not equal the RT field. */ #define RAL RAQ + 1 - { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 }, + { 0x1f, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in an lmw instruction, which has special value restrictions. */ #define RAM RAL + 1 - { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 }, + { 0x1f, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 }, /* The RA field in a D or X form instruction which is an updating store or an updating floating point load, which means that the RA field may not be zero. */ #define RAS RAM + 1 - { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 }, + { 0x1f, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 }, - /* The RA field of the tlbwe instruction, which is optional. */ + /* The RA field of the tlbwe, dccci and iccci instructions, + which are optional. */ #define RAOPT RAS + 1 - { 5, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, + { 0x1f, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, /* The RB field in an X, XO, M, or MDS form instruction. 
*/ #define RB RAOPT + 1 #define RB_MASK (0x1f << 11) - { 5, 11, NULL, NULL, PPC_OPERAND_GPR }, + { 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR }, /* The RB field in an X form instruction when it must be the same as the RS field in the instruction. This is used for extended mnemonics like mr. */ #define RBS RB + 1 - { 5, 1, insert_rbs, extract_rbs, PPC_OPERAND_FAKE }, + { 0x1f, 11, insert_rbs, extract_rbs, PPC_OPERAND_FAKE }, + + /* The RB field in an lswx instruction, which has special value + restrictions. */ +#define RBX RBS + 1 + { 0x1f, 11, insert_rbx, NULL, PPC_OPERAND_GPR }, + + /* The RB field of the dccci and iccci instructions, which are optional. */ +#define RBOPT RBX + 1 + { 0x1f, 11, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, + + /* The RC register field in an maddld, maddhd or maddhdu instruction. */ +#define RC RBOPT + 1 + { 0x1f, 6, NULL, NULL, PPC_OPERAND_GPR }, /* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form instruction or the RT field in a D, DS, X, XFX or XO form instruction. */ -#define RS RBS + 1 +#define RS RC + 1 #define RT RS #define RT_MASK (0x1f << 21) - { 5, 21, NULL, NULL, PPC_OPERAND_GPR }, +#define RD RS + { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR }, - /* The RS field of the DS form stq instruction, which has special - value restrictions. */ + /* The RS and RT fields of the DS form stq and DQ form lq instructions, + which have special value restrictions. */ #define RSQ RS + 1 - { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR_0 }, - - /* The RT field of the DQ form lq instruction, which has special - value restrictions. */ -#define RTQ RSQ + 1 - { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR_0 }, +#define RTQ RSQ + { 0x1e, 21, NULL, NULL, PPC_OPERAND_GPR }, /* The RS field of the tlbwe instruction, which is optional. */ -#define RSO RTQ + 1 +#define RSO RSQ + 1 #define RTO RSO - { 5, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, + { 0x1f, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL }, + + /* The RX field of the SE_RR form instruction. */ +#define RX RSO + 1 + { 0x1f, PPC_OPSHIFT_INV, insert_rx, extract_rx, PPC_OPERAND_GPR }, + + /* The ARX field of the SE_RR form instruction. */ +#define ARX RX + 1 + { 0x1f, PPC_OPSHIFT_INV, insert_arx, extract_arx, PPC_OPERAND_GPR }, + + /* The RY field of the SE_RR form instruction. */ +#define RY ARX + 1 +#define RZ RY + { 0x1f, PPC_OPSHIFT_INV, insert_ry, extract_ry, PPC_OPERAND_GPR }, + + /* The ARY field of the SE_RR form instruction. */ +#define ARY RY + 1 + { 0x1f, PPC_OPSHIFT_INV, insert_ary, extract_ary, PPC_OPERAND_GPR }, + + /* The SCLSCI8 field in a D form instruction. */ +#define SCLSCI8 ARY + 1 + { 0xffffffff, PPC_OPSHIFT_INV, insert_sci8, extract_sci8, 0 }, + + /* The SCLSCI8N field in a D form instruction. This is the same as the + SCLSCI8 field, only negated. */ +#define SCLSCI8N SCLSCI8 + 1 + { 0xffffffff, PPC_OPSHIFT_INV, insert_sci8n, extract_sci8n, + PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED }, + + /* The SD field of the SD4 form instruction. */ +#define SE_SD SCLSCI8N + 1 + { 0xf, 8, NULL, NULL, PPC_OPERAND_PARENS }, + + /* The SD field of the SD4 form instruction, for halfword. */ +#define SE_SDH SE_SD + 1 + { 0x1e, PPC_OPSHIFT_INV, insert_sd4h, extract_sd4h, PPC_OPERAND_PARENS }, + + /* The SD field of the SD4 form instruction, for word. */ +#define SE_SDW SE_SDH + 1 + { 0x3c, PPC_OPSHIFT_INV, insert_sd4w, extract_sd4w, PPC_OPERAND_PARENS }, /* The SH field in an X or M form instruction. 
*/ -#define SH RSO + 1 +#define SH SE_SDW + 1 #define SH_MASK (0x1f << 11) - { 5, 11, NULL, NULL, 0 }, + /* The other UIMM field in a EVX form instruction. */ +#define EVUIMM SH + /* The FC field in an atomic X form instruction. */ +#define FC SH + { 0x1f, 11, NULL, NULL, 0 }, + + /* The SI field in a HTM X form instruction. */ +#define HTM_SI SH + 1 + { 0x1f, 11, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SH field in an MD form instruction. This is split. */ -#define SH6 SH + 1 +#define SH6 HTM_SI + 1 #define SH6_MASK ((0x1f << 11) | (1 << 1)) - { 6, 1, insert_sh6, extract_sh6, 0 }, + { 0x3f, PPC_OPSHIFT_INV, insert_sh6, extract_sh6, 0 }, /* The SH field of the tlbwe instruction, which is optional. */ #define SHO SH6 + 1 - { 5, 11,NULL, NULL, PPC_OPERAND_OPTIONAL }, + { 0x1f, 11, NULL, NULL, PPC_OPERAND_OPTIONAL }, /* The SI field in a D form instruction. */ #define SI SHO + 1 - { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED }, + { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SI field in a D form instruction when we accept a wide range of positive values. */ #define SISIGNOPT SI + 1 - { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, + { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, + + /* The SI8 field in a D form instruction. */ +#define SI8 SISIGNOPT + 1 + { 0xff, 0, NULL, NULL, PPC_OPERAND_SIGNED }, /* The SPR field in an XFX form instruction. This is flipped--the lower 5 bits are stored in the upper 5 and vice- versa. */ -#define SPR SISIGNOPT + 1 +#define SPR SI8 + 1 #define PMR SPR +#define TMR SPR #define SPR_MASK (0x3ff << 11) - { 10, 11, insert_spr, extract_spr, 0 }, + { 0x3ff, 11, insert_spr, extract_spr, 0 }, /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */ #define SPRBAT SPR + 1 #define SPRBAT_MASK (0x3 << 17) - { 2, 17, NULL, NULL, 0 }, + { 0x3, 17, NULL, NULL, 0 }, /* The SPRG register number in an XFX form m[ft]sprg instruction. */ #define SPRG SPRBAT + 1 - { 5, 16, insert_sprg, extract_sprg, 0 }, + { 0x1f, 16, insert_sprg, extract_sprg, 0 }, /* The SR field in an X form instruction. */ #define SR SPRG + 1 - { 4, 16, NULL, NULL, 0 }, + /* The 4-bit UIMM field in a VX form instruction. */ +#define UIMM4 SR + { 0xf, 16, NULL, NULL, 0 }, /* The STRM field in an X AltiVec form instruction. */ #define STRM SR + 1 -#define STRM_MASK (0x3 << 21) - { 2, 21, NULL, NULL, 0 }, + /* The T field in a tlbilx form instruction. */ +#define T STRM + /* The L field in wclr instructions. */ +#define L2 STRM + { 0x3, 21, NULL, NULL, 0 }, + + /* The ESYNC field in an X (sync) form instruction. */ +#define ESYNC STRM + 1 + { 0xf, 16, insert_esync, NULL, PPC_OPERAND_OPTIONAL }, /* The SV field in a POWER SC form instruction. */ -#define SV STRM + 1 - { 14, 2, NULL, NULL, 0 }, +#define SV ESYNC + 1 + { 0x3fff, 2, NULL, NULL, 0 }, /* The TBR field in an XFX form instruction. This is like the SPR field, but it is optional. */ #define TBR SV + 1 - { 10, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL }, + { 0x3ff, 11, insert_tbr, extract_tbr, + PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, + /* If the TBR operand is ommitted, use the value 268. */ + { -1, 268, NULL, NULL, 0}, /* The TO field in a D or X form instruction. */ -#define TO TBR + 1 +#define TO TBR + 2 +#define DUI TO #define TO_MASK (0x1f << 21) - { 5, 21, NULL, NULL, 0 }, - - /* The U field in an X form instruction. */ -#define U TO + 1 - { 4, 12, NULL, NULL, 0 }, + { 0x1f, 21, NULL, NULL, 0 }, /* The UI field in a D form instruction. 
*/ -#define UI U + 1 - { 16, 0, NULL, NULL, 0 }, +#define UI TO + 1 + { 0xffff, 0, NULL, NULL, 0 }, + +#define UISIGNOPT UI + 1 + { 0xffff, 0, NULL, NULL, PPC_OPERAND_SIGNOPT }, + + /* The IMM field in an SE_IM5 instruction. */ +#define UI5 UISIGNOPT + 1 + { 0x1f, 4, NULL, NULL, 0 }, + + /* The OIMM field in an SE_OIM5 instruction. */ +#define OIMM5 UI5 + 1 + { 0x1f, PPC_OPSHIFT_INV, insert_oimm, extract_oimm, PPC_OPERAND_PLUS1 }, + + /* The UI7 field in an SE_LI instruction. */ +#define UI7 OIMM5 + 1 + { 0x7f, 4, NULL, NULL, 0 }, /* The VA field in a VA, VX or VXR form instruction. */ -#define VA UI + 1 -#define VA_MASK (0x1f << 16) - { 5, 16, NULL, NULL, PPC_OPERAND_VR }, +#define VA UI7 + 1 + { 0x1f, 16, NULL, NULL, PPC_OPERAND_VR }, /* The VB field in a VA, VX or VXR form instruction. */ #define VB VA + 1 -#define VB_MASK (0x1f << 11) - { 5, 11, NULL, NULL, PPC_OPERAND_VR }, + { 0x1f, 11, NULL, NULL, PPC_OPERAND_VR }, /* The VC field in a VA form instruction. */ #define VC VB + 1 -#define VC_MASK (0x1f << 6) - { 5, 6, NULL, NULL, PPC_OPERAND_VR }, + { 0x1f, 6, NULL, NULL, PPC_OPERAND_VR }, /* The VD or VS field in a VA, VX, VXR or X form instruction. */ #define VD VC + 1 #define VS VD -#define VD_MASK (0x1f << 21) - { 5, 21, NULL, NULL, PPC_OPERAND_VR }, + { 0x1f, 21, NULL, NULL, PPC_OPERAND_VR }, - /* The SIMM field in a VX form instruction. */ + /* The SIMM field in a VX form instruction, and TE in Z form. */ #define SIMM VD + 1 - { 5, 16, NULL, NULL, PPC_OPERAND_SIGNED}, +#define TE SIMM + { 0x1f, 16, NULL, NULL, PPC_OPERAND_SIGNED}, /* The UIMM field in a VX form instruction. */ #define UIMM SIMM + 1 - { 5, 16, NULL, NULL, 0 }, +#define DCTL UIMM + { 0x1f, 16, NULL, NULL, 0 }, - /* The SHB field in a VA form instruction. */ -#define SHB UIMM + 1 - { 4, 6, NULL, NULL, 0 }, + /* The 3-bit UIMM field in a VX form instruction. */ +#define UIMM3 UIMM + 1 + { 0x7, 16, NULL, NULL, 0 }, - /* The other UIMM field in a EVX form instruction. */ -#define EVUIMM SHB + 1 - { 5, 11, NULL, NULL, 0 }, + /* The 6-bit UIM field in a X form instruction. */ +#define UIM6 UIMM3 + 1 + { 0x3f, 16, NULL, NULL, 0 }, + + /* The SIX field in a VX form instruction. */ +#define SIX UIM6 + 1 + { 0xf, 11, NULL, NULL, 0 }, + + /* The PS field in a VX form instruction. */ +#define PS SIX + 1 + { 0x1, 9, NULL, NULL, 0 }, + + /* The SHB field in a VA form instruction. */ +#define SHB PS + 1 + { 0xf, 6, NULL, NULL, 0 }, /* The other UIMM field in a half word EVX form instruction. */ -#define EVUIMM_2 EVUIMM + 1 - { 32, 11, insert_ev2, extract_ev2, PPC_OPERAND_PARENS }, +#define EVUIMM_2 SHB + 1 + { 0x3e, 10, NULL, NULL, PPC_OPERAND_PARENS }, /* The other UIMM field in a word EVX form instruction. */ #define EVUIMM_4 EVUIMM_2 + 1 - { 32, 11, insert_ev4, extract_ev4, PPC_OPERAND_PARENS }, + { 0x7c, 9, NULL, NULL, PPC_OPERAND_PARENS }, /* The other UIMM field in a double EVX form instruction. */ #define EVUIMM_8 EVUIMM_4 + 1 - { 32, 11, insert_ev8, extract_ev8, PPC_OPERAND_PARENS }, + { 0xf8, 8, NULL, NULL, PPC_OPERAND_PARENS }, - /* The WS field. */ + /* The WS or DRM field in an X form instruction. */ #define WS EVUIMM_8 + 1 -#define WS_MASK (0x7 << 11) - { 3, 11, NULL, NULL, 0 }, - - /* The L field in an mtmsrd or A form instruction. */ -#define MTMSRD_L WS + 1 -#define A_L MTMSRD_L - { 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL }, +#define DRM WS + { 0x7, 11, NULL, NULL, 0 }, + + /* PowerPC paired singles extensions. */ + /* W bit in the pair singles instructions for x type instructions. 
*/ +#define PSWM WS + 1 + /* The BO16 field in a BD8 form instruction. */ +#define BO16 PSWM + { 0x1, 10, 0, 0, 0 }, + + /* IDX bits for quantization in the pair singles instructions. */ +#define PSQ PSWM + 1 + { 0x7, 12, 0, 0, 0 }, + + /* IDX bits for quantization in the pair singles x-type instructions. */ +#define PSQM PSQ + 1 + { 0x7, 7, 0, 0, 0 }, + + /* Smaller D field for quantization in the pair singles instructions. */ +#define PSD PSQM + 1 + { 0xfff, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, + + /* The L field in an mtmsrd or A form instruction or R or W in an X form. */ +#define A_L PSD + 1 +#define W A_L +#define X_R A_L + { 0x1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL }, + + /* The RMC or CY field in a Z23 form instruction. */ +#define RMC A_L + 1 +#define CY RMC + { 0x3, 9, NULL, NULL, 0 }, - /* The DCM field in a Z form instruction. */ -#define DCM MTMSRD_L + 1 - { 6, 16, NULL, NULL, 0 }, - - /* Likewise, the DGM field in a Z form instruction. */ -#define DGM DCM + 1 - { 6, 16, NULL, NULL, 0 }, +#define R RMC + 1 + { 0x1, 16, NULL, NULL, 0 }, -#define TE DGM + 1 - { 5, 11, NULL, NULL, 0 }, +#define RIC R + 1 + { 0x3, 18, NULL, NULL, PPC_OPERAND_OPTIONAL }, -#define RMC TE + 1 - { 2, 21, NULL, NULL, 0 }, +#define PRS RIC + 1 + { 0x1, 17, NULL, NULL, PPC_OPERAND_OPTIONAL }, -#define R RMC + 1 - { 1, 15, NULL, NULL, 0 }, - -#define SP R + 1 - { 2, 11, NULL, NULL, 0 }, +#define SP PRS + 1 + { 0x3, 19, NULL, NULL, 0 }, #define S SP + 1 - { 1, 11, NULL, NULL, 0 }, + { 0x1, 20, NULL, NULL, 0 }, - /* SH field starting at bit position 16. */ -#define SH16 S + 1 - { 6, 10, NULL, NULL, 0 }, + /* The S field in a XL form instruction. */ +#define SXL S + 1 + { 0x1, 11, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE}, + /* If the SXL operand is ommitted, use the value 1. */ + { -1, 1, NULL, NULL, 0}, - /* The L field in an X form with the RT field fixed instruction. */ -#define XRT_L SH16 + 1 - { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, + /* SH field starting at bit position 16. */ +#define SH16 SXL + 2 + /* The DCM and DGM fields in a Z form instruction. */ +#define DCM SH16 +#define DGM DCM + { 0x3f, 10, NULL, NULL, 0 }, /* The EH field in larx instruction. */ -#define EH XRT_L + 1 - { 1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL }, +#define EH SH16 + 1 + { 0x1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL }, + + /* The L field in an mtfsf or XFL form instruction. */ + /* The A field in a HTM X form instruction. */ +#define XFL_L EH + 1 +#define HTM_A XFL_L + { 0x1, 25, NULL, NULL, PPC_OPERAND_OPTIONAL}, + + /* Xilinx APU related masks and macros */ +#define FCRT XFL_L + 1 +#define FCRT_MASK (0x1f << 21) + { 0x1f, 21, 0, 0, PPC_OPERAND_FCR }, + + /* Xilinx FSL related masks and macros */ +#define FSL FCRT + 1 +#define FSL_MASK (0x1f << 11) + { 0x1f, 11, 0, 0, PPC_OPERAND_FSL }, + + /* Xilinx UDI related masks and macros */ +#define URT FSL + 1 + { 0x1f, 21, 0, 0, PPC_OPERAND_UDI }, + +#define URA URT + 1 + { 0x1f, 16, 0, 0, PPC_OPERAND_UDI }, + +#define URB URA + 1 + { 0x1f, 11, 0, 0, PPC_OPERAND_UDI }, + +#define URC URB + 1 + { 0x1f, 6, 0, 0, PPC_OPERAND_UDI }, + + /* The VLESIMM field in a D form instruction. */ +#define VLESIMM URC + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_vlesi, extract_vlesi, + PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, + + /* The VLENSIMM field in a D form instruction. 
*/ +#define VLENSIMM VLESIMM + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_vlensi, extract_vlensi, + PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, + + /* The VLEUIMM field in a D form instruction. */ +#define VLEUIMM VLENSIMM + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_vleui, extract_vleui, 0 }, + + /* The VLEUIMML field in a D form instruction. */ +#define VLEUIMML VLEUIMM + 1 + { 0xffff, PPC_OPSHIFT_INV, insert_vleil, extract_vleil, 0 }, + + /* The XT and XS fields in an XX1 or XX3 form instruction. This is split. */ +#define XS6 VLEUIMML + 1 +#define XT6 XS6 + { 0x3f, PPC_OPSHIFT_INV, insert_xt6, extract_xt6, PPC_OPERAND_VSR }, + + /* The XT and XS fields in an DQ form VSX instruction. This is split. */ +#define XSQ6 XT6 + 1 +#define XTQ6 XSQ6 + { 0x3f, PPC_OPSHIFT_INV, insert_xtq6, extract_xtq6, PPC_OPERAND_VSR }, + + /* The XA field in an XX3 form instruction. This is split. */ +#define XA6 XTQ6 + 1 + { 0x3f, PPC_OPSHIFT_INV, insert_xa6, extract_xa6, PPC_OPERAND_VSR }, + + /* The XB field in an XX2 or XX3 form instruction. This is split. */ +#define XB6 XA6 + 1 + { 0x3f, PPC_OPSHIFT_INV, insert_xb6, extract_xb6, PPC_OPERAND_VSR }, + + /* The XB field in an XX3 form instruction when it must be the same as + the XA field in the instruction. This is used in extended mnemonics + like xvmovdp. This is split. */ +#define XB6S XB6 + 1 + { 0x3f, PPC_OPSHIFT_INV, insert_xb6s, extract_xb6s, PPC_OPERAND_FAKE }, + + /* The XC field in an XX4 form instruction. This is split. */ +#define XC6 XB6S + 1 + { 0x3f, PPC_OPSHIFT_INV, insert_xc6, extract_xc6, PPC_OPERAND_VSR }, + + /* The DM or SHW field in an XX3 form instruction. */ +#define DM XC6 + 1 +#define SHW DM + { 0x3, 8, NULL, NULL, 0 }, + + /* The DM field in an extended mnemonic XX3 form instruction. */ +#define DMEX DM + 1 + { 0x3, 8, insert_dm, extract_dm, 0 }, + + /* The UIM field in an XX2 form instruction. */ +#define UIM DMEX + 1 + /* The 2-bit UIMM field in a VX form instruction. */ +#define UIMM2 UIM + /* The 2-bit L field in a darn instruction. */ +#define LRAND UIM + { 0x3, 16, NULL, NULL, 0 }, + +#define ERAT_T UIM + 1 + { 0x7, 21, NULL, NULL, 0 }, + +#define IH ERAT_T + 1 + { 0x7, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, + + /* The 8-bit IMM8 field in a XX1 form instruction. */ +#define IMM8 IH + 1 + { 0xff, 11, NULL, NULL, PPC_OPERAND_SIGNOPT }, }; +const unsigned int num_powerpc_operands = (sizeof (powerpc_operands) + / sizeof (powerpc_operands[0])); + /* The functions used to insert and extract complicated operands. */ +/* The ARX, ARY, RX and RY operands are alternate encodings of GPRs. 
*/ + +static unsigned long +insert_arx (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + if (value >= 8 && value < 24) + return insn | ((value - 8) & 0xf); + else + { + *errmsg = _("invalid register"); + return 0; + } +} + +static long +extract_arx (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return (insn & 0xf) + 8; +} + +static unsigned long +insert_ary (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + if (value >= 8 && value < 24) + return insn | (((value - 8) & 0xf) << 4); + else + { + *errmsg = _("invalid register"); + return 0; + } +} + +static long +extract_ary (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 4) & 0xf) + 8; +} + +static unsigned long +insert_rx (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg) +{ + if (value >= 0 && value < 8) + return insn | value; + else if (value >= 24 && value <= 31) + return insn | (value - 16); + else + { + *errmsg = _("invalid register"); + return 0; + } +} + +static long +extract_rx (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + int value = insn & 0xf; + if (value >= 0 && value < 8) + return value; + else + return value + 16; +} + +static unsigned long +insert_ry (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg) +{ + if (value >= 0 && value < 8) + return insn | (value << 4); + else if (value >= 24 && value <= 31) + return insn | ((value - 16) << 4); + else + { + *errmsg = _("invalid register"); + return 0; + } +} + +static long +extract_ry (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + int value = (insn >> 4) & 0xf; + if (value >= 0 && value < 8) + return value; + else + return value + 16; +} + /* The BA field in an XL form instruction when it must be the same as the BT field in the same instruction. This operand is marked FAKE. The insertion function just copies the BT field into the BA field, @@ -615,7 +1086,7 @@ const struct powerpc_operand powerpc_operands[] = static unsigned long insert_bat (unsigned long insn, long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 21) & 0x1f) << 16); @@ -623,7 +1094,7 @@ insert_bat (unsigned long insn, static long extract_bat (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f)) @@ -640,7 +1111,7 @@ extract_bat (unsigned long insn, static unsigned long insert_bba (unsigned long insn, long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 16) & 0x1f) << 11); @@ -648,7 +1119,7 @@ insert_bba (unsigned long insn, static long extract_bba (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f)) @@ -656,26 +1127,6 @@ extract_bba (unsigned long insn, return 0; } -/* The BD field in a B form instruction. The lower two bits are - forced to zero. 
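The RX/RY and ARX/ARY helpers above pack the VLE-reachable GPR subset (r0-r7 and r24-r31) into a 4-bit field. A small standalone sketch of the same mapping, mirroring insert_rx and extract_rx (helper names are illustrative):

  #include <assert.h>

  /* r0-r7 encode as 0-7, r24-r31 as 8-15; other GPRs do not fit.  */
  static int gpr_to_rx (int reg)
  {
    if (reg >= 0 && reg < 8)
      return reg;
    if (reg >= 24 && reg <= 31)
      return reg - 16;
    return -1;                  /* not encodable in an RX field */
  }

  static int rx_to_gpr (int rx)
  {
    return rx < 8 ? rx : rx + 16;
  }

  int main (void)
  {
    assert (gpr_to_rx (26) == 10);      /* r26 -> field value 10 */
    assert (rx_to_gpr (10) == 26);      /* and back again        */
    assert (gpr_to_rx (12) == -1);      /* r12 is rejected       */
    return 0;
  }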
*/ - -static unsigned long -insert_bd (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) -{ - return insn | (value & 0xfffc); -} - -static long -extract_bd (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn & 0xfffc) ^ 0x8000) - 0x8000; -} - /* The BD field in a B form instruction when the - modifier is used. This modifier means that the branch is not expected to be taken. For chips built to versions of the architecture prior to version 2 @@ -687,15 +1138,21 @@ extract_bd (unsigned long insn, the "y" bit. "at" == 00 => no hint, "at" == 01 => unpredictable, "at" == 10 => not taken, "at" == 11 => taken. The "t" bit is 00001 in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000 - for branch on CTR. We only handle the taken/not-taken hint here. */ + for branch on CTR. We only handle the taken/not-taken hint here. + Note that we don't relax the conditions tested here when + disassembling with -Many because insns using extract_bdm and + extract_bdp always occur in pairs. One or the other will always + be valid. */ + +#define ISA_V2 (PPC_OPCODE_POWER4 | PPC_OPCODE_E500MC | PPC_OPCODE_TITAN) static unsigned long insert_bdm (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg ATTRIBUTE_UNUSED) { - if ((dialect & PPC_OPCODE_POWER4) == 0) + if ((dialect & ISA_V2) == 0) { if ((value & 0x8000) != 0) insn |= 1 << 21; @@ -712,10 +1169,10 @@ insert_bdm (unsigned long insn, static long extract_bdm (unsigned long insn, - int dialect, + ppc_cpu_t dialect, int *invalid) { - if ((dialect & PPC_OPCODE_POWER4) == 0) + if ((dialect & ISA_V2) == 0) { if (((insn & (1 << 21)) == 0) != ((insn & (1 << 15)) == 0)) *invalid = 1; @@ -737,10 +1194,10 @@ extract_bdm (unsigned long insn, static unsigned long insert_bdp (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg ATTRIBUTE_UNUSED) { - if ((dialect & PPC_OPCODE_POWER4) == 0) + if ((dialect & ISA_V2) == 0) { if ((value & 0x8000) == 0) insn |= 1 << 21; @@ -757,10 +1214,10 @@ insert_bdp (unsigned long insn, static long extract_bdp (unsigned long insn, - int dialect, + ppc_cpu_t dialect, int *invalid) { - if ((dialect & PPC_OPCODE_POWER4) == 0) + if ((dialect & ISA_V2) == 0) { if (((insn & (1 << 21)) == 0) == ((insn & (1 << 15)) == 0)) *invalid = 1; @@ -775,55 +1232,70 @@ extract_bdp (unsigned long insn, return ((insn & 0xfffc) ^ 0x8000) - 0x8000; } +static inline int +valid_bo_pre_v2 (long value) +{ + /* Certain encodings have bits that are required to be zero. + These are (z must be zero, y may be anything): + 0000y + 0001y + 001zy + 0100y + 0101y + 011zy + 1z00y + 1z01y + 1z1zz + */ + if ((value & 0x14) == 0) + return 1; + else if ((value & 0x14) == 0x4) + return (value & 0x2) == 0; + else if ((value & 0x14) == 0x10) + return (value & 0x8) == 0; + else + return value == 0x14; +} + +static inline int +valid_bo_post_v2 (long value) +{ + /* Certain encodings have bits that are required to be zero. + These are (z must be zero, a & t may be anything): + 0000z + 0001z + 001at + 0100z + 0101z + 011at + 1a00t + 1a01t + 1z1zz + */ + if ((value & 0x14) == 0) + return (value & 0x1) == 0; + else if ((value & 0x14) == 0x14) + return value == 0x14; + else + return 1; +} + /* Check for legal values of a BO field. 
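As a worked example of the tables above: BO = 0b10100 (0x14, "branch always") is accepted by both valid_bo_pre_v2 and valid_bo_post_v2, while BO = 0b10110 (0x16) is rejected by both because the z bits of the 1z1zz row must be zero. A tiny standalone paraphrase of just that row (function name is illustrative):

  #include <assert.h>

  /* The 1z1zz row: when bits 0b10100 are both set, every other bit
     must be zero, so only BO == 0x14 is legal.  */
  static int bo_1z1zz_ok (long bo)
  {
    return (bo & 0x14) != 0x14 || bo == 0x14;
  }

  int main (void)
  {
    assert (bo_1z1zz_ok (0x14));    /* branch always            */
    assert (!bo_1z1zz_ok (0x16));   /* stray z bit -> rejected  */
    return 0;
  }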
*/ static int -valid_bo (long value, int dialect) +valid_bo (long value, ppc_cpu_t dialect, int extract) { - if ((dialect & PPC_OPCODE_POWER4) == 0) - { - /* Certain encodings have bits that are required to be zero. - These are (z must be zero, y may be anything): - 001zy - 011zy - 1z00y - 1z01y - 1z1zz - */ - switch (value & 0x14) - { - default: - case 0: - return 1; - case 0x4: - return (value & 0x2) == 0; - case 0x10: - return (value & 0x8) == 0; - case 0x14: - return value == 0x14; - } - } + int valid_y = valid_bo_pre_v2 (value); + int valid_at = valid_bo_post_v2 (value); + + /* When disassembling with -Many, accept either encoding on the + second pass through opcodes. */ + if (extract && dialect == ~(ppc_cpu_t) PPC_OPCODE_ANY) + return valid_y || valid_at; + if ((dialect & ISA_V2) == 0) + return valid_y; else - { - /* Certain encodings have bits that are required to be zero. - These are (z must be zero, a & t may be anything): - 0000z - 0001z - 0100z - 0101z - 001at - 011at - 1a00t - 1a01t - 1z1zz - */ - if ((value & 0x14) == 0) - return (value & 0x1) == 0; - else if ((value & 0x14) == 0x14) - return value == 0x14; - else - return 1; - } + return valid_at; } /* The BO field in a B form instruction. Warn about attempts to set @@ -832,23 +1304,25 @@ valid_bo (long value, int dialect) static unsigned long insert_bo (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg) { - if (!valid_bo (value, dialect)) + if (!valid_bo (value, dialect, 0)) *errmsg = _("invalid conditional option"); + else if (PPC_OP (insn) == 19 && (insn & 0x400) && ! (value & 4)) + *errmsg = _("invalid counter access"); return insn | ((value & 0x1f) << 21); } static long extract_bo (unsigned long insn, - int dialect, + ppc_cpu_t dialect, int *invalid) { long value; value = (insn >> 21) & 0x1f; - if (!valid_bo (value, dialect)) + if (!valid_bo (value, dialect, 1)) *invalid = 1; return value; } @@ -860,11 +1334,13 @@ extract_bo (unsigned long insn, static unsigned long insert_boe (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg) { - if (!valid_bo (value, dialect)) + if (!valid_bo (value, dialect, 0)) *errmsg = _("invalid conditional option"); + else if (PPC_OP (insn) == 19 && (insn & 0x400) && ! (value & 4)) + *errmsg = _("invalid counter access"); else if ((value & 1) != 0) *errmsg = _("attempt to set y bit when using + or - modifier"); @@ -873,166 +1349,73 @@ insert_boe (unsigned long insn, static long extract_boe (unsigned long insn, - int dialect, + ppc_cpu_t dialect, int *invalid) { long value; value = (insn >> 21) & 0x1f; - if (!valid_bo (value, dialect)) + if (!valid_bo (value, dialect, 1)) *invalid = 1; return value & 0x1e; } -/* The DQ field in a DQ form instruction. This is like D, but the - lower four bits are forced to zero. */ - -static unsigned long -insert_dq (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if ((value & 0xf) != 0) - *errmsg = _("offset not a multiple of 16"); - return insn | (value & 0xfff0); -} - -static long -extract_dq (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn & 0xfff0) ^ 0x8000) - 0x8000; -} +/* The DCMX field in a X form instruction when the field is split + into separate DC, DM and DX fields. 
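The branch-displacement extractors above sign-extend a 16-bit field with the (x ^ 0x8000) - 0x8000 idiom; a quick standalone check of that idiom (function name is illustrative):

  #include <assert.h>

  static long sext16 (long x)
  {
    return (x ^ 0x8000) - 0x8000;
  }

  int main (void)
  {
    assert (sext16 (0x7ffc) == 0x7ffc);   /* positive offsets unchanged */
    assert (sext16 (0xfffc) == -4);       /* top bit set -> negative    */
    return 0;
  }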
*/ static unsigned long -insert_ev2 (unsigned long insn, +insert_dcmxs (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) { - if ((value & 1) != 0) - *errmsg = _("offset not a multiple of 2"); - if ((value > 62) != 0) - *errmsg = _("offset greater than 62"); - return insn | ((value & 0x3e) << 10); + return insn | ((value & 0x1f) << 16) | ((value & 0x20) >> 3) | (value & 0x40); } static long -extract_ev2 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, +extract_dcmxs (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { - return (insn >> 10) & 0x3e; + return (insn & 0x40) | ((insn << 3) & 0x20) | ((insn >> 16) & 0x1f); } -static unsigned long -insert_ev4 (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if ((value & 3) != 0) - *errmsg = _("offset not a multiple of 4"); - if ((value > 124) != 0) - *errmsg = _("offset greater than 124"); - return insn | ((value & 0x7c) << 9); -} - -static long -extract_ev4 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return (insn >> 9) & 0x7c; -} +/* The D field in a DX form instruction when the field is split + into separate D0, D1 and D2 fields. */ static unsigned long -insert_ev8 (unsigned long insn, +insert_dxd (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) { - if ((value & 7) != 0) - *errmsg = _("offset not a multiple of 8"); - if ((value > 248) != 0) - *errmsg = _("offset greater than 248"); - return insn | ((value & 0xf8) << 8); + return insn | (value & 0xffc1) | ((value & 0x3e) << 15); } static long -extract_ev8 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, +extract_dxd (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { - return (insn >> 8) & 0xf8; + unsigned long dxd = (insn & 0xffc1) | ((insn >> 15) & 0x3e); + return (dxd ^ 0x8000) - 0x8000; } -/* The DS field in a DS form instruction. This is like D, but the - lower two bits are forced to zero. */ - static unsigned long -insert_ds (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if ((value & 3) != 0) - *errmsg = _("offset not a multiple of 4"); - return insn | (value & 0xfffc); -} - -static long -extract_ds (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return ((insn & 0xfffc) ^ 0x8000) - 0x8000; -} - -/* The DE field in a DE form instruction. */ - -static unsigned long -insert_de (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if (value > 2047 || value < -2048) - *errmsg = _("offset not between -2048 and 2047"); - return insn | ((value << 4) & 0xfff0); -} - -static long -extract_de (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) -{ - return (insn & 0xfff0) >> 4; -} - -/* The DES field in a DES form instruction. 
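A round-trip check of the split DX-form D field handled by insert_dxd and extract_dxd above: the five displacement bits selected by 0x3e are relocated 15 bits higher, the remaining bits stay in place, and the extracted value is sign-extended (helper names here are illustrative):

  #include <assert.h>

  static unsigned long dxd_field (long v)
  {
    return (v & 0xffc1) | ((v & 0x3e) << 15);
  }

  static long dxd_value (unsigned long insn)
  {
    unsigned long d = (insn & 0xffc1) | ((insn >> 15) & 0x3e);
    return (d ^ 0x8000) - 0x8000;
  }

  int main (void)
  {
    assert (dxd_value (dxd_field (0x1234)) == 0x1234);
    assert (dxd_value (dxd_field (-4 & 0xffff)) == -4);
    return 0;
  }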
*/ - -static unsigned long -insert_des (unsigned long insn, +insert_dxdn (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) { - if (value > 8191 || value < -8192) - *errmsg = _("offset not between -8192 and 8191"); - else if ((value & 3) != 0) - *errmsg = _("offset not a multiple of 4"); - return insn | ((value << 2) & 0xfff0); + return insert_dxd (insn, -value, dialect, errmsg); } static long -extract_des (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, +extract_dxdn (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { - return (((insn >> 2) & 0x3ffc) ^ 0x2000) - 0x2000; + return -extract_dxd (insn, dialect, invalid); } /* FXM mask in mfcr and mtcrf instructions. */ @@ -1040,7 +1423,7 @@ extract_des (unsigned long insn, static unsigned long insert_fxm (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg) { /* If we're handling the mfocrf and mtocrf insns ensure that exactly @@ -1054,19 +1437,13 @@ insert_fxm (unsigned long insn, } } - /* If the optional field on mfcr is missing that means we want to use - the old form of the instruction that moves the whole cr. In that - case we'll have VALUE zero. There doesn't seem to be a way to - distinguish this from the case where someone writes mfcr %r3,0. */ - else if (value == 0) - ; - /* If only one bit of the FXM field is set, we can use the new form of the instruction, which is faster. Unlike the Power4 branch hint encoding, this is not backward compatible. Do not generate the new form unless -mpower4 has been given, or -many and the two operand form of mfcr was used. */ - else if ((value & -value) == value + else if (value > 0 + && (value & -value) == value && ((dialect & PPC_OPCODE_POWER4) != 0 || ((dialect & PPC_OPCODE_ANY) != 0 && (insn & (0x3ff << 1)) == 19 << 1))) @@ -1075,7 +1452,10 @@ insert_fxm (unsigned long insn, /* Any other value on mfcr is an error. */ else if ((insn & (0x3ff << 1)) == 19 << 1) { - *errmsg = _("ignoring invalid mfcr mask"); + /* A value of -1 means we used the one operand form of + mfcr which is valid. */ + if (value != -1) + *errmsg = _("invalid mfcr mask"); value = 0; } @@ -1084,7 +1464,7 @@ insert_fxm (unsigned long insn, static long extract_fxm (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long mask = (insn >> 12) & 0xff; @@ -1102,31 +1482,86 @@ extract_fxm (unsigned long insn, { if (mask != 0) *invalid = 1; + else + mask = -1; } return mask; } -/* The LI field in an I form instruction. The lower two bits are - forced to zero. */ +static unsigned long +insert_li20 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0xf0000) >> 5) | ((value & 0x0f800) << 5) | (value & 0x7ff); +} + +static long +extract_li20 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + long ext = ((insn & 0x4000) == 0x4000) ? 0xfff00000 : 0x00000000; + + return ext + | (((insn >> 11) & 0xf) << 16) + | (((insn >> 17) & 0xf) << 12) + | (((insn >> 16) & 0x1) << 11) + | (insn & 0x7ff); +} + +/* The 2-bit L field in a SYNC or WC field in a WAIT instruction. + For SYNC, some L values are reserved: + * Value 3 is reserved on newer server cpus. + * Values 2 and 3 are reserved on all other cpus. 
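The 20-bit LI20 immediate above is scattered across three instruction fields; the sketch below mirrors insert_li20 and extract_li20 (with the sign extension written as ~0xfffff for readability) and checks that a value round-trips:

  #include <assert.h>

  static unsigned long li20_field (long v)
  {
    return ((v & 0xf0000) >> 5) | ((v & 0x0f800) << 5) | (v & 0x7ff);
  }

  static long li20_value (unsigned long insn)
  {
    long ext = (insn & 0x4000) ? ~(long) 0xfffff : 0;
    return ext
           | (((insn >> 11) & 0xf) << 16)
           | (((insn >> 17) & 0xf) << 12)
           | (((insn >> 16) & 0x1) << 11)
           | (insn & 0x7ff);
  }

  int main (void)
  {
    assert (li20_value (li20_field (0x2345A)) == 0x2345A);
    return 0;
  }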
*/ static unsigned long -insert_li (unsigned long insn, +insert_ls (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect, const char **errmsg) { - if ((value & 3) != 0) - *errmsg = _("ignoring least significant bits in branch offset"); - return insn | (value & 0x3fffffc); + /* For SYNC, some L values are illegal. */ + if (((insn >> 1) & 0x3ff) == 598) + { + long max_lvalue = (dialect & PPC_OPCODE_POWER4) ? 2 : 1; + if (value > max_lvalue) + { + *errmsg = _("illegal L operand value"); + return insn; + } + } + + return insn | ((value & 0x3) << 21); } -static long -extract_li (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) +/* The 4-bit E field in a sync instruction that accepts 2 operands. + If ESYNC is non-zero, then the L field must be either 0 or 1 and + the complement of ESYNC-bit2. */ + +static unsigned long +insert_esync (unsigned long insn, + long value, + ppc_cpu_t dialect, + const char **errmsg) { - return ((insn & 0x3fffffc) ^ 0x2000000) - 0x2000000; + unsigned long ls = (insn >> 21) & 0x03; + + if (value == 0) + { + if (((dialect & PPC_OPCODE_E6500) != 0 && ls > 1) + || ((dialect & PPC_OPCODE_POWER9) != 0 && ls > 2)) + *errmsg = _("illegal L operand value"); + return insn; + } + + if ((ls & ~0x1) + || (((value >> 1) & 0x1) ^ ls) == 0) + *errmsg = _("incompatible L operand value"); + + return insn | ((value & 0xf) << 16); } /* The MB and ME fields in an M form instruction expressed as a single @@ -1137,7 +1572,7 @@ extract_li (unsigned long insn, static unsigned long insert_mbe (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { unsigned long uval, mask; @@ -1189,7 +1624,7 @@ insert_mbe (unsigned long insn, static long extract_mbe (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { long ret; @@ -1223,7 +1658,7 @@ extract_mbe (unsigned long insn, static unsigned long insert_mb6 (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 6) | (value & 0x20); @@ -1231,7 +1666,7 @@ insert_mb6 (unsigned long insn, static long extract_mb6 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 6) & 0x1f) | (insn & 0x20); @@ -1240,22 +1675,9 @@ extract_mb6 (unsigned long insn, /* The NB field in an X form instruction. The value 32 is stored as 0. */ -static unsigned long -insert_nb (unsigned long insn, - long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) -{ - if (value < 0 || value > 32) - *errmsg = _("value out of range"); - if (value == 32) - value = 0; - return insn | ((value & 0x1f) << 11); -} - static long extract_nb (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { long ret; @@ -1266,6 +1688,26 @@ extract_nb (unsigned long insn, return ret; } +/* The NB field in an lswi instruction, which has special value + restrictions. The value 32 is stored as 0. */ + +static unsigned long +insert_nbi (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + long rtvalue = (insn & RT_MASK) >> 21; + long ravalue = (insn & RA_MASK) >> 16; + + if (value == 0) + value = 32; + if (rtvalue + (value + 3) / 4 > (rtvalue > ravalue ? 
ravalue + 32 + : ravalue)) + *errmsg = _("address register in load range"); + return insn | ((value & 0x1f) << 11); +} + /* The NSI field in a D form instruction. This is the same as the SI field, only negated. The extraction function always marks it as invalid, since we never want to recognize an instruction which uses @@ -1274,7 +1716,7 @@ extract_nb (unsigned long insn, static unsigned long insert_nsi (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (-value & 0xffff); @@ -1282,7 +1724,7 @@ insert_nsi (unsigned long insn, static long extract_nsi (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { *invalid = 1; @@ -1296,7 +1738,7 @@ extract_nsi (unsigned long insn, static unsigned long insert_ral (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value == 0 @@ -1311,7 +1753,7 @@ insert_ral (unsigned long insn, static unsigned long insert_ram (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if ((unsigned long) value >= ((insn >> 21) & 0x1f)) @@ -1319,13 +1761,13 @@ insert_ram (unsigned long insn, return insn | ((value & 0x1f) << 16); } -/* The RA field in the DQ form lq instruction, which has special +/* The RA field in the DQ form lq or an lswx instruction, which have special value restrictions. */ static unsigned long insert_raq (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { long rtvalue = (insn & RT_MASK) >> 21; @@ -1342,7 +1784,7 @@ insert_raq (unsigned long insn, static unsigned long insert_ras (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { if (value == 0) @@ -1359,7 +1801,7 @@ insert_ras (unsigned long insn, static unsigned long insert_rbs (unsigned long insn, long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | (((insn >> 21) & 0x1f) << 11); @@ -1367,7 +1809,7 @@ insert_rbs (unsigned long insn, static long extract_rbs (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid) { if (((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f)) @@ -1375,32 +1817,155 @@ extract_rbs (unsigned long insn, return 0; } -/* The RT field of the DQ form lq instruction, which has special - value restrictions. */ +/* The RB field in an lswx instruction, which has special value + restrictions. */ static unsigned long -insert_rtq (unsigned long insn, +insert_rbx (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg) { - if ((value & 1) != 0) - *errmsg = _("target register operand must be even"); - return insn | ((value & 0x1f) << 21); + long rtvalue = (insn & RT_MASK) >> 21; + + if (value == rtvalue) + *errmsg = _("source and target register operands must be different"); + return insn | ((value & 0x1f) << 11); } -/* The RS field of the DS form stq instruction, which has special - value restrictions. */ +/* The SCI8 field is made up of SCL and {U,N}I8 fields. 
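A worked example of the scaled immediate: 0x00AB0000 has its only non-zero byte in byte 2, so insert_sci8 below emits SCL = 2 with UI8 = 0xAB and no fill, and extract_sci8 shifts 0xAB back up by 16 bits. The decoder below restates extract_sci8 in equivalent form (names are illustrative):

  #include <assert.h>

  /* An SCI8 field is F | SCL | UI8: UI8 shifted left by 8*SCL, with the
     remaining bytes filled with ones when F is set.  */
  static long sci8_decode (unsigned long field)
  {
    int fill = field & 0x400;
    int shift = ((field & 0x300) >> 8) * 8;
    long value = (field & 0xff) << shift;
    if (fill)
      value |= ~((long) 0xff << shift);
    return value;
  }

  int main (void)
  {
    assert (sci8_decode (0x2AB) == 0x00AB0000);   /* SCL=2, UI8=0xAB        */
    assert (sci8_decode (0x400) == ~0xffL);       /* fill set, SCL=0, UI8=0 */
    return 0;
  }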
*/ +static unsigned long +insert_sci8 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg) +{ + unsigned int fill_scale = 0; + unsigned long ui8 = value; + + if ((ui8 & 0xffffff00) == 0) + ; + else if ((ui8 & 0xffffff00) == 0xffffff00) + fill_scale = 0x400; + else if ((ui8 & 0xffff00ff) == 0) + { + fill_scale = 1 << 8; + ui8 >>= 8; + } + else if ((ui8 & 0xffff00ff) == 0xffff00ff) + { + fill_scale = 0x400 | (1 << 8); + ui8 >>= 8; + } + else if ((ui8 & 0xff00ffff) == 0) + { + fill_scale = 2 << 8; + ui8 >>= 16; + } + else if ((ui8 & 0xff00ffff) == 0xff00ffff) + { + fill_scale = 0x400 | (2 << 8); + ui8 >>= 16; + } + else if ((ui8 & 0x00ffffff) == 0) + { + fill_scale = 3 << 8; + ui8 >>= 24; + } + else if ((ui8 & 0x00ffffff) == 0x00ffffff) + { + fill_scale = 0x400 | (3 << 8); + ui8 >>= 24; + } + else + { + *errmsg = _("illegal immediate value"); + ui8 = 0; + } + + return insn | fill_scale | (ui8 & 0xff); +} + +static long +extract_sci8 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + int fill = insn & 0x400; + int scale_factor = (insn & 0x300) >> 5; + long value = (insn & 0xff) << scale_factor; + + if (fill != 0) + value |= ~((long) 0xff << scale_factor); + return value; +} static unsigned long -insert_rsq (unsigned long insn, - long value ATTRIBUTE_UNUSED, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg) +insert_sci8n (unsigned long insn, + long value, + ppc_cpu_t dialect, + const char **errmsg) { - if ((value & 1) != 0) - *errmsg = _("source register operand must be even"); - return insn | ((value & 0x1f) << 21); + return insert_sci8 (insn, -value, dialect, errmsg); +} + +static long +extract_sci8n (unsigned long insn, + ppc_cpu_t dialect, + int *invalid) +{ + return -extract_sci8 (insn, dialect, invalid); +} + +static unsigned long +insert_sd4h (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1e) << 7); +} + +static long +extract_sd4h (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 8) & 0xf) << 1; +} + +static unsigned long +insert_sd4w (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x3c) << 6); +} + +static long +extract_sd4w (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 8) & 0xf) << 2; +} + +static unsigned long +insert_oimm (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | (((value - 1) & 0x1f) << 4); +} + +static long +extract_oimm (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 4) & 0x1f) + 1; } /* The SH field in an MD form instruction. This is split. */ @@ -1408,18 +1973,26 @@ insert_rsq (unsigned long insn, static unsigned long insert_sh6 (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { - return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); + /* SH6 operand in the rldixor instructions. 
*/ + if (PPC_OP (insn) == 4) + return insn | ((value & 0x1f) << 6) | ((value & 0x20) >> 5); + else + return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); } static long extract_sh6 (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { - return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20); + /* SH6 operand in the rldixor instructions. */ + if (PPC_OP (insn) == 4) + return ((insn >> 6) & 0x1f) | ((insn << 5) & 0x20); + else + return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20); } /* The SPR field in an XFX form instruction. This is flipped--the @@ -1428,7 +2001,7 @@ extract_sh6 (unsigned long insn, static unsigned long insert_spr (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, const char **errmsg ATTRIBUTE_UNUSED) { return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); @@ -1436,26 +2009,23 @@ insert_spr (unsigned long insn, static long extract_spr (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, int *invalid ATTRIBUTE_UNUSED) { return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); } /* Some dialects have 8 SPRG registers instead of the standard 4. */ +#define ALLOW8_SPRG (PPC_OPCODE_BOOKE | PPC_OPCODE_405) static unsigned long insert_sprg (unsigned long insn, long value, - int dialect, + ppc_cpu_t dialect, const char **errmsg) { - /* This check uses PPC_OPCODE_403 because PPC405 is later defined - as a synonym. If ever a 405 specific dialect is added this - check should use that instead. */ if (value > 7 - || (value > 3 - && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0)) + || (value > 3 && (dialect & ALLOW8_SPRG) == 0)) *errmsg = _("invalid sprg number"); /* If this is mfsprg4..7 then use spr 260..263 which can be read in @@ -1468,54 +2038,272 @@ insert_sprg (unsigned long insn, static long extract_sprg (unsigned long insn, - int dialect, + ppc_cpu_t dialect, int *invalid) { unsigned long val = (insn >> 16) & 0x1f; /* mfsprg can use 260..263 and 272..279. mtsprg only uses spr 272..279 - If not BOOKE or 405, then both use only 272..275. */ - if (val <= 3 - || (val < 0x10 && (insn & 0x100) != 0) - || (val - 0x10 > 3 - && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0)) + If not BOOKE, 405 or VLE, then both use only 272..275. */ + if ((val - 0x10 > 3 && (dialect & ALLOW8_SPRG) == 0) + || (val - 0x10 > 7 && (insn & 0x100) != 0) + || val <= 3 + || (val & 8) != 0) *invalid = 1; return val & 7; } /* The TBR field in an XFX instruction. This is just like SPR, but it - is optional. When TBR is omitted, it must be inserted as 268 (the - magic number of the TB register). These functions treat 0 - (indicating an omitted optional operand) as 268. This means that - ``mftb 4,0'' is not handled correctly. This does not matter very - much, since the architecture manual does not define mftb as - accepting any values other than 268 or 269. */ - -#define TB (268) + is optional. 
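Both insert_spr above and insert_tbr below store the architectural register number with its two 5-bit halves swapped in the instruction image; a round-trip sketch of that flipped layout (helper names are illustrative):

  #include <assert.h>

  static unsigned long spr_field (long spr)
  {
    return ((spr & 0x1f) << 16) | ((spr & 0x3e0) << 6);
  }

  static long spr_number (unsigned long insn)
  {
    return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
  }

  int main (void)
  {
    /* 268 is the time base register number used by mftb.  */
    assert (spr_number (spr_field (268)) == 268);
    return 0;
  }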
*/ static unsigned long insert_tbr (unsigned long insn, long value, - int dialect ATTRIBUTE_UNUSED, - const char **errmsg ATTRIBUTE_UNUSED) + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg) { - if (value == 0) - value = TB; + if (value != 268 && value != 269) + *errmsg = _("invalid tbr number"); return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6); } static long extract_tbr (unsigned long insn, - int dialect ATTRIBUTE_UNUSED, - int *invalid ATTRIBUTE_UNUSED) + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid) { long ret; ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0); - if (ret == TB) - ret = 0; + if (ret != 268 && ret != 269) + *invalid = 1; return ret; } + +/* The XT and XS fields in an XX1 or XX3 form instruction. This is split. */ + +static unsigned long +insert_xt6 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1f) << 21) | ((value & 0x20) >> 5); +} + +static long +extract_xt6 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn << 5) & 0x20) | ((insn >> 21) & 0x1f); +} + +/* The XT and XS fields in an DQ form VSX instruction. This is split. */ +static unsigned long +insert_xtq6 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1f) << 21) | ((value & 0x20) >> 2); +} + +static long +extract_xtq6 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn << 2) & 0x20) | ((insn >> 21) & 0x1f); +} + +/* The XA field in an XX3 form instruction. This is split. */ + +static unsigned long +insert_xa6 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1f) << 16) | ((value & 0x20) >> 3); +} + +static long +extract_xa6 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn << 3) & 0x20) | ((insn >> 16) & 0x1f); +} + +/* The XB field in an XX3 form instruction. This is split. */ + +static unsigned long +insert_xb6 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4); +} + +static long +extract_xb6 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn << 4) & 0x20) | ((insn >> 11) & 0x1f); +} + +/* The XB field in an XX3 form instruction when it must be the same as + the XA field in the instruction. This is used for extended + mnemonics like xvmovdp. This operand is marked FAKE. The insertion + function just copies the XA field into the XB field, and the + extraction function just checks that the fields are the same. */ + +static unsigned long +insert_xb6s (unsigned long insn, + long value ATTRIBUTE_UNUSED, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | (((insn >> 16) & 0x1f) << 11) | (((insn >> 2) & 0x1) << 1); +} + +static long +extract_xb6s (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid) +{ + if ((((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f)) + || (((insn >> 2) & 0x1) != ((insn >> 1) & 0x1))) + *invalid = 1; + return 0; +} + +/* The XC field in an XX4 form instruction. This is split. 
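The 6-bit VSX register numbers above are split: insert_xt6 puts bits 0-4 at bit 21 and bit 5 in the low opcode bit, and the XA6, XB6 and XC6 helpers apply the same scheme at other positions. A round-trip sketch of the XT6 case (helper names are illustrative):

  #include <assert.h>

  static unsigned long xt6_field (long vsr)
  {
    return ((vsr & 0x1f) << 21) | ((vsr & 0x20) >> 5);
  }

  static long xt6_register (unsigned long insn)
  {
    return ((insn << 5) & 0x20) | ((insn >> 21) & 0x1f);
  }

  int main (void)
  {
    assert (xt6_register (xt6_field (45)) == 45);   /* vs45 round-trips */
    return 0;
  }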
*/ + +static unsigned long +insert_xc6 (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0x1f) << 6) | ((value & 0x20) >> 2); +} + +static long +extract_xc6 (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn << 2) & 0x20) | ((insn >> 6) & 0x1f); +} + +static unsigned long +insert_dm (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg) +{ + if (value != 0 && value != 1) + *errmsg = _("invalid constant"); + return insn | (((value) ? 3 : 0) << 8); +} + +static long +extract_dm (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid) +{ + long value; + + value = (insn >> 8) & 3; + if (value != 0 && value != 3) + *invalid = 1; + return (value) ? 1 : 0; +} + +/* The VLESIMM field in an I16A form instruction. This is split. */ + +static unsigned long +insert_vlesi (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0xf800) << 10) | (value & 0x7ff); +} + +static long +extract_vlesi (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + long value = ((insn >> 10) & 0xf800) | (insn & 0x7ff); + value = (value ^ 0x8000) - 0x8000; + return value; +} + +static unsigned long +insert_vlensi (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + value = -value; + return insn | ((value & 0xf800) << 10) | (value & 0x7ff); +} +static long +extract_vlensi (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + long value = ((insn >> 10) & 0xf800) | (insn & 0x7ff); + value = (value ^ 0x8000) - 0x8000; + /* Don't use for disassembly. */ + *invalid = 1; + return -value; +} + +/* The VLEUIMM field in an I16A form instruction. This is split. */ + +static unsigned long +insert_vleui (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0xf800) << 10) | (value & 0x7ff); +} + +static long +extract_vleui (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 10) & 0xf800) | (insn & 0x7ff); +} + +/* The VLEUIMML field in an I16L form instruction. This is split. */ + +static unsigned long +insert_vleil (unsigned long insn, + long value, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((value & 0xf800) << 5) | (value & 0x7ff); +} + +static long +extract_vleil (unsigned long insn, + ppc_cpu_t dialect ATTRIBUTE_UNUSED, + int *invalid ATTRIBUTE_UNUSED) +{ + return ((insn >> 5) & 0xf800) | (insn & 0x7ff); +} + /* Macros used to form opcodes. */ @@ -1535,6 +2323,17 @@ extract_tbr (unsigned long insn, #define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21)) #define OPL_MASK OPL (0x3f,1) +/* The main opcode combined with an update code in D form instruction. + Used for extended mnemonics for VLE memory instructions. */ +#define OPVUP(x,vup) (OP (x) | ((((unsigned long)(vup)) & 0xff) << 8)) +#define OPVUP_MASK OPVUP (0x3f, 0xff) + +/* The main opcode combined with an update code and the RT fields specified in + D form instruction. Used for VLE volatile context save/restore + instructions. 
*/ +#define OPVUPRT(x,vup,rt) (OPVUP (x, vup) | ((((unsigned long)(rt)) & 0x1f) << 21)) +#define OPVUPRT_MASK OPVUPRT (0x3f, 0xff, 0x1f) + /* An A form instruction. */ #define A(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1) | (((unsigned long)(rc)) & 1)) #define A_MASK A (0x3f, 0x1f, 1) @@ -1555,6 +2354,43 @@ extract_tbr (unsigned long insn, #define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1)) #define B_MASK B (0x3f, 1, 1) +/* A BD8 form instruction. This is a 16-bit instruction. */ +#define BD8(op, aa, lk) (((((unsigned long)(op)) & 0x3f) << 10) | (((aa) & 1) << 9) | (((lk) & 1) << 8)) +#define BD8_MASK BD8 (0x3f, 1, 1) + +/* Another BD8 form instruction. This is a 16-bit instruction. */ +#define BD8IO(op) ((((unsigned long)(op)) & 0x1f) << 11) +#define BD8IO_MASK BD8IO (0x1f) + +/* A BD8 form instruction for simplified mnemonics. */ +#define EBD8IO(op, bo, bi) (BD8IO ((op)) | ((bo) << 10) | ((bi) << 8)) +/* A mask that excludes BO32 and BI32. */ +#define EBD8IO1_MASK 0xf800 +/* A mask that includes BO32 and excludes BI32. */ +#define EBD8IO2_MASK 0xfc00 +/* A mask that include BO32 AND BI32. */ +#define EBD8IO3_MASK 0xff00 + +/* A BD15 form instruction. */ +#define BD15(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 0xf) << 22) | ((lk) & 1)) +#define BD15_MASK BD15 (0x3f, 0xf, 1) + +/* A BD15 form instruction for extended conditional branch mnemonics. */ +#define EBD15(op, aa, bo, lk) (((op) & 0x3f) << 26) | (((aa) & 0xf) << 22) | (((bo) & 0x3) << 20) | ((lk) & 1) +#define EBD15_MASK 0xfff00001 + +/* A BD15 form instruction for extended conditional branch mnemonics with BI. */ +#define EBD15BI(op, aa, bo, bi, lk) (((op) & 0x3f) << 26) \ + | (((aa) & 0xf) << 22) \ + | (((bo) & 0x3) << 20) \ + | (((bi) & 0x3) << 16) \ + | ((lk) & 1) +#define EBD15BI_MASK 0xfff30001 + +/* A BD24 form instruction. */ +#define BD24(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 25) | ((lk) & 1)) +#define BD24_MASK BD24 (0x3f, 1, 1) + /* A B form instruction setting the BO field. */ #define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) #define BBO_MASK BBO (0x3f, 0x1f, 1, 1) @@ -1562,7 +2398,7 @@ extract_tbr (unsigned long insn, /* A BBO_MASK with the y bit of the BO field removed. This permits matching a conditional branch regardless of the setting of the y bit. Similarly for the 'at' bits used for power4 branch hints. */ -#define Y_MASK (((unsigned long) 1) << 21) +#define Y_MASK (((unsigned long) 1) << 21) #define AT1_MASK (((unsigned long) 3) << 21) #define AT2_MASK (((unsigned long) 9) << 21) #define BBOY_MASK (BBO_MASK &~ Y_MASK) @@ -1583,33 +2419,63 @@ extract_tbr (unsigned long insn, #define BBOYBI_MASK (BBOYCB_MASK | BI_MASK) #define BBOATBI_MASK (BBOAT2CB_MASK | BI_MASK) +/* A VLE C form instruction. */ +#define C_LK(x, lk) (((((unsigned long)(x)) & 0x7fff) << 1) | ((lk) & 1)) +#define C_LK_MASK C_LK(0x7fff, 1) +#define C(x) ((((unsigned long)(x)) & 0xffff)) +#define C_MASK C(0xffff) + /* An Context form instruction. */ #define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7)) #define CTX_MASK CTX(0x3f, 0x7) -/* An User Context form instruction. */ +/* A User Context form instruction. */ #define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) #define UCTX_MASK UCTX(0x3f, 0x1f) /* The main opcode mask with the RA field clear. */ #define DRA_MASK (OP_MASK | RA_MASK) +/* A DQ form VSX instruction. 
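The 16-bit VLE BD8 form built by the BD8 macro above packs a 6-bit major opcode at bit 10 with the AA and LK bits just below it, leaving an 8-bit displacement. A small check that the fully-set selector bits cover exactly the top byte, mirroring BD8_MASK (function name is illustrative):

  #include <assert.h>

  static unsigned long sketch_bd8 (unsigned long op, unsigned long aa, unsigned long lk)
  {
    return ((op & 0x3f) << 10) | ((aa & 1) << 9) | ((lk & 1) << 8);
  }

  int main (void)
  {
    assert (sketch_bd8 (0x3f, 1, 1) == 0xff00);   /* cf. BD8_MASK above */
    return 0;
  }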
*/ +#define DQX(op, xop) (OP (op) | ((xop) & 0x7)) +#define DQX_MASK DQX (0x3f, 7) + /* A DS form instruction. */ #define DSO(op, xop) (OP (op) | ((xop) & 0x3)) #define DS_MASK DSO (0x3f, 3) -/* A DE form instruction. */ -#define DEO(op, xop) (OP (op) | ((xop) & 0xf)) -#define DE_MASK DEO (0x3e, 0xf) +/* An DX form instruction. */ +#define DX(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) +#define DX_MASK DX (0x3f, 0x1f) /* An EVSEL form instruction. */ #define EVSEL(op, xop) (OP (op) | (((unsigned long)(xop)) & 0xff) << 3) #define EVSEL_MASK EVSEL(0x3f, 0xff) +/* An IA16 form instruction. */ +#define IA16(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) +#define IA16_MASK IA16(0x3f, 0x1f) + +/* An I16A form instruction. */ +#define I16A(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) +#define I16A_MASK I16A(0x3f, 0x1f) + +/* An I16L form instruction. */ +#define I16L(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f) << 11) +#define I16L_MASK I16L(0x3f, 0x1f) + +/* An IM7 form instruction. */ +#define IM7(op) ((((unsigned long)(op)) & 0x1f) << 11) +#define IM7_MASK IM7(0x1f) + /* An M form instruction. */ #define M(op, rc) (OP (op) | ((rc) & 1)) #define M_MASK M (0x3f, 1) +/* An LI20 form instruction. */ +#define LI20(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1) << 15) +#define LI20_MASK LI20(0x3f, 0x1) + /* An M form instruction with the ME field specified. */ #define MME(op, me, rc) (M ((op), (rc)) | ((((unsigned long)(me)) & 0x1f) << 1)) @@ -1640,44 +2506,189 @@ extract_tbr (unsigned long insn, #define SC(op, sa, lk) (OP (op) | ((((unsigned long)(sa)) & 1) << 1) | ((lk) & 1)) #define SC_MASK (OP_MASK | (((unsigned long)0x3ff) << 16) | (((unsigned long)1) << 1) | 1) -/* An VX form instruction. */ +/* An SCI8 form instruction. */ +#define SCI8(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 11)) +#define SCI8_MASK SCI8(0x3f, 0x1f) + +/* An SCI8 form instruction. */ +#define SCI8BF(op, fop, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 11) | (((fop) & 7) << 23)) +#define SCI8BF_MASK SCI8BF(0x3f, 7, 0x1f) + +/* An SD4 form instruction. This is a 16-bit instruction. */ +#define SD4(op) ((((unsigned long)(op)) & 0xf) << 12) +#define SD4_MASK SD4(0xf) + +/* An SE_IM5 form instruction. This is a 16-bit instruction. */ +#define SE_IM5(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x1) << 9)) +#define SE_IM5_MASK SE_IM5(0x3f, 1) + +/* An SE_R form instruction. This is a 16-bit instruction. */ +#define SE_R(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x3f) << 4)) +#define SE_R_MASK SE_R(0x3f, 0x3f) + +/* An SE_RR form instruction. This is a 16-bit instruction. */ +#define SE_RR(op, xop) (((((unsigned long)(op)) & 0x3f) << 10) | (((xop) & 0x3) << 8)) +#define SE_RR_MASK SE_RR(0x3f, 3) + +/* A VX form instruction. */ #define VX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff)) /* The mask for an VX form instruction. */ #define VX_MASK VX(0x3f, 0x7ff) -/* An VA form instruction. */ +/* A VX_MASK with the VA field fixed. */ +#define VXVA_MASK (VX_MASK | (0x1f << 16)) + +/* A VX_MASK with the VB field fixed. */ +#define VXVB_MASK (VX_MASK | (0x1f << 11)) + +/* A VX_MASK with the VA and VB fields fixed. */ +#define VXVAVB_MASK (VX_MASK | (0x1f << 16) | (0x1f << 11)) + +/* A VX_MASK with the VD and VA fields fixed. */ +#define VXVDVA_MASK (VX_MASK | (0x1f << 21) | (0x1f << 16)) + +/* A VX_MASK with a UIMM4 field. */ +#define VXUIMM4_MASK (VX_MASK | (0x1 << 20)) + +/* A VX_MASK with a UIMM3 field. 
*/ +#define VXUIMM3_MASK (VX_MASK | (0x3 << 19)) + +/* A VX_MASK with a UIMM2 field. */ +#define VXUIMM2_MASK (VX_MASK | (0x7 << 18)) + +/* A VX_MASK with a PS field. */ +#define VXPS_MASK (VX_MASK & ~(0x1 << 9)) + +/* A VX_MASK with the VA field fixed with a PS field. */ +#define VXVAPS_MASK ((VX_MASK | (0x1f << 16)) & ~(0x1 << 9)) + +/* A VA form instruction. */ #define VXA(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x03f)) /* The mask for an VA form instruction. */ #define VXA_MASK VXA(0x3f, 0x3f) -/* An VXR form instruction. */ +/* A VXA_MASK with a SHB field. */ +#define VXASHB_MASK (VXA_MASK | (1 << 10)) + +/* A VXR form instruction. */ #define VXR(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | (((unsigned long)(xop)) & 0x3ff)) /* The mask for a VXR form instruction. */ #define VXR_MASK VXR(0x3f, 0x3ff, 1) +/* A VX form instruction with a VA tertiary opcode. */ +#define VXVA(op, xop, vaop) (VX(op,xop) | (((vaop) & 0x1f) << 16)) + +#define VXASH(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) +#define VXASH_MASK VXASH (0x3f, 0x1f) + /* An X form instruction. */ #define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) +/* A X form instruction for Quad-Precision FP Instructions. */ +#define XVA(op, xop, vaop) (X(op,xop) | (((vaop) & 0x1f) << 16)) + +/* An EX form instruction. */ +#define EX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff)) + +/* The mask for an EX form instruction. */ +#define EX_MASK EX (0x3f, 0x7ff) + +/* An XX2 form instruction. */ +#define XX2(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2)) + +/* A XX2 form instruction with the VA bits specified. */ +#define XX2VA(op, xop, vaop) (XX2(op,xop) | (((vaop) & 0x1f) << 16)) + +/* An XX3 form instruction. */ +#define XX3(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0xff) << 3)) + +/* An XX3 form instruction with the RC bit specified. */ +#define XX3RC(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | ((((unsigned long)(xop)) & 0x7f) << 3)) + +/* An XX4 form instruction. */ +#define XX4(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3) << 4)) + /* A Z form instruction. */ #define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1)) /* An X form instruction with the RC bit specified. */ #define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1)) +/* A X form instruction for Quad-Precision FP Instructions with RC bit. */ +#define XVARC(op, xop, vaop, rc) (XVA ((op), (xop), (vaop)) | ((rc) & 1)) + +/* An X form instruction with the RA bits specified as two ops. */ +#define XMMF(op, xop, mop0, mop1) (X ((op), (xop)) | ((mop0) & 3) << 19 | ((mop1) & 7) << 16) + /* A Z form instruction with the RC bit specified. */ #define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1)) /* The mask for an X form instruction. */ #define X_MASK XRC (0x3f, 0x3ff, 1) +/* The mask for an X form instruction with the BF bits specified. */ +#define XBF_MASK (X_MASK | (3 << 21)) + +/* An X form wait instruction with everything filled in except the WC field. */ +#define XWC_MASK (XRC (0x3f, 0x3ff, 1) | (7 << 23) | RA_MASK | RB_MASK) + +/* The mask for an XX1 form instruction. */ +#define XX1_MASK X (0x3f, 0x3ff) + +/* An XX1_MASK with the RB field fixed. */ +#define XX1RB_MASK (XX1_MASK | RB_MASK) + +/* The mask for an XX2 form instruction. */ +#define XX2_MASK (XX2 (0x3f, 0x1ff) | (0x1f << 16)) + +/* The mask for an XX2 form instruction with the UIM bits specified. */ +#define XX2UIM_MASK (XX2 (0x3f, 0x1ff) | (7 << 18)) + +/* The mask for an XX2 form instruction with the 4 UIM bits specified. 
*/ +#define XX2UIM4_MASK (XX2 (0x3f, 0x1ff) | (1 << 20)) + +/* The mask for an XX2 form instruction with the BF bits specified. */ +#define XX2BF_MASK (XX2_MASK | (3 << 21) | (1)) + +/* The mask for an XX2 form instruction with the BF and DCMX bits specified. */ +#define XX2BFD_MASK (XX2 (0x3f, 0x1ff) | 1) + +/* The mask for an XX2 form instruction with a split DCMX bits specified. */ +#define XX2DCMXS_MASK XX2 (0x3f, 0x1ee) + +/* The mask for an XX3 form instruction. */ +#define XX3_MASK XX3 (0x3f, 0xff) + +/* The mask for an XX3 form instruction with the BF bits specified. */ +#define XX3BF_MASK (XX3 (0x3f, 0xff) | (3 << 21) | (1)) + +/* The mask for an XX3 form instruction with the DM or SHW bits specified. */ +#define XX3DM_MASK (XX3 (0x3f, 0x1f) | (1 << 10)) +#define XX3SHW_MASK XX3DM_MASK + +/* The mask for an XX4 form instruction. */ +#define XX4_MASK XX4 (0x3f, 0x3) + +/* An X form wait instruction with everything filled in except the WC field. */ +#define XWC_MASK (XRC (0x3f, 0x3ff, 1) | (7 << 23) | RA_MASK | RB_MASK) + +/* The mask for an XMMF form instruction. */ +#define XMMF_MASK (XMMF (0x3f, 0x3ff, 3, 7) | (1)) + /* The mask for a Z form instruction. */ #define Z_MASK ZRC (0x3f, 0x1ff, 1) +#define Z2_MASK ZRC (0x3f, 0xff, 1) -/* An X_MASK with the RA field fixed. */ +/* An X_MASK with the RA/VA field fixed. */ #define XRA_MASK (X_MASK | RA_MASK) +#define XVA_MASK XRA_MASK + +/* An XRA_MASK with the A_L/W field clear. */ +#define XWRA_MASK (XRA_MASK & ~((unsigned long) 1 << 16)) +#define XRLA_MASK XWRA_MASK /* An X_MASK with the RB field fixed. */ #define XRB_MASK (X_MASK | RB_MASK) @@ -1691,18 +2702,54 @@ extract_tbr (unsigned long insn, /* An X_MASK with the RA and RB fields fixed. */ #define XRARB_MASK (X_MASK | RA_MASK | RB_MASK) +/* An XBF_MASK with the RA and RB fields fixed. */ +#define XBFRARB_MASK (XBF_MASK | RA_MASK | RB_MASK) + /* An XRARB_MASK, but with the L bit clear. */ #define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16)) +/* An XRARB_MASK, but with the L bits in a darn instruction clear. */ +#define XLRAND_MASK (XRARB_MASK & ~((unsigned long) 3 << 16)) + /* An X_MASK with the RT and RA fields fixed. */ #define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK) +/* An X_MASK with the RT and RB fields fixed. */ +#define XRTRB_MASK (X_MASK | RT_MASK | RB_MASK) + /* An XRTRA_MASK, but with L bit clear. */ #define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21)) +/* An X_MASK with the RT, RA and RB fields fixed. */ +#define XRTRARB_MASK (X_MASK | RT_MASK | RA_MASK | RB_MASK) + +/* An XRTRARB_MASK, but with L bit clear. */ +#define XRTLRARB_MASK (XRTRARB_MASK & ~((unsigned long) 1 << 21)) + +/* An XRTRARB_MASK, but with A bit clear. */ +#define XRTARARB_MASK (XRTRARB_MASK & ~((unsigned long) 1 << 25)) + +/* An XRTRARB_MASK, but with BF bits clear. */ +#define XRTBFRARB_MASK (XRTRARB_MASK & ~((unsigned long) 7 << 23)) + /* An X form instruction with the L bit specified. */ #define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21)) +/* An X form instruction with the L bits specified. */ +#define XOPL2(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) + +/* An X form instruction with the L bit and RC bit specified. 
*/ +#define XRCL(op, xop, l, rc) (XRC ((op), (xop), (rc)) | ((((unsigned long)(l)) & 1) << 21)) + +/* An X form instruction with RT fields specified */ +#define XRT(op, xop, rt) (X ((op), (xop)) \ + | ((((unsigned long)(rt)) & 0x1f) << 21)) + +/* An X form instruction with RT and RA fields specified */ +#define XRTRA(op, xop, rt, ra) (X ((op), (xop)) \ + | ((((unsigned long)(rt)) & 0x1f) << 21) \ + | ((((unsigned long)(ra)) & 0x1f) << 16)) + /* The mask for an X form comparison instruction. */ #define XCMP_MASK (X_MASK | (((unsigned long)1) << 22)) @@ -1724,6 +2771,9 @@ extract_tbr (unsigned long insn, /* An X form sync instruction with everything filled in except the LS field. */ #define XSYNC_MASK (0xff9fffff) +/* An X form sync instruction with everything filled in except the L and E fields. */ +#define XSYNCLE_MASK (0xff90ffff) + /* An X_MASK, but with the EH bit clear. */ #define XEH_MASK (X_MASK & ~((unsigned long )1)) @@ -1733,11 +2783,11 @@ extract_tbr (unsigned long insn, /* An XFL form instruction. */ #define XFL(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1)) -#define XFL_MASK (XFL (0x3f, 0x3ff, 1) | (((unsigned long)1) << 25) | (((unsigned long)1) << 16)) +#define XFL_MASK XFL (0x3f, 0x3ff, 1) /* An X form isel instruction. */ -#define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) -#define XISEL_MASK XISEL(0x3f, 0x1f) +#define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1)) +#define XISEL_MASK XISEL(0x3f, 0x1f) /* An XL form instruction with the LK field set to 0. */ #define XL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1)) @@ -1748,6 +2798,9 @@ extract_tbr (unsigned long insn, /* The mask for an XL form instruction. */ #define XL_MASK XLLK (0x3f, 0x3ff, 1) +/* An XL_MASK with the RT, RA and RB fields fixed, but S bit clear. */ +#define XLS_MASK ((XL_MASK | RT_MASK | RA_MASK | RB_MASK) & ~(1 << 11)) + /* An XL form instruction which explicitly sets the BO field. */ #define XLO(op, bo, xop, lk) \ (XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21)) @@ -1778,6 +2831,9 @@ extract_tbr (unsigned long insn, /* An XL_MASK with the BO, BI and BB fields fixed. */ #define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK) +/* An X form mbar instruction with MO field. */ +#define XMBAR(op, xop, mo) (X ((op), (xop)) | ((((unsigned long)(mo)) & 1) << 21)) + /* An XO form instruction. */ #define XO(op, xop, oe, rc) \ (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1)) @@ -1786,6 +2842,12 @@ extract_tbr (unsigned long insn, /* An XO_MASK with the RB field fixed. */ #define XORB_MASK (XO_MASK | RB_MASK) +/* An XOPS form instruction for paired singles. */ +#define XOPS(op, xop, rc) \ + (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1)) +#define XOPS_MASK XOPS (0x3f, 0x3ff, 1) + + /* An XS form instruction. */ #define XS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2) | (((unsigned long)(rc)) & 1)) #define XS_MASK XS (0x3f, 0x1ff, 1) @@ -1809,7 +2871,7 @@ extract_tbr (unsigned long insn, /* An XFX form instruction with the SPR field filled in except for the SPRG field. */ -#define XSPRG_MASK (XSPR_MASK & ~(0x17 << 16)) +#define XSPRG_MASK (XSPR_MASK & ~(0x1f << 16)) /* An X form instruction with everything filled in except the E field. 
*/ #define XE_MASK (0xffff7fff) @@ -1818,6 +2880,19 @@ extract_tbr (unsigned long insn, #define XUC(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f)) #define XUC_MASK XUC(0x3f, 0x1f) +/* An XW form instruction. */ +#define XW(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3f) << 1) | ((rc) & 1)) +/* The mask for a G form instruction. rc not supported at present. */ +#define XW_MASK XW (0x3f, 0x3f, 0) + +/* An APU form instruction. */ +#define APU(op, xop, rc) (OP (op) | (((unsigned long)(xop)) & 0x3ff) << 1 | ((rc) & 1)) + +/* The mask for an APU form instruction. */ +#define APU_MASK APU (0x3f, 0x3ff, 1) +#define APU_RT_MASK (APU_MASK | RT_MASK) +#define APU_RA_MASK (APU_MASK | RA_MASK) + /* The BO encodings used in extended conditional branch mnemonics. */ #define BODNZF (0x0) #define BODNZFP (0x1) @@ -1848,6 +2923,16 @@ extract_tbr (unsigned long insn, #define BOU (0x14) +/* The BO16 encodings used in extended VLE conditional branch mnemonics. */ +#define BO16F (0x0) +#define BO16T (0x1) + +/* The BO32 encodings used in extended VLE conditional branch mnemonics. */ +#define BO32F (0x0) +#define BO32T (0x1) +#define BO32DNZ (0x2) +#define BO32DZ (0x3) + /* The BI condition bit encodings used in extended conditional branch mnemonics. */ #define CBLT (0) @@ -1875,3066 +2960,4267 @@ extract_tbr (unsigned long insn, /* Smaller names for the flags so each entry in the opcodes table will fit on a single line. */ #undef PPC -#define PPC PPC_OPCODE_PPC +#define PPC PPC_OPCODE_PPC #define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON -#define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM #define POWER4 PPC_OPCODE_POWER4 #define POWER5 PPC_OPCODE_POWER5 #define POWER6 PPC_OPCODE_POWER6 +#define POWER7 PPC_OPCODE_POWER7 +#define POWER8 PPC_OPCODE_POWER8 +#define POWER9 PPC_OPCODE_POWER9 #define CELL PPC_OPCODE_CELL -#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC -#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC +#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_64_BRIDGE +#define NON32 (PPC_OPCODE_64 | PPC_OPCODE_POWER4 \ + | PPC_OPCODE_EFS | PPC_OPCODE_E500MC | PPC_OPCODE_TITAN) #define PPC403 PPC_OPCODE_403 -#define PPC405 PPC403 +#define PPC405 PPC_OPCODE_405 #define PPC440 PPC_OPCODE_440 -#define PPC750 PPC -#define PPC860 PPC +#define PPC464 PPC440 +#define PPC476 PPC_OPCODE_476 +#define PPC750 PPC_OPCODE_750 +#define PPC7450 PPC_OPCODE_7450 +#define PPC860 PPC_OPCODE_860 +#define PPCPS PPC_OPCODE_PPCPS #define PPCVEC PPC_OPCODE_ALTIVEC -#define POWER PPC_OPCODE_POWER -#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2 -#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2 -#define POWER32 PPC_OPCODE_POWER | PPC_OPCODE_32 -#define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON -#define COM32 PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON | PPC_OPCODE_32 -#define M601 PPC_OPCODE_POWER | PPC_OPCODE_601 +#define PPCVEC2 PPC_OPCODE_ALTIVEC2 +#define PPCVEC3 PPC_OPCODE_ALTIVEC2 +#define PPCVSX PPC_OPCODE_VSX +#define PPCVSX2 PPC_OPCODE_VSX +#define PPCVSX3 PPC_OPCODE_VSX3 +#define POWER PPC_OPCODE_POWER +#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2 +#define PWR2COM PPC_OPCODE_POWER | PPC_OPCODE_POWER2 | PPC_OPCODE_COMMON +#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2 | PPC_OPCODE_COMMON +#define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON +#define M601 PPC_OPCODE_POWER | PPC_OPCODE_601 #define PWRCOM PPC_OPCODE_POWER | PPC_OPCODE_601 | PPC_OPCODE_COMMON -#define MFDEC1 PPC_OPCODE_POWER -#define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | 
PPC_OPCODE_BOOKE +#define MFDEC1 PPC_OPCODE_POWER +#define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | PPC_OPCODE_BOOKE | PPC_OPCODE_TITAN #define BOOKE PPC_OPCODE_BOOKE -#define BOOKE64 PPC_OPCODE_BOOKE64 -#define CLASSIC PPC_OPCODE_CLASSIC +#define NO371 PPC_OPCODE_BOOKE | PPC_OPCODE_PPCPS | PPC_OPCODE_EFS #define PPCE300 PPC_OPCODE_E300 #define PPCSPE PPC_OPCODE_SPE -#define PPCISEL PPC_OPCODE_ISEL +#define PPCISEL PPC_OPCODE_ISEL #define PPCEFS PPC_OPCODE_EFS -#define PPCBRLK PPC_OPCODE_BRLOCK +#define PPCBRLK PPC_OPCODE_BRLOCK #define PPCPMR PPC_OPCODE_PMR -#define PPCCHLK PPC_OPCODE_CACHELCK -#define PPCCHLK64 PPC_OPCODE_CACHELCK | PPC_OPCODE_BOOKE64 +#define PPCTMR PPC_OPCODE_TMR +#define PPCCHLK PPC_OPCODE_CACHELCK #define PPCRFMCI PPC_OPCODE_RFMCI +#define E500MC PPC_OPCODE_E500MC +#define PPCA2 PPC_OPCODE_A2 +#define TITAN PPC_OPCODE_TITAN +#define MULHW PPC_OPCODE_405 | PPC_OPCODE_440 | TITAN +#define E500 PPC_OPCODE_E500 +#define E6500 PPC_OPCODE_E6500 +#define PPCVLE PPC_OPCODE_VLE +#define PPCHTM PPC_OPCODE_HTM +#define E200Z4 PPC_OPCODE_E200Z4 +/* The list of embedded processors that use the embedded operand ordering + for the 3 operand dcbt and dcbtst instructions. */ +#define DCBT_EO (PPC_OPCODE_E500 | PPC_OPCODE_E500MC | PPC_OPCODE_476 \ + | PPC_OPCODE_A2) + + /* The opcode table. The format of the opcode table is: - NAME OPCODE MASK FLAGS { OPERANDS } + NAME OPCODE MASK FLAGS ANTI {OPERANDS} NAME is the name of the instruction. OPCODE is the instruction opcode. MASK is the opcode mask; this is used to tell the disassembler which bits in the actual opcode must match OPCODE. - FLAGS are flags indicated what processors support the instruction. + FLAGS are flags indicating which processors support the instruction. + ANTI indicates which processors don't support the instruction. OPERANDS is the list of operands. The disassembler reads the table in order and prints the first instruction which matches, so this table is sorted to put more - specific instructions before more general instructions. It is also - sorted by major opcode. */ + specific instructions before more general instructions. + + This table must be sorted by major opcode. Please try to keep it + vaguely sorted within major opcode too, except of course where + constrained otherwise by disassembler operation. 
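The table-format comment above is the contract for everything that follows: the disassembler walks the table in order and prints the first entry whose masked bits equal OPCODE, provided the selected processor appears in FLAGS and not in ANTI; that is why more specific entries must precede more general ones. A minimal sketch of that scan, assuming illustrative field names; the real lookup in the opcodes library may be organized differently:

#include <stddef.h>

/* Editorial sketch only: illustrative struct layout, not the file's real
   declaration of struct powerpc_opcode.  */
struct sketch_opcode
{
  const char *name;
  unsigned long opcode;	/* Fixed bits of the instruction.  */
  unsigned long mask;	/* Which bits must equal OPCODE.  */
  unsigned long flags;	/* Processors that support the insn (FLAGS).  */
  unsigned long anti;	/* Processors that do not (ANTI).  */
};

static const struct sketch_opcode *
sketch_lookup (const struct sketch_opcode *table, size_t n,
	       unsigned long insn, unsigned long dialect)
{
  size_t i;

  for (i = 0; i < n; i++)
    if ((insn & table[i].mask) == table[i].opcode
	&& (table[i].flags & dialect) != 0
	&& (table[i].anti & dialect) == 0)
      return &table[i];	/* First match wins.  */
  return NULL;
}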
*/ const struct powerpc_opcode powerpc_opcodes[] = { -{ "attn", X(0,256), X_MASK, POWER4, { 0 } }, -{ "tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, { RA, SI } }, -{ "tdi", OP(2), OP_MASK, PPC64, { TO, RA, SI } }, - -{ "twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twnli", OPTO(3,TONL), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, { RA, SI } }, -{ "tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, { RA, SI } }, -{ "twi", OP(3), OP_MASK, PPCCOM, { TO, RA, SI } }, -{ "ti", OP(3), OP_MASK, PWRCOM, { TO, RA, SI } }, - -{ "macchw", XO(4,172,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchw.", XO(4,172,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwo", XO(4,172,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwo.", XO(4,172,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchws", XO(4,236,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchws.", XO(4,236,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwso", XO(4,236,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwso.", XO(4,236,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsu", XO(4,204,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsu.", XO(4,204,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsuo", XO(4,204,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwsuo.", 
XO(4,204,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwu", XO(4,140,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwu.", XO(4,140,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwuo", XO(4,140,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "macchwuo.", XO(4,140,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhw", XO(4,44,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhw.", XO(4,44,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwo", XO(4,44,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwo.", XO(4,44,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhws", XO(4,108,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhws.", XO(4,108,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwso", XO(4,108,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwso.", XO(4,108,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsu", XO(4,76,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsu.", XO(4,76,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsuo", XO(4,76,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwsuo.", XO(4,76,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwu", XO(4,12,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwu.", XO(4,12,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwuo", XO(4,12,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "machhwuo.", XO(4,12,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhw", XO(4,428,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhw.", XO(4,428,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwo", XO(4,428,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwo.", XO(4,428,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhws", XO(4,492,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhws.", XO(4,492,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwso", XO(4,492,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwso.", XO(4,492,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsu", XO(4,460,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsu.", XO(4,460,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsuo", XO(4,460,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwsuo.", XO(4,460,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwu", XO(4,396,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwu.", XO(4,396,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwuo", XO(4,396,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "maclhwuo.", XO(4,396,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchw", XRC(4,168,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchw.", XRC(4,168,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchwu", XRC(4,136,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulchwu.", XRC(4,136,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhw", XRC(4,40,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhw.", XRC(4,40,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhwu", XRC(4,8,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mulhhwu.", XRC(4,8,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhw", XRC(4,424,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhw.", XRC(4,424,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhwu", XRC(4,392,0), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mullhwu.", XRC(4,392,1), X_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchw", XO(4,174,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchw.", 
XO(4,174,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwo", XO(4,174,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwo.", XO(4,174,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchws", XO(4,238,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchws.", XO(4,238,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwso", XO(4,238,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmacchwso.", XO(4,238,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhw", XO(4,46,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhw.", XO(4,46,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwo", XO(4,46,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwo.", XO(4,46,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhws", XO(4,110,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhws.", XO(4,110,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwso", XO(4,110,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmachhwso.", XO(4,110,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhw", XO(4,430,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhw.", XO(4,430,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwo", XO(4,430,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwo.", XO(4,430,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhws", XO(4,494,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhws.", XO(4,494,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwso", XO(4,494,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "nmaclhwso.", XO(4,494,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } }, -{ "mfvscr", VX(4, 1540), VX_MASK, PPCVEC, { VD } }, -{ "mtvscr", VX(4, 1604), VX_MASK, PPCVEC, { VB } }, - - /* Double-precision opcodes. */ - /* Some of these conflict with AltiVec, so move them before, since - PPCVEC includes the PPC_OPCODE_PPC set. 
*/ -{ "efscfd", VX(4, 719), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdabs", VX(4, 740), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdnabs", VX(4, 741), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdneg", VX(4, 742), VX_MASK, PPCEFS, { RS, RA } }, -{ "efdadd", VX(4, 736), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdsub", VX(4, 737), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdmul", VX(4, 744), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efddiv", VX(4, 745), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efdcmpgt", VX(4, 748), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcmplt", VX(4, 749), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcmpeq", VX(4, 750), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtstgt", VX(4, 764), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtstlt", VX(4, 765), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdtsteq", VX(4, 766), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efdcfsi", VX(4, 753), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfsid", VX(4, 739), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfui", VX(4, 752), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfuid", VX(4, 738), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfsf", VX(4, 755), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfuf", VX(4, 754), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsi", VX(4, 757), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsidz",VX(4, 747), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsiz", VX(4, 762), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctui", VX(4, 756), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuidz",VX(4, 746), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuiz", VX(4, 760), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctsf", VX(4, 759), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdctuf", VX(4, 758), VX_MASK, PPCEFS, { RS, RB } }, -{ "efdcfs", VX(4, 751), VX_MASK, PPCEFS, { RS, RB } }, - /* End of double-precision opcodes. */ - -{ "vaddcuw", VX(4, 384), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddfp", VX(4, 10), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddsbs", VX(4, 768), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddshs", VX(4, 832), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddsws", VX(4, 896), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddubm", VX(4, 0), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vaddubs", VX(4, 512), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduhm", VX(4, 64), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduhs", VX(4, 576), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduwm", VX(4, 128), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vadduws", VX(4, 640), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vand", VX(4, 1028), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vandc", VX(4, 1092), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsb", VX(4, 1282), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsh", VX(4, 1346), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgsw", VX(4, 1410), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavgub", VX(4, 1026), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavguh", VX(4, 1090), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vavguw", VX(4, 1154), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcfsx", VX(4, 842), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vcfux", VX(4, 778), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vcmpbfp", VXR(4, 966, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpbfp.", VXR(4, 966, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpeqfp", VXR(4, 198, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpeqfp.", VXR(4, 198, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequb", VXR(4, 6, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequb.", VXR(4, 6, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequh", VXR(4, 70, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequh.", VXR(4, 70, 1), VXR_MASK, PPCVEC, { VD, VA, VB 
} }, -{ "vcmpequw", VXR(4, 134, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpequw.", VXR(4, 134, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgefp", VXR(4, 454, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgefp.", VXR(4, 454, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtfp", VXR(4, 710, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtfp.", VXR(4, 710, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsb", VXR(4, 774, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsb.", VXR(4, 774, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsh", VXR(4, 838, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsh.", VXR(4, 838, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsw", VXR(4, 902, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtsw.", VXR(4, 902, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtub", VXR(4, 518, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtub.", VXR(4, 518, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuh", VXR(4, 582, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuh.", VXR(4, 582, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuw", VXR(4, 646, 0), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vcmpgtuw.", VXR(4, 646, 1), VXR_MASK, PPCVEC, { VD, VA, VB } }, -{ "vctsxs", VX(4, 970), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vctuxs", VX(4, 906), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vexptefp", VX(4, 394), VX_MASK, PPCVEC, { VD, VB } }, -{ "vlogefp", VX(4, 458), VX_MASK, PPCVEC, { VD, VB } }, -{ "vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, { VD, VA, VC, VB } }, -{ "vmaxfp", VX(4, 1034), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsb", VX(4, 258), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsh", VX(4, 322), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxsw", VX(4, 386), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxub", VX(4, 2), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxuh", VX(4, 66), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmaxuw", VX(4, 130), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vminfp", VX(4, 1098), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsb", VX(4, 770), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsh", VX(4, 834), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminsw", VX(4, 898), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminub", VX(4, 514), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminuh", VX(4, 578), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vminuw", VX(4, 642), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmrghb", VX(4, 12), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrghh", VX(4, 76), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrghw", VX(4, 140), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglb", VX(4, 268), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglh", VX(4, 332), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmrglw", VX(4, 396), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vmulesb", VX(4, 776), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulesh", VX(4, 840), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuleub", VX(4, 520), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuleuh", VX(4, 584), VX_MASK, PPCVEC, 
{ VD, VA, VB } }, -{ "vmulosb", VX(4, 264), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulosh", VX(4, 328), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmuloub", VX(4, 8), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vmulouh", VX(4, 72), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, { VD, VA, VC, VB } }, -{ "vnor", VX(4, 1284), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vor", VX(4, 1156), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vperm", VXA(4, 43), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vpkpx", VX(4, 782), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkshss", VX(4, 398), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkshus", VX(4, 270), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkswss", VX(4, 462), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkswus", VX(4, 334), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuhum", VX(4, 14), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuhus", VX(4, 142), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuwum", VX(4, 78), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vpkuwus", VX(4, 206), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrefp", VX(4, 266), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfim", VX(4, 714), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfin", VX(4, 522), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfip", VX(4, 650), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrfiz", VX(4, 586), VX_MASK, PPCVEC, { VD, VB } }, -{ "vrlb", VX(4, 4), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrlh", VX(4, 68), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrlw", VX(4, 132), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vrsqrtefp", VX(4, 330), VX_MASK, PPCVEC, { VD, VB } }, -{ "vsel", VXA(4, 42), VXA_MASK, PPCVEC, { VD, VA, VB, VC } }, -{ "vsl", VX(4, 452), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslb", VX(4, 260), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsldoi", VXA(4, 44), VXA_MASK, PPCVEC, { VD, VA, VB, SHB } }, -{ "vslh", VX(4, 324), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslo", VX(4, 1036), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vslw", VX(4, 388), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vspltb", VX(4, 524), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vsplth", VX(4, 588), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vspltisb", VX(4, 780), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltish", VX(4, 844), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltisw", VX(4, 908), VX_MASK, PPCVEC, { VD, SIMM } }, -{ "vspltw", VX(4, 652), VX_MASK, PPCVEC, { VD, VB, UIMM } }, -{ "vsr", VX(4, 708), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrab", VX(4, 772), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrah", VX(4, 836), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsraw", VX(4, 900), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrb", VX(4, 516), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrh", VX(4, 580), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsro", VX(4, 1100), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsrw", VX(4, 644), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubcuw", VX(4, 1408), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubfp", VX(4, 74), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubsbs", VX(4, 1792), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubshs", VX(4, 1856), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubsws", VX(4, 1920), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsububm", VX(4, 1024), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsububs", VX(4, 1536), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuhm", VX(4, 1088), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuhs", VX(4, 1600), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuwm", VX(4, 1152), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsubuws", VX(4, 1664), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsumsws", VX(4, 1928), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ 
"vsum2sws", VX(4, 1672), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4sbs", VX(4, 1800), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4shs", VX(4, 1608), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vsum4ubs", VX(4, 1544), VX_MASK, PPCVEC, { VD, VA, VB } }, -{ "vupkhpx", VX(4, 846), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupkhsb", VX(4, 526), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupkhsh", VX(4, 590), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklpx", VX(4, 974), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklsb", VX(4, 654), VX_MASK, PPCVEC, { VD, VB } }, -{ "vupklsh", VX(4, 718), VX_MASK, PPCVEC, { VD, VB } }, -{ "vxor", VX(4, 1220), VX_MASK, PPCVEC, { VD, VA, VB } }, - -{ "evaddw", VX(4, 512), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evaddiw", VX(4, 514), VX_MASK, PPCSPE, { RS, RB, UIMM } }, -{ "evsubfw", VX(4, 516), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsubw", VX(4, 516), VX_MASK, PPCSPE, { RS, RB, RA } }, -{ "evsubifw", VX(4, 518), VX_MASK, PPCSPE, { RS, UIMM, RB } }, -{ "evsubiw", VX(4, 518), VX_MASK, PPCSPE, { RS, RB, UIMM } }, -{ "evabs", VX(4, 520), VX_MASK, PPCSPE, { RS, RA } }, -{ "evneg", VX(4, 521), VX_MASK, PPCSPE, { RS, RA } }, -{ "evextsb", VX(4, 522), VX_MASK, PPCSPE, { RS, RA } }, -{ "evextsh", VX(4, 523), VX_MASK, PPCSPE, { RS, RA } }, -{ "evrndw", VX(4, 524), VX_MASK, PPCSPE, { RS, RA } }, -{ "evcntlzw", VX(4, 525), VX_MASK, PPCSPE, { RS, RA } }, -{ "evcntlsw", VX(4, 526), VX_MASK, PPCSPE, { RS, RA } }, - -{ "brinc", VX(4, 527), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evand", VX(4, 529), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evandc", VX(4, 530), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmr", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, BBA } }, -{ "evor", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evorc", VX(4, 539), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evxor", VX(4, 534), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "eveqv", VX(4, 537), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evnand", VX(4, 542), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evnot", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, BBA } }, -{ "evnor", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evrlw", VX(4, 552), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evrlwi", VX(4, 554), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evslw", VX(4, 548), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evslwi", VX(4, 550), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsrws", VX(4, 545), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsrwu", VX(4, 544), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evsrwis", VX(4, 547), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsrwiu", VX(4, 546), VX_MASK, PPCSPE, { RS, RA, EVUIMM } }, -{ "evsplati", VX(4, 553), VX_MASK, PPCSPE, { RS, SIMM } }, -{ "evsplatfi", VX(4, 555), VX_MASK, PPCSPE, { RS, SIMM } }, -{ "evmergehi", VX(4, 556), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergelo", VX(4, 557), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergehilo",VX(4,558), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmergelohi",VX(4,559), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evcmpgts", VX(4, 561), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpgtu", VX(4, 560), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmplts", VX(4, 563), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpltu", VX(4, 562), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evcmpeq", VX(4, 564), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evsel", EVSEL(4,79),EVSEL_MASK, PPCSPE, { RS, RA, RB, CRFS } }, - -{ "evldd", VX(4, 769), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evlddx", VX(4, 768), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evldw", VX(4, 771), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evldwx", VX(4, 770), 
VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evldh", VX(4, 773), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evldhx", VX(4, 772), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhe", VX(4, 785), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhex", VX(4, 784), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhou", VX(4, 789), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhoux", VX(4, 788), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhos", VX(4, 791), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhosx", VX(4, 790), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwwsplat",VX(4, 793), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwwsplatx",VX(4, 792), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlwhsplat",VX(4, 797), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evlwhsplatx",VX(4, 796), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhesplat",VX(4, 777), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhesplatx",VX(4, 776), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhousplat",VX(4, 781), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evlhhossplat",VX(4, 783), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } }, -{ "evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evstdd", VX(4, 801), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstddx", VX(4, 800), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstdw", VX(4, 803), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstdwx", VX(4, 802), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstdh", VX(4, 805), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } }, -{ "evstdhx", VX(4, 804), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwwe", VX(4, 825), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwwex", VX(4, 824), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwwo", VX(4, 829), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwwox", VX(4, 828), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwhe", VX(4, 817), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwhex", VX(4, 816), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evstwho", VX(4, 821), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } }, -{ "evstwhox", VX(4, 820), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evfsabs", VX(4, 644), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsnabs", VX(4, 645), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsneg", VX(4, 646), VX_MASK, PPCSPE, { RS, RA } }, -{ "evfsadd", VX(4, 640), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfssub", VX(4, 641), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfsmul", VX(4, 648), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfsdiv", VX(4, 649), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evfscmpgt", VX(4, 652), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscmplt", VX(4, 653), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscmpeq", VX(4, 654), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststgt", VX(4, 668), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststlt", VX(4, 669), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfststeq", VX(4, 670), VX_MASK, PPCSPE, { CRFD, RA, RB } }, -{ "evfscfui", VX(4, 656), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctuiz", VX(4, 664), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfsi", VX(4, 657), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfuf", VX(4, 658), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfscfsf", VX(4, 659), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctui", VX(4, 660), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctsi", VX(4, 661), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctsiz", VX(4, 666), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctuf", VX(4, 662), VX_MASK, PPCSPE, { RS, RB } }, -{ "evfsctsf", VX(4, 663), VX_MASK, PPCSPE, { RS, RB } }, - -{ "efsabs", VX(4, 708), VX_MASK, PPCEFS, { 
RS, RA } }, -{ "efsnabs", VX(4, 709), VX_MASK, PPCEFS, { RS, RA } }, -{ "efsneg", VX(4, 710), VX_MASK, PPCEFS, { RS, RA } }, -{ "efsadd", VX(4, 704), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efssub", VX(4, 705), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efsmul", VX(4, 712), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efsdiv", VX(4, 713), VX_MASK, PPCEFS, { RS, RA, RB } }, -{ "efscmpgt", VX(4, 716), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscmplt", VX(4, 717), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscmpeq", VX(4, 718), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststgt", VX(4, 732), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststlt", VX(4, 733), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efststeq", VX(4, 734), VX_MASK, PPCEFS, { CRFD, RA, RB } }, -{ "efscfui", VX(4, 720), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctuiz", VX(4, 728), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfsi", VX(4, 721), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfuf", VX(4, 722), VX_MASK, PPCEFS, { RS, RB } }, -{ "efscfsf", VX(4, 723), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctui", VX(4, 724), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsi", VX(4, 725), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsiz", VX(4, 730), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctuf", VX(4, 726), VX_MASK, PPCEFS, { RS, RB } }, -{ "efsctsf", VX(4, 727), VX_MASK, PPCEFS, { RS, RB } }, - -{ "evmhossf", VX(4, 1031), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossfa", VX(4, 1063), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmf", VX(4, 1039), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfa", VX(4, 1071), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmi", VX(4, 1037), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmia", VX(4, 1069), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumi", VX(4, 1036), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumia", VX(4, 1068), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessf", VX(4, 1027), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfa", VX(4, 1059), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmf", VX(4, 1035), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfa", VX(4, 1067), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmi", VX(4, 1033), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmia", VX(4, 1065), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumi", VX(4, 1032), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumia", VX(4, 1064), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhossfaaw",VX(4, 1287), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossiaaw",VX(4, 1285), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfaaw",VX(4, 1295), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmiaaw",VX(4, 1293), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhousiaaw",VX(4, 1284), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumiaaw",VX(4, 1292), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfaaw",VX(4, 1283), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessiaaw",VX(4, 1281), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfaaw",VX(4, 1291), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmiaaw",VX(4, 1289), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheusiaaw",VX(4, 1280), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumiaaw",VX(4, 1288), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhossfanw",VX(4, 1415), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhossianw",VX(4, 1413), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmfanw",VX(4, 1423), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhosmianw",VX(4, 1421), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhousianw",VX(4, 1412), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhoumianw",VX(4, 1420), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhessfanw",VX(4, 1411), VX_MASK, 
PPCSPE, { RS, RA, RB } }, -{ "evmhessianw",VX(4, 1409), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmfanw",VX(4, 1419), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhesmianw",VX(4, 1417), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheusianw",VX(4, 1408), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmheumianw",VX(4, 1416), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhogsmfaa",VX(4, 1327), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogsmiaa",VX(4, 1325), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogumiaa",VX(4, 1324), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmfaa",VX(4, 1323), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmiaa",VX(4, 1321), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegumiaa",VX(4, 1320), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmhogsmfan",VX(4, 1455), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogsmian",VX(4, 1453), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhogumian",VX(4, 1452), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmfan",VX(4, 1451), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegsmian",VX(4, 1449), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmhegumian",VX(4, 1448), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwhssf", VX(4, 1095), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhssfa", VX(4, 1127), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmf", VX(4, 1103), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmfa", VX(4, 1135), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmi", VX(4, 1101), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhsmia", VX(4, 1133), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhumi", VX(4, 1100), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwhumia", VX(4, 1132), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlumi", VX(4, 1096), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumia", VX(4, 1128), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlssiaaw",VX(4, 1345), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlsmiaaw",VX(4, 1353), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlusiaaw",VX(4, 1344), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumiaaw",VX(4, 1352), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwlssianw",VX(4, 1473), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlsmianw",VX(4, 1481), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlusianw",VX(4, 1472), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwlumianw",VX(4, 1480), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssf", VX(4, 1107), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwssfa", VX(4, 1139), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmf", VX(4, 1115), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfa", VX(4, 1147), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmi", VX(4, 1113), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmia", VX(4, 1145), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumi", VX(4, 1112), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumia", VX(4, 1144), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssfaa", VX(4, 1363), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfaa", VX(4, 1371), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmiaa", VX(4, 1369), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumiaa", VX(4, 1368), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evmwssfan", VX(4, 1491), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmfan", VX(4, 1499), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwsmian", VX(4, 1497), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evmwumian", VX(4, 1496), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "evaddssiaaw",VX(4, 1217), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddsmiaaw",VX(4, 1225), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddusiaaw",VX(4, 1216), VX_MASK, PPCSPE, { RS, RA } }, -{ "evaddumiaaw",VX(4, 1224), VX_MASK, PPCSPE, { RS, 
RA } }, - -{ "evsubfssiaaw",VX(4, 1219), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfsmiaaw",VX(4, 1227), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfusiaaw",VX(4, 1218), VX_MASK, PPCSPE, { RS, RA } }, -{ "evsubfumiaaw",VX(4, 1226), VX_MASK, PPCSPE, { RS, RA } }, - -{ "evmra", VX(4, 1220), VX_MASK, PPCSPE, { RS, RA } }, - -{ "evdivws", VX(4, 1222), VX_MASK, PPCSPE, { RS, RA, RB } }, -{ "evdivwu", VX(4, 1223), VX_MASK, PPCSPE, { RS, RA, RB } }, - -{ "mulli", OP(7), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "muli", OP(7), OP_MASK, PWRCOM, { RT, RA, SI } }, - -{ "subfic", OP(8), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "sfi", OP(8), OP_MASK, PWRCOM, { RT, RA, SI } }, - -{ "dozi", OP(9), OP_MASK, M601, { RT, RA, SI } }, - -{ "bce", B(9,0,0), B_MASK, BOOKE64, { BO, BI, BD } }, -{ "bcel", B(9,0,1), B_MASK, BOOKE64, { BO, BI, BD } }, -{ "bcea", B(9,1,0), B_MASK, BOOKE64, { BO, BI, BDA } }, -{ "bcela", B(9,1,1), B_MASK, BOOKE64, { BO, BI, BDA } }, - -{ "cmplwi", OPL(10,0), OPL_MASK, PPCCOM, { OBF, RA, UI } }, -{ "cmpldi", OPL(10,1), OPL_MASK, PPC64, { OBF, RA, UI } }, -{ "cmpli", OP(10), OP_MASK, PPC, { BF, L, RA, UI } }, -{ "cmpli", OP(10), OP_MASK, PWRCOM, { BF, RA, UI } }, - -{ "cmpwi", OPL(11,0), OPL_MASK, PPCCOM, { OBF, RA, SI } }, -{ "cmpdi", OPL(11,1), OPL_MASK, PPC64, { OBF, RA, SI } }, -{ "cmpi", OP(11), OP_MASK, PPC, { BF, L, RA, SI } }, -{ "cmpi", OP(11), OP_MASK, PWRCOM, { BF, RA, SI } }, - -{ "addic", OP(12), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "ai", OP(12), OP_MASK, PWRCOM, { RT, RA, SI } }, -{ "subic", OP(12), OP_MASK, PPCCOM, { RT, RA, NSI } }, - -{ "addic.", OP(13), OP_MASK, PPCCOM, { RT, RA, SI } }, -{ "ai.", OP(13), OP_MASK, PWRCOM, { RT, RA, SI } }, -{ "subic.", OP(13), OP_MASK, PPCCOM, { RT, RA, NSI } }, - -{ "li", OP(14), DRA_MASK, PPCCOM, { RT, SI } }, -{ "lil", OP(14), DRA_MASK, PWRCOM, { RT, SI } }, -{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA0, SI } }, -{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA0 } }, -{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA0, NSI } }, -{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA0 } }, - -{ "lis", OP(15), DRA_MASK, PPCCOM, { RT, SISIGNOPT } }, -{ "liu", OP(15), DRA_MASK, PWRCOM, { RT, SISIGNOPT } }, -{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA0,SISIGNOPT } }, -{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA0,SISIGNOPT } }, -{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA0, NSI } }, - -{ "bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BD } }, -{ "bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, { BD } }, -{ "bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdnzl+", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BD } }, -{ "bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, { BD } }, -{ "bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDA } }, -{ "bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, { BDA } }, -{ "bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDA } }, -{ "bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, { BDA } }, -{ "bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, { BD } 
}, -{ "bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } }, -{ "bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } }, -{ "bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, { BD } }, -{ "bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, { BDA } }, -{ "bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } }, -{ "bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } }, -{ "bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, { BDA } }, -{ "blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgta+", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bso-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bso", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bsol", BBOCB(16,BOT,CBSO,0,1), 
BBOATCB_MASK, COM, { CR, BD } }, -{ "bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bgel", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "blel-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { 
CR, BDMA } }, -{ "blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } }, -{ "bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } }, -{ "bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } }, -{ "bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } }, -{ "bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } }, -{ "bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnua", 
BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } }, -{ "bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } }, -{ "bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } }, -{ "bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnztla-",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnztla+",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdnzfla-",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdnzfla+",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } }, -{ "bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } }, -{ "bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BD } }, -{ "bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, { BI, BD } }, -{ "bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { 
BI, BDPA } }, -{ "bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } }, -{ "bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } }, -{ "bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } }, -{ "bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } }, -{ "bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } }, -{ "bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } }, -{ "bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } }, -{ "bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } }, -{ "bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } }, -{ "bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } }, -{ "bc-", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDM } }, -{ "bc+", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDP } }, -{ "bc", B(16,0,0), B_MASK, COM, { BO, BI, BD } }, -{ "bcl-", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDM } }, -{ "bcl+", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDP } }, -{ "bcl", B(16,0,1), B_MASK, COM, { BO, BI, BD } }, -{ "bca-", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDMA } }, -{ "bca+", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDPA } }, -{ "bca", B(16,1,0), B_MASK, COM, { BO, BI, BDA } }, -{ "bcla-", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDMA } }, -{ "bcla+", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDPA } }, -{ "bcla", B(16,1,1), B_MASK, COM, { BO, BI, BDA } }, - -{ "sc", SC(17,1,0), SC_MASK, PPC, { LEV } }, -{ "svc", SC(17,0,0), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } }, -{ "svcl", SC(17,0,1), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } }, -{ "svca", SC(17,1,0), SC_MASK, PWRCOM, { SV } }, -{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } }, - -{ "b", B(18,0,0), B_MASK, COM, { LI } }, -{ "bl", B(18,0,1), B_MASK, COM, { LI } }, -{ "ba", B(18,1,0), B_MASK, COM, { LIA } }, -{ "bla", B(18,1,1), B_MASK, COM, { LIA } }, - -{ "mcrf", XL(19,0), XLBB_MASK|(3 << 21)|(3 << 16), COM, { BF, BFA } }, - -{ "blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "br", XLO(19,BOU,16,0), XLBOBIBB_MASK, PWRCOM, { 0 } }, -{ "blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, { 0 } }, -{ "bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdnzlr-", XLO(19,BODNZ,16,0), 
XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdnzlrl-",XLO(19,BODNZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlrl-",XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdnzlrl+",XLO(19,BODNZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdnzlrl+",XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } }, -{ "bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } }, -{ "bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } }, -{ "bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } 
}, -{ "beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, 
POWER4, { CR } }, -{ "bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelr+", XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } }, -{ "bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, 
PWRCOM, { CR } }, -{ "bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, { BI } }, -{ "btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, { BI } }, -{ "bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnztlr-",XLO(19,BODNZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlr+",XLO(19,BODNZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlrl",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnztlrl-",XLO(19,BODNZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnztlrl+",XLO(19,BODNZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnzflr-",XLO(19,BODNZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflr+",XLO(19,BODNZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflrl",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdztlrl-",XLO(19,BODZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdztlrl+",XLO(19,BODZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdzflr-", 
XLO(19,BODZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bclre", XLLK(19,17,0), XLBB_MASK, BOOKE64, { BO, BI } }, -{ "bclrel", XLLK(19,17,1), XLBB_MASK, BOOKE64, { BO, BI } }, - -{ "rfid", XL(19,18), 0xffffffff, PPC64, { 0 } }, - -{ "crnot", XL(19,33), XL_MASK, PPCCOM, { BT, BA, BBA } }, -{ "crnor", XL(19,33), XL_MASK, COM, { BT, BA, BB } }, -{ "rfmci", X(19,38), 0xffffffff, PPCRFMCI, { 0 } }, - -{ "rfi", XL(19,50), 0xffffffff, COM, { 0 } }, -{ "rfci", XL(19,51), 0xffffffff, PPC403 | BOOKE, { 0 } }, - -{ "rfsvc", XL(19,82), 0xffffffff, POWER, { 0 } }, - -{ "crandc", XL(19,129), XL_MASK, COM, { BT, BA, BB } }, - -{ "isync", XL(19,150), 0xffffffff, PPCCOM, { 0 } }, -{ "ics", XL(19,150), 0xffffffff, PWRCOM, { 0 } }, - -{ "crclr", XL(19,193), XL_MASK, PPCCOM, { BT, BAT, BBA } }, -{ "crxor", XL(19,193), XL_MASK, COM, { BT, BA, BB } }, - -{ "crnand", XL(19,225), XL_MASK, COM, { BT, BA, BB } }, - -{ "crand", XL(19,257), XL_MASK, COM, { BT, BA, BB } }, - -{ "hrfid", XL(19,274), 0xffffffff, POWER5 | CELL, { 0 } }, - -{ "crset", XL(19,289), XL_MASK, PPCCOM, { BT, BAT, BBA } }, -{ "creqv", XL(19,289), XL_MASK, COM, { BT, BA, BB } }, - -{ "doze", XL(19,402), 0xffffffff, POWER6, { 0 } }, - -{ "crorc", XL(19,417), XL_MASK, COM, { BT, BA, BB } }, - -{ "nap", XL(19,434), 0xffffffff, POWER6, { 0 } }, - -{ "crmove", XL(19,449), XL_MASK, PPCCOM, { BT, BA, BBA } }, -{ "cror", XL(19,449), XL_MASK, COM, { BT, BA, BB } }, - -{ "sleep", XL(19,466), 0xffffffff, POWER6, { 0 } }, -{ "rvwinkle", XL(19,498), 0xffffffff, POWER6, { 0 } }, - -{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, { 0 } }, -{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, { 0 } }, -{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ 
"bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), 
XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ 
"bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } }, -{ "bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } }, -{ "bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } }, -{ "btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, { BI } }, -{ "bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } }, -{ "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } }, -{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } }, -{ "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, { BO, BI, BH } }, -{ "bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, { BO, BI } }, -{ "bcctre", XLLK(19,529,0), XLYBB_MASK, BOOKE64, { BO, BI } }, -{ "bcctrel", XLLK(19,529,1), XLYBB_MASK, BOOKE64, { BO, BI } }, - -{ "rlwimi", M(20,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlimi", M(20,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rlwimi.", M(20,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlimi.", M(20,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, { RA, RS, SH } }, -{ "clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, { RA, RS, MB } }, -{ "rlwinm", M(21,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlinm", M(21,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, -{ "rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, { RA,RS,SH } }, -{ "clrlwi.", MME(21,31,1), MSHME_MASK, PPCCOM, { RA, RS, MB } }, -{ "rlwinm.", M(21,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } }, -{ "rlinm.", M(21,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } }, - -{ "rlmi", M(22,0), M_MASK, M601, { RA,RS,RB,MBE,ME } }, -{ "rlmi.", M(22,1), M_MASK, M601, { 
RA,RS,RB,MBE,ME } }, - -{ "be", B(22,0,0), B_MASK, BOOKE64, { LI } }, -{ "bel", B(22,0,1), B_MASK, BOOKE64, { LI } }, -{ "bea", B(22,1,0), B_MASK, BOOKE64, { LIA } }, -{ "bela", B(22,1,1), B_MASK, BOOKE64, { LIA } }, - -{ "rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, { RA, RS, RB } }, -{ "rlwnm", M(23,0), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } }, -{ "rlnm", M(23,0), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } }, -{ "rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, { RA, RS, RB } }, -{ "rlwnm.", M(23,1), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } }, -{ "rlnm.", M(23,1), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } }, - -{ "nop", OP(24), 0xffffffff, PPCCOM, { 0 } }, -{ "ori", OP(24), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "oril", OP(24), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "oris", OP(25), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "oriu", OP(25), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "xori", OP(26), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "xoril", OP(26), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "xoris", OP(27), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "xoriu", OP(27), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "andi.", OP(28), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "andil.", OP(28), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "andis.", OP(29), OP_MASK, PPCCOM, { RA, RS, UI } }, -{ "andiu.", OP(29), OP_MASK, PWRCOM, { RA, RS, UI } }, - -{ "rotldi", MD(30,0,0), MDMB_MASK, PPC64, { RA, RS, SH6 } }, -{ "clrldi", MD(30,0,0), MDSH_MASK, PPC64, { RA, RS, MB6 } }, -{ "rldicl", MD(30,0,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rotldi.", MD(30,0,1), MDMB_MASK, PPC64, { RA, RS, SH6 } }, -{ "clrldi.", MD(30,0,1), MDSH_MASK, PPC64, { RA, RS, MB6 } }, -{ "rldicl.", MD(30,0,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rldicr", MD(30,1,0), MD_MASK, PPC64, { RA, RS, SH6, ME6 } }, -{ "rldicr.", MD(30,1,1), MD_MASK, PPC64, { RA, RS, SH6, ME6 } }, - -{ "rldic", MD(30,2,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rldic.", MD(30,2,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rldimi", MD(30,3,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, -{ "rldimi.", MD(30,3,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } }, - -{ "rotld", MDS(30,8,0), MDSMB_MASK, PPC64, { RA, RS, RB } }, -{ "rldcl", MDS(30,8,0), MDS_MASK, PPC64, { RA, RS, RB, MB6 } }, -{ "rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, { RA, RS, RB } }, -{ "rldcl.", MDS(30,8,1), MDS_MASK, PPC64, { RA, RS, RB, MB6 } }, - -{ "rldcr", MDS(30,9,0), MDS_MASK, PPC64, { RA, RS, RB, ME6 } }, -{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC64, { RA, RS, RB, ME6 } }, - -{ "cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } }, -{ "cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } }, -{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } }, -{ "cmp", X(31,0), XCMPL_MASK, PWRCOM, { BF, RA, RB } }, - -{ "twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, { RA, RB } }, -{ "teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlnl", XTO(31,4,TOLNL), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, { RA, 
RB } }, -{ "twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, { RA, RB } }, -{ "twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, { RA, RB } }, -{ "tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, { RA, RB } }, -{ "trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, { 0 } }, -{ "tw", X(31,4), X_MASK, PPCCOM, { TO, RA, RB } }, -{ "t", X(31,4), X_MASK, PWRCOM, { TO, RA, RB } }, - -{ "subfc", XO(31,8,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sf", XO(31,8,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subc", XO(31,8,0,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sf.", XO(31,8,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RB, RA } }, -{ "subfco", XO(31,8,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfo", XO(31,8,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subco", XO(31,8,1,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RB, RA } }, - -{ "mulhdu", XO(31,9,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addc", XO(31,10,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "a", XO(31,10,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addc.", XO(31,10,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "a.", XO(31,10,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addco", XO(31,10,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ao", XO(31,10,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addco.", XO(31,10,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ao.", XO(31,10,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "mulhwu", XO(31,11,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "mulhwu.", XO(31,11,0,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "isellt", X(31,15), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "iselgt", X(31,47), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "iseleq", X(31,79), X_MASK, PPCISEL, { RT, RA, RB } }, -{ "isel", XISEL(31,15), XISEL_MASK, PPCISEL, { RT, RA, RB, CRB } }, - -{ "mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, { RT, FXM } }, -{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4 | COM, { RT } }, -{ "mfcr", X(31,19), XFXFXM_MASK, POWER4, { RT, FXM4 } }, - -{ "lwarx", X(31,20), XEH_MASK, PPC, { RT, RA0, RB, EH } }, - -{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA0, RB } }, - -{ "icbt", X(31,22), X_MASK, BOOKE|PPCE300, { CT, RA, RB } }, -{ "icbt", X(31,262), XRT_MASK, PPC403, { RA, RB } }, - -{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA0, RB } }, -{ "lx", X(31,23), X_MASK, PWRCOM, { RT, RA, RB } }, - -{ "slw", XRC(31,24,0), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sl", XRC(31,24,0), X_MASK, PWRCOM, { RA, RS, RB } }, -{ "slw.", XRC(31,24,1), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sl.", XRC(31,24,1), X_MASK, PWRCOM, { RA, RS, RB } }, - -{ "cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, { RA, RS } }, -{ "cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, { 
RA, RS } }, -{ "cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, { RA, RS } }, -{ "cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, { RA, RS } }, - -{ "sld", XRC(31,27,0), X_MASK, PPC64, { RA, RS, RB } }, -{ "sld.", XRC(31,27,1), X_MASK, PPC64, { RA, RS, RB } }, - -{ "and", XRC(31,28,0), X_MASK, COM, { RA, RS, RB } }, -{ "and.", XRC(31,28,1), X_MASK, COM, { RA, RS, RB } }, - -{ "maskg", XRC(31,29,0), X_MASK, M601, { RA, RS, RB } }, -{ "maskg.", XRC(31,29,1), X_MASK, M601, { RA, RS, RB } }, - -{ "icbte", X(31,30), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } }, -{ "cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } }, -{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } }, -{ "cmpl", X(31,32), XCMPL_MASK, PWRCOM, { BF, RA, RB } }, - -{ "subf", XO(31,40,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "sub", XO(31,40,0,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subf.", XO(31,40,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "sub.", XO(31,40,0,1), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfo", XO(31,40,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "subo", XO(31,40,1,0), XO_MASK, PPC, { RT, RB, RA } }, -{ "subfo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "subo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RB, RA } }, +{"attn", X(0,256), X_MASK, POWER4|PPCA2, PPC476|PPCVLE, {0}}, +{"tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdui", OPTO(2,TOU), OPTO_MASK, PPC64, PPCVLE, {RA, SI}}, +{"tdi", OP(2), OP_MASK, PPC64, PPCVLE, {TO, RA, SI}}, + +{"twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twnli", OPTO(3,TONL), 
OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twui", OPTO(3,TOU), OPTO_MASK, PPCCOM, PPCVLE, {RA, SI}}, +{"tui", OPTO(3,TOU), OPTO_MASK, PWRCOM, PPCVLE, {RA, SI}}, +{"twi", OP(3), OP_MASK, PPCCOM, PPCVLE, {TO, RA, SI}}, +{"ti", OP(3), OP_MASK, PWRCOM, PPCVLE, {TO, RA, SI}}, + +{"ps_cmpu0", X (4, 0), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, +{"vaddubm", VX (4, 0), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmul10cuq", VX (4, 1), VXVB_MASK, PPCVEC3, 0, {VD, VA}}, +{"vmaxub", VX (4, 2), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrlb", VX (4, 4), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpequb", VXR(4, 6,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpneb", VXR(4, 7,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmuloub", VX (4, 8), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vaddfp", VX (4, 10), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"psq_lx", XW (4, 6,0), XW_MASK, PPCPS, 0, {FRT,RA,RB,PSWM,PSQM}}, +{"vmrghb", VX (4, 12), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"psq_stx", XW (4, 7,0), XW_MASK, PPCPS, 0, {FRS,RA,RB,PSWM,PSQM}}, +{"vpkuhum", VX (4, 14), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"mulhhwu", XRC(4, 8,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"mulhhwu.", XRC(4, 8,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_sum0", A (4, 10,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_sum0.", A (4, 10,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_sum1", A (4, 11,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_sum1.", A (4, 11,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_muls0", A (4, 12,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"machhwu", XO (4, 12,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_muls0.", A (4, 12,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"machhwu.", XO (4, 12,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_muls1", A (4, 13,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"ps_muls1.", A (4, 13,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"ps_madds0", A (4, 14,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_madds0.", A (4, 14,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_madds1", A (4, 15,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_madds1.", A (4, 15,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vmsumudm", VXA(4, 35), VXA_MASK, PPCVEC3, 0, {VD, VA, VB, VC}}, +{"ps_div", A (4, 18,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"ps_div.", A (4, 18,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"ps_sub", A (4, 20,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"ps_sub.", A (4, 20,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, 
+{"vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"ps_add", A (4, 21,0), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vsel", VXA(4, 42), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"ps_add.", A (4, 21,1), AFRC_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vperm", VXA(4, 43), VXA_MASK, PPCVEC, 0, {VD, VA, VB, VC}}, +{"vsldoi", VXA(4, 44), VXASHB_MASK, PPCVEC, 0, {VD, VA, VB, SHB}}, +{"vpermxor", VXA(4, 45), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, +{"ps_sel", A (4, 23,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, 0, {VD, VA, VC, VB}}, +{"ps_sel.", A (4, 23,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, 0, {VD, VA, VC, VB}}, +{"ps_res", A (4, 24,0), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, +{"maddhd", VXA(4, 48), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, +{"ps_res.", A (4, 24,1), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, +{"maddhdu", VXA(4, 49), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, +{"ps_mul", A (4, 25,0), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"ps_mul.", A (4, 25,1), AFRB_MASK, PPCPS, 0, {FRT, FRA, FRC}}, +{"maddld", VXA(4, 51), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}}, +{"ps_rsqrte", A (4, 26,0), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, +{"ps_rsqrte.", A (4, 26,1), AFRAFRC_MASK, PPCPS, 0, {FRT, FRB}}, +{"ps_msub", A (4, 28,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_msub.", A (4, 28,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_madd", A (4, 29,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"ps_madd.", A (4, 29,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vpermr", VXA(4, 59), VXA_MASK, PPCVEC3, 0, {VD, VA, VB, VC}}, +{"ps_nmsub", A (4, 30,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vaddeuqm", VXA(4, 60), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, +{"ps_nmsub.", A (4, 30,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vaddecuq", VXA(4, 61), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, +{"ps_nmadd", A (4, 31,0), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vsubeuqm", VXA(4, 62), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, +{"ps_nmadd.", A (4, 31,1), A_MASK, PPCPS, 0, {FRT, FRA, FRC, FRB}}, +{"vsubecuq", VXA(4, 63), VXA_MASK, PPCVEC2, 0, {VD, VA, VB, VC}}, +{"ps_cmpo0", X (4, 32), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, +{"vadduhm", VX (4, 64), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmul10ecuq", VX (4, 65), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmaxuh", VX (4, 66), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrlh", VX (4, 68), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpequh", VXR(4, 70,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpneh", VXR(4, 71,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmulouh", VX (4, 72), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vsubfp", VX (4, 74), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"psq_lux", XW (4, 38,0), XW_MASK, PPCPS, 0, {FRT,RA,RB,PSWM,PSQM}}, +{"vmrghh", VX (4, 76), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"psq_stux", XW (4, 39,0), XW_MASK, PPCPS, 0, {FRS,RA,RB,PSWM,PSQM}}, +{"vpkuwum", VX (4, 78), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"ps_neg", XRC(4, 40,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"mulhhw", XRC(4, 40,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_neg.", XRC(4, 40,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"mulhhw.", XRC(4, 40,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhw", XO (4, 44,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhw.", XO (4, 44,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhw", XO (4, 46,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhw.", XO (4, 46,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_cmpu1", X (4, 64), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, +{"vadduwm", VX (4, 
128), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmaxuw", VX (4, 130), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrlw", VX (4, 132), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrlwmi", VX (4, 133), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vcmpequw", VXR(4, 134,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpnew", VXR(4, 135,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmulouw", VX (4, 136), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmuluwm", VX (4, 137), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmrghw", VX (4, 140), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vpkuhus", VX (4, 142), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"ps_mr", XRC(4, 72,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"ps_mr.", XRC(4, 72,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"machhwsu", XO (4, 76,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhwsu.", XO (4, 76,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_cmpo1", X (4, 96), XBF_MASK, PPCPS, 0, {BF, FRA, FRB}}, +{"vaddudm", VX (4, 192), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmaxud", VX (4, 194), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vrld", VX (4, 196), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vrldmi", VX (4, 197), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vcmpeqfp", VXR(4, 198,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpequd", VXR(4, 199,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vpkuwus", VX (4, 206), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"machhws", XO (4, 108,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhws.", XO (4, 108,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhws", XO (4, 110,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhws.", XO (4, 110,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vadduqm", VX (4, 256), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmaxsb", VX (4, 258), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vslb", VX (4, 260), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpnezb", VXR(4, 263,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmulosb", VX (4, 264), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrefp", VX (4, 266), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"vmrglb", VX (4, 268), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vpkshus", VX (4, 270), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"ps_nabs", XRC(4, 136,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"mulchwu", XRC(4, 136,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_nabs.", XRC(4, 136,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"mulchwu.", XRC(4, 136,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchwu", XO (4, 140,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchwu.", XO (4, 140,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vaddcuq", VX (4, 320), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmaxsh", VX (4, 322), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vslh", VX (4, 324), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpnezh", VXR(4, 327,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmulosh", VX (4, 328), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrsqrtefp", VX (4, 330), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"vmrglh", VX (4, 332), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vpkswus", VX (4, 334), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"mulchw", XRC(4, 168,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"mulchw.", XRC(4, 168,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchw", XO (4, 172,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchw.", XO (4, 172,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchw", XO (4, 174,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchw.", XO (4, 174,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vaddcuw", VX (4, 384), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmaxsw", VX (4, 386), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vslw", VX (4, 388), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrlwnm", VX (4, 389), VX_MASK, PPCVEC3, 0, {VD, 
VA, VB}}, +{"vcmpnezw", VXR(4, 391,0), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vmulosw", VX (4, 392), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vexptefp", VX (4, 394), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"vmrglw", VX (4, 396), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vpkshss", VX (4, 398), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"macchwsu", XO (4, 204,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchwsu.", XO (4, 204,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vmaxsd", VX (4, 450), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vsl", VX (4, 452), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrldnm", VX (4, 453), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vcmpgefp", VXR(4, 454,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vlogefp", VX (4, 458), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"vpkswss", VX (4, 462), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"macchws", XO (4, 236,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchws.", XO (4, 236,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchws", XO (4, 238,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchws.", XO (4, 238,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evaddw", VX (4, 512), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vaddubs", VX (4, 512), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmul10uq", VX (4, 513), VXVB_MASK, PPCVEC3, 0, {VD, VA}}, +{"evaddiw", VX (4, 514), VX_MASK, PPCSPE, 0, {RS, RB, UIMM}}, +{"vminub", VX (4, 514), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evsubfw", VX (4, 516), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evsubw", VX (4, 516), VX_MASK, PPCSPE, 0, {RS, RB, RA}}, +{"vsrb", VX (4, 516), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evsubifw", VX (4, 518), VX_MASK, PPCSPE, 0, {RS, UIMM, RB}}, +{"evsubiw", VX (4, 518), VX_MASK, PPCSPE, 0, {RS, RB, UIMM}}, +{"vcmpgtub", VXR(4, 518,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evabs", VX (4, 520), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vmuleub", VX (4, 520), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evneg", VX (4, 521), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evextsb", VX (4, 522), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vrfin", VX (4, 522), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"evextsh", VX (4, 523), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evrndw", VX (4, 524), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vspltb", VX (4, 524), VXUIMM4_MASK, PPCVEC, 0, {VD, VB, UIMM4}}, +{"vextractub", VX (4, 525), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"evcntlzw", VX (4, 525), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evcntlsw", VX (4, 526), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vupkhsb", VX (4, 526), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"brinc", VX (4, 527), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"ps_abs", XRC(4, 264,0), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"ps_abs.", XRC(4, 264,1), XRA_MASK, PPCPS, 0, {FRT, FRB}}, +{"evand", VX (4, 529), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evandc", VX (4, 530), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evxor", VX (4, 534), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmr", VX (4, 535), VX_MASK, PPCSPE, 0, {RS, RA, BBA}}, +{"evor", VX (4, 535), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evnor", VX (4, 536), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evnot", VX (4, 536), VX_MASK, PPCSPE, 0, {RS, RA, BBA}}, +{"get", APU(4, 268,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, +{"eveqv", VX (4, 537), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evorc", VX (4, 539), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evnand", VX (4, 542), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evsrwu", VX (4, 544), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evsrws", VX (4, 545), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evsrwiu", VX (4, 546), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, +{"evsrwis", VX (4, 547), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, 
+{"evslw", VX (4, 548), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evslwi", VX (4, 550), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, +{"evrlw", VX (4, 552), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evsplati", VX (4, 553), VX_MASK, PPCSPE, 0, {RS, SIMM}}, +{"evrlwi", VX (4, 554), VX_MASK, PPCSPE, 0, {RS, RA, EVUIMM}}, +{"evsplatfi", VX (4, 555), VX_MASK, PPCSPE, 0, {RS, SIMM}}, +{"evmergehi", VX (4, 556), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmergelo", VX (4, 557), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmergehilo", VX (4, 558), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmergelohi", VX (4, 559), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evcmpgtu", VX (4, 560), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evcmpgts", VX (4, 561), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evcmpltu", VX (4, 562), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evcmplts", VX (4, 563), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evcmpeq", VX (4, 564), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"cget", APU(4, 284,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, +{"vadduhs", VX (4, 576), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmul10euq", VX (4, 577), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vminuh", VX (4, 578), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vsrh", VX (4, 580), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpgtuh", VXR(4, 582,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmuleuh", VX (4, 584), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vrfiz", VX (4, 586), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"vsplth", VX (4, 588), VXUIMM3_MASK, PPCVEC, 0, {VD, VB, UIMM3}}, +{"vextractuh", VX (4, 589), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"vupkhsh", VX (4, 590), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"nget", APU(4, 300,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, +{"evsel", EVSEL(4,79), EVSEL_MASK, PPCSPE, 0, {RS, RA, RB, CRFS}}, +{"ncget", APU(4, 316,0), APU_RA_MASK, PPC405, 0, {RT, FSL}}, +{"evfsadd", VX (4, 640), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vadduws", VX (4, 640), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evfssub", VX (4, 641), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vminuw", VX (4, 642), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evfsabs", VX (4, 644), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vsrw", VX (4, 644), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evfsnabs", VX (4, 645), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evfsneg", VX (4, 646), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vcmpgtuw", VXR(4, 646,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmuleuw", VX (4, 648), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evfsmul", VX (4, 648), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evfsdiv", VX (4, 649), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vrfip", VX (4, 650), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"evfscmpgt", VX (4, 652), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"vspltw", VX (4, 652), VXUIMM2_MASK, PPCVEC, 0, {VD, VB, UIMM2}}, +{"vextractuw", VX (4, 653), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"evfscmplt", VX (4, 653), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evfscmpeq", VX (4, 654), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"vupklsb", VX (4, 654), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"evfscfui", VX (4, 656), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfscfsi", VX (4, 657), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfscfuf", VX (4, 658), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfscfsf", VX (4, 659), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfsctui", VX (4, 660), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfsctsi", VX (4, 661), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfsctuf", VX (4, 662), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfsctsf", VX (4, 663), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfsctuiz", VX (4, 664), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"put", APU(4, 332,0), APU_RT_MASK, 
PPC405, 0, {RA, FSL}}, +{"evfsctsiz", VX (4, 666), VX_MASK, PPCSPE, 0, {RS, RB}}, +{"evfststgt", VX (4, 668), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evfststlt", VX (4, 669), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"evfststeq", VX (4, 670), VX_MASK, PPCSPE, 0, {CRFD, RA, RB}}, +{"cput", APU(4, 348,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, +{"efsadd", VX (4, 704), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efssub", VX (4, 705), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"vminud", VX (4, 706), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"efsabs", VX (4, 708), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"vsr", VX (4, 708), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"efsnabs", VX (4, 709), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"efsneg", VX (4, 710), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"vcmpgtfp", VXR(4, 710,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpgtud", VXR(4, 711,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"efsmul", VX (4, 712), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efsdiv", VX (4, 713), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"vrfim", VX (4, 714), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"efscmpgt", VX (4, 716), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"vextractd", VX (4, 717), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"efscmplt", VX (4, 717), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efscmpeq", VX (4, 718), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"vupklsh", VX (4, 718), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"efscfd", VX (4, 719), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efscfui", VX (4, 720), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efscfsi", VX (4, 721), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efscfuf", VX (4, 722), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efscfsf", VX (4, 723), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efsctui", VX (4, 724), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efsctsi", VX (4, 725), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efsctuf", VX (4, 726), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efsctsf", VX (4, 727), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efsctuiz", VX (4, 728), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"nput", APU(4, 364,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, +{"efsctsiz", VX (4, 730), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efststgt", VX (4, 732), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efststlt", VX (4, 733), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efststeq", VX (4, 734), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdadd", VX (4, 736), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efdsub", VX (4, 737), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efdcfuid", VX (4, 738), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcfsid", VX (4, 739), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdabs", VX (4, 740), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"efdnabs", VX (4, 741), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"efdneg", VX (4, 742), VX_MASK, PPCEFS, 0, {RS, RA}}, +{"efdmul", VX (4, 744), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efddiv", VX (4, 745), VX_MASK, PPCEFS, 0, {RS, RA, RB}}, +{"efdctuidz", VX (4, 746), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdctsidz", VX (4, 747), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcmpgt", VX (4, 748), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdcmplt", VX (4, 749), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdcmpeq", VX (4, 750), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdcfs", VX (4, 751), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcfui", VX (4, 752), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcfsi", VX (4, 753), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcfuf", VX (4, 754), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdcfsf", VX (4, 755), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdctui", VX (4, 756), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdctsi", VX (4, 757), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdctuf", VX (4, 758), VX_MASK, PPCEFS, 0, {RS, RB}}, 
+{"efdctsf", VX (4, 759), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdctuiz", VX (4, 760), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"ncput", APU(4, 380,0), APU_RT_MASK, PPC405, 0, {RA, FSL}}, +{"efdctsiz", VX (4, 762), VX_MASK, PPCEFS, 0, {RS, RB}}, +{"efdtstgt", VX (4, 764), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdtstlt", VX (4, 765), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"efdtsteq", VX (4, 766), VX_MASK, PPCEFS, 0, {CRFD, RA, RB}}, +{"evlddx", VX (4, 768), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vaddsbs", VX (4, 768), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evldd", VX (4, 769), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"evldwx", VX (4, 770), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vminsb", VX (4, 770), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evldw", VX (4, 771), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"evldhx", VX (4, 772), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsrab", VX (4, 772), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evldh", VX (4, 773), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"vcmpgtsb", VXR(4, 774,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evlhhesplatx",VX (4, 776), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vmulesb", VX (4, 776), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evlhhesplat", VX (4, 777), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, +{"vcfux", VX (4, 778), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vcuxwfp", VX (4, 778), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vspltisb", VX (4, 780), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, +{"vinsertb", VX (4, 781), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"evlhhousplat",VX (4, 781), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, +{"evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vpkpx", VX (4, 782), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evlhhossplat",VX (4, 783), VX_MASK, PPCSPE, 0, {RS, EVUIMM_2, RA}}, +{"mullhwu", XRC(4, 392,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"evlwhex", VX (4, 784), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"mullhwu.", XRC(4, 392,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"evlwhe", VX (4, 785), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evlwhoux", VX (4, 788), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evlwhou", VX (4, 789), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evlwhosx", VX (4, 790), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evlwhos", VX (4, 791), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"maclhwu", XO (4, 396,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evlwwsplatx", VX (4, 792), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"maclhwu.", XO (4, 396,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evlwwsplat", VX (4, 793), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evlwhsplatx", VX (4, 796), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evlwhsplat", VX (4, 797), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evstddx", VX (4, 800), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstdd", VX (4, 801), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"evstdwx", VX (4, 802), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstdw", VX (4, 803), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"evstdhx", VX (4, 804), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstdh", VX (4, 805), VX_MASK, PPCSPE, 0, {RS, EVUIMM_8, RA}}, +{"evstwhex", VX (4, 816), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstwhe", VX (4, 817), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evstwhox", VX (4, 820), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstwho", VX (4, 821), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evstwwex", VX (4, 824), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evstwwe", VX (4, 825), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"evstwwox", VX (4, 828), VX_MASK, PPCSPE, 0, 
{RS, RA, RB}}, +{"evstwwo", VX (4, 829), VX_MASK, PPCSPE, 0, {RS, EVUIMM_4, RA}}, +{"vaddshs", VX (4, 832), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"bcdcpsgn.", VX (4, 833), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vminsh", VX (4, 834), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vsrah", VX (4, 836), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpgtsh", VXR(4, 838,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmulesh", VX (4, 840), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcfsx", VX (4, 842), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vcsxwfp", VX (4, 842), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vspltish", VX (4, 844), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, +{"vinserth", VX (4, 845), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"vupkhpx", VX (4, 846), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"mullhw", XRC(4, 424,0), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"mullhw.", XRC(4, 424,1), X_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhw", XO (4, 428,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhw.", XO (4, 428,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhw", XO (4, 430,0,0),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhw.", XO (4, 430,0,1),XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vaddsws", VX (4, 896), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vminsw", VX (4, 898), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vsraw", VX (4, 900), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpgtsw", VXR(4, 902,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmulesw", VX (4, 904), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vctuxs", VX (4, 906), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vcfpuxws", VX (4, 906), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vspltisw", VX (4, 908), VXVB_MASK, PPCVEC, 0, {VD, SIMM}}, +{"vinsertw", VX (4, 909), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"maclhwsu", XO (4, 460,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhwsu.", XO (4, 460,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vminsd", VX (4, 962), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vsrad", VX (4, 964), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcmpbfp", VXR(4, 966,0), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpgtsd", VXR(4, 967,0), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vctsxs", VX (4, 970), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vcfpsxws", VX (4, 970), VX_MASK, PPCVEC, 0, {VD, VB, UIMM}}, +{"vinsertd", VX (4, 973), VXUIMM4_MASK, PPCVEC3, 0, {VD, VB, UIMM4}}, +{"vupklpx", VX (4, 974), VXVA_MASK, PPCVEC, 0, {VD, VB}}, +{"maclhws", XO (4, 492,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhws.", XO (4, 492,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhws", XO (4, 494,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhws.", XO (4, 494,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vsububm", VX (4,1024), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"bcdadd.", VX (4,1025), VXPS_MASK, PPCVEC2, 0, {VD, VA, VB, PS}}, +{"vavgub", VX (4,1026), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vabsdub", VX (4,1027), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmhessf", VX (4,1027), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vand", VX (4,1028), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpequb.", VXR(4, 6,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpneb.", VXR(4, 7,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"udi0fcm.", APU(4, 515,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi0fcm", APU(4, 515,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"evmhossf", VX (4,1031), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vpmsumb", VX (4,1032), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmheumi", VX (4,1032), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmi", VX (4,1033), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vmaxfp", VX (4,1034), VX_MASK, PPCVEC, 
0, {VD, VA, VB}}, +{"evmhesmf", VX (4,1035), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhoumi", VX (4,1036), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vslo", VX (4,1036), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmhosmi", VX (4,1037), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmf", VX (4,1039), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"machhwuo", XO (4, 12,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhwuo.", XO (4, 12,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_merge00", XOPS(4,528,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"ps_merge00.", XOPS(4,528,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"evmhessfa", VX (4,1059), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhossfa", VX (4,1063), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmheumia", VX (4,1064), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmia", VX (4,1065), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmfa", VX (4,1067), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhoumia", VX (4,1068), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmia", VX (4,1069), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmfa", VX (4,1071), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsubuhm", VX (4,1088), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"bcdsub.", VX (4,1089), VXPS_MASK, PPCVEC2, 0, {VD, VA, VB, PS}}, +{"vavguh", VX (4,1090), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vabsduh", VX (4,1091), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vandc", VX (4,1092), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpequh.", VXR(4, 70,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi1fcm.", APU(4, 547,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi1fcm", APU(4, 547,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vcmpneh.", VXR(4, 71,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"evmwhssf", VX (4,1095), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vpmsumh", VX (4,1096), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwlumi", VX (4,1096), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vminfp", VX (4,1098), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmwhumi", VX (4,1100), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsro", VX (4,1100), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmwhsmi", VX (4,1101), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vpkudum", VX (4,1102), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwhsmf", VX (4,1103), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwssf", VX (4,1107), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"machhwo", XO (4, 44,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwumi", VX (4,1112), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"machhwo.", XO (4, 44,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwsmi", VX (4,1113), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwsmf", VX (4,1115), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"nmachhwo", XO (4, 46,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhwo.", XO (4, 46,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_merge01", XOPS(4,560,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"ps_merge01.", XOPS(4,560,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"evmwhssfa", VX (4,1127), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwlumia", VX (4,1128), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwhumia", VX (4,1132), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwhsmia", VX (4,1133), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwhsmfa", VX (4,1135), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwssfa", VX (4,1139), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwumia", VX (4,1144), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwsmia", VX (4,1145), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwsmfa", VX (4,1147), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsubuwm", VX (4,1152), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"bcdus.", VX (4,1153), VX_MASK, 
PPCVEC3, 0, {VD, VA, VB}}, +{"vavguw", VX (4,1154), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vabsduw", VX (4,1155), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vmr", VX (4,1156), VX_MASK, PPCVEC, 0, {VD, VA, VBA}}, +{"vor", VX (4,1156), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vcmpnew.", VXR(4, 135,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vpmsumw", VX (4,1160), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcmpequw.", VXR(4, 134,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi2fcm.", APU(4, 579,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi2fcm", APU(4, 579,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"machhwsuo", XO (4, 76,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhwsuo.", XO (4, 76,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_merge10", XOPS(4,592,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"ps_merge10.", XOPS(4,592,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vsubudm", VX (4,1216), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evaddusiaaw", VX (4,1216), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"bcds.", VX (4,1217), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, +{"evaddssiaaw", VX (4,1217), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evsubfusiaaw",VX (4,1218), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evsubfssiaaw",VX (4,1219), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evmra", VX (4,1220), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vxor", VX (4,1220), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evdivws", VX (4,1222), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vcmpeqfp.", VXR(4, 198,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi3fcm.", APU(4, 611,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vcmpequd.", VXR(4, 199,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"udi3fcm", APU(4, 611,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"evdivwu", VX (4,1223), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vpmsumd", VX (4,1224), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evaddumiaaw", VX (4,1224), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evaddsmiaaw", VX (4,1225), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evsubfumiaaw",VX (4,1226), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"evsubfsmiaaw",VX (4,1227), VX_MASK, PPCSPE, 0, {RS, RA}}, +{"vpkudus", VX (4,1230), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"machhwso", XO (4, 108,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"machhwso.", XO (4, 108,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhwso", XO (4, 110,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmachhwso.", XO (4, 110,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"ps_merge11", XOPS(4,624,0), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"ps_merge11.", XOPS(4,624,1), XOPS_MASK, PPCPS, 0, {FRT, FRA, FRB}}, +{"vsubuqm", VX (4,1280), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmheusiaaw", VX (4,1280), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"bcdtrunc.", VX (4,1281), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, +{"evmhessiaaw", VX (4,1281), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vavgsb", VX (4,1282), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmhessfaaw", VX (4,1283), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhousiaaw", VX (4,1284), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vnot", VX (4,1284), VX_MASK, PPCVEC, 0, {VD, VA, VBA}}, +{"vnor", VX (4,1284), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmhossiaaw", VX (4,1285), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"udi4fcm.", APU(4, 643,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi4fcm", APU(4, 643,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vcmpnezb.", VXR(4, 263,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"evmhossfaaw", VX (4,1287), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmheumiaaw", VX (4,1288), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, 
+{"vcipher", VX (4,1288), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcipherlast", VX (4,1289), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmhesmiaaw", VX (4,1289), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmfaaw", VX (4,1291), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vgbbd", VX (4,1292), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"evmhoumiaaw", VX (4,1292), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmiaaw", VX (4,1293), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmfaaw", VX (4,1295), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwuo", XO (4, 140,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchwuo.", XO (4, 140,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmhegumiaa", VX (4,1320), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhegsmiaa", VX (4,1321), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhegsmfaa", VX (4,1323), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogumiaa", VX (4,1324), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogsmiaa", VX (4,1325), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogsmfaa", VX (4,1327), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsubcuq", VX (4,1344), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwlusiaaw", VX (4,1344), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"bcdutrunc.", VX (4,1345), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"evmwlssiaaw", VX (4,1345), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vavgsh", VX (4,1346), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vorc", VX (4,1348), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"udi5fcm.", APU(4, 675,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi5fcm", APU(4, 675,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vcmpnezh.", VXR(4, 327,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vncipher", VX (4,1352), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwlumiaaw", VX (4,1352), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vncipherlast",VX (4,1353), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwlsmiaaw", VX (4,1353), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vbpermq", VX (4,1356), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vpksdus", VX (4,1358), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwssfaa", VX (4,1363), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwo", XO (4, 172,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwumiaa", VX (4,1368), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwo.", XO (4, 172,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwsmiaa", VX (4,1369), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwsmfaa", VX (4,1371), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"nmacchwo", XO (4, 174,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchwo.", XO (4, 174,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmheusianw", VX (4,1408), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsubcuw", VX (4,1408), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmhessianw", VX (4,1409), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"bcdctsq.", VXVA(4,1409,0), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"bcdcfsq.", VXVA(4,1409,2), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, +{"bcdctz.", VXVA(4,1409,4), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, +{"bcdctn.", VXVA(4,1409,5), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"bcdcfz.", VXVA(4,1409,6), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, +{"bcdcfn.", VXVA(4,1409,7), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, +{"bcdsetsgn.", VXVA(4,1409,31), VXVAPS_MASK, PPCVEC3, 0, {VD, VB, PS}}, +{"vavgsw", VX (4,1410), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"evmhessfanw", VX (4,1411), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vnand", VX (4,1412), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmhousianw", VX (4,1412), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhossianw", VX (4,1413), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"udi6fcm.", APU(4, 707,0), APU_MASK, 
PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi6fcm", APU(4, 707,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vcmpnezw.", VXR(4, 391,1), VXR_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"evmhossfanw", VX (4,1415), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmheumianw", VX (4,1416), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmianw", VX (4,1417), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhesmfanw", VX (4,1419), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhoumianw", VX (4,1420), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmianw", VX (4,1421), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhosmfanw", VX (4,1423), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwsuo", XO (4, 204,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"macchwsuo.", XO (4, 204,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmhegumian", VX (4,1448), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhegsmian", VX (4,1449), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhegsmfan", VX (4,1451), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogumian", VX (4,1452), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogsmian", VX (4,1453), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmhogsmfan", VX (4,1455), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwlusianw", VX (4,1472), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"bcdsr.", VX (4,1473), VXPS_MASK, PPCVEC3, 0, {VD, VA, VB, PS}}, +{"evmwlssianw", VX (4,1473), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vsld", VX (4,1476), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcmpgefp.", VXR(4, 454,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi7fcm.", APU(4, 739,0), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"udi7fcm", APU(4, 739,1), APU_MASK, PPC405|PPC440, PPC476, {URT, URA, URB}}, +{"vsbox", VX (4,1480), VXVB_MASK, PPCVEC2, 0, {VD, VA}}, +{"evmwlumianw", VX (4,1480), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwlsmianw", VX (4,1481), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"vbpermd", VX (4,1484), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vpksdss", VX (4,1486), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"evmwssfan", VX (4,1491), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwso", XO (4, 236,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwumian", VX (4,1496), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"macchwso.", XO (4, 236,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"evmwsmian", VX (4,1497), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"evmwsmfan", VX (4,1499), VX_MASK, PPCSPE, 0, {RS, RA, RB}}, +{"nmacchwso", XO (4, 238,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmacchwso.", XO (4, 238,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vsububs", VX (4,1536), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vclzlsbb", VXVA(4,1538,0), VXVA_MASK, PPCVEC3, 0, {RT, VB}}, +{"vctzlsbb", VXVA(4,1538,1), VXVA_MASK, PPCVEC3, 0, {RT, VB}}, +{"vnegw", VXVA(4,1538,6), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vnegd", VXVA(4,1538,7), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vprtybw", VXVA(4,1538,8), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vprtybd", VXVA(4,1538,9), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vprtybq", VXVA(4,1538,10), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vextsb2w", VXVA(4,1538,16), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vextsh2w", VXVA(4,1538,17), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vextsb2d", VXVA(4,1538,24), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vextsh2d", VXVA(4,1538,25), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vextsw2d", VXVA(4,1538,26), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vctzb", VXVA(4,1538,28), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vctzh", VXVA(4,1538,29), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vctzw", VXVA(4,1538,30), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, +{"vctzd", VXVA(4,1538,31), VXVA_MASK, PPCVEC3, 0, {VD, VB}}, 
+{"mfvscr", VX (4,1540), VXVAVB_MASK, PPCVEC, 0, {VD}}, +{"vcmpgtub.", VXR(4, 518,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi8fcm.", APU(4, 771,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi8fcm", APU(4, 771,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vsum4ubs", VX (4,1544), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vextublx", VX (4,1549), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"vsubuhs", VX (4,1600), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"mtvscr", VX (4,1604), VXVDVA_MASK, PPCVEC, 0, {VB}}, +{"vcmpgtuh.", VXR(4, 582,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vsum4shs", VX (4,1608), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi9fcm.", APU(4, 804,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi9fcm", APU(4, 804,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vextuhlx", VX (4,1613), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"vupkhsw", VX (4,1614), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vsubuws", VX (4,1664), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vshasigmaw", VX (4,1666), VX_MASK, PPCVEC2, 0, {VD, VA, ST, SIX}}, +{"veqv", VX (4,1668), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcmpgtuw.", VXR(4, 646,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi10fcm.", APU(4, 835,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi10fcm", APU(4, 835,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vsum2sws", VX (4,1672), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmrgow", VX (4,1676), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vextuwlx", VX (4,1677), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"vshasigmad", VX (4,1730), VX_MASK, PPCVEC2, 0, {VD, VA, ST, SIX}}, +{"vsrd", VX (4,1732), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vcmpgtfp.", VXR(4, 710,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi11fcm.", APU(4, 867,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vcmpgtud.", VXR(4, 711,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"udi11fcm", APU(4, 867,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vupklsw", VX (4,1742), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vsubsbs", VX (4,1792), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vclzb", VX (4,1794), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vpopcntb", VX (4,1795), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vsrv", VX (4,1796), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vcmpgtsb.", VXR(4, 774,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi12fcm.", APU(4, 899,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi12fcm", APU(4, 899,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vsum4sbs", VX (4,1800), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vextubrx", VX (4,1805), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"maclhwuo", XO (4, 396,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhwuo.", XO (4, 396,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vsubshs", VX (4,1856), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vclzh", VX (4,1858), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vpopcnth", VX (4,1859), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vslv", VX (4,1860), VX_MASK, PPCVEC3, 0, {VD, VA, VB}}, +{"vcmpgtsh.", VXR(4, 838,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vextuhrx", VX (4,1869), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"udi13fcm.", APU(4, 931,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi13fcm", APU(4, 931,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"maclhwo", XO (4, 428,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhwo.", XO (4, 428,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhwo", XO (4, 430,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhwo.", XO (4, 430,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vsubsws", VX (4,1920), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vclzw", VX (4,1922), VXVA_MASK, PPCVEC2, 0, 
{VD, VB}}, +{"vpopcntw", VX (4,1923), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vcmpgtsw.", VXR(4, 902,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi14fcm.", APU(4, 963,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"udi14fcm", APU(4, 963,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vsumsws", VX (4,1928), VX_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"vmrgew", VX (4,1932), VX_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"vextuwrx", VX (4,1933), VX_MASK, PPCVEC3, 0, {RT, RA, VB}}, +{"maclhwsuo", XO (4, 460,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhwsuo.", XO (4, 460,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"vclzd", VX (4,1986), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vpopcntd", VX (4,1987), VXVA_MASK, PPCVEC2, 0, {VD, VB}}, +{"vcmpbfp.", VXR(4, 966,1), VXR_MASK, PPCVEC, 0, {VD, VA, VB}}, +{"udi15fcm.", APU(4, 995,0), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"vcmpgtsd.", VXR(4, 967,1), VXR_MASK, PPCVEC2, 0, {VD, VA, VB}}, +{"udi15fcm", APU(4, 995,1), APU_MASK, PPC440, PPC476, {URT, URA, URB}}, +{"maclhwso", XO (4, 492,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"maclhwso.", XO (4, 492,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhwso", XO (4, 494,1,0), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"nmaclhwso.", XO (4, 494,1,1), XO_MASK, MULHW, 0, {RT, RA, RB}}, +{"dcbz_l", X (4,1014), XRT_MASK, PPCPS, 0, {RA, RB}}, + +{"mulli", OP(7), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, +{"muli", OP(7), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, + +{"subfic", OP(8), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, +{"sfi", OP(8), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, + +{"dozi", OP(9), OP_MASK, M601, PPCVLE, {RT, RA, SI}}, + +{"cmplwi", OPL(10,0), OPL_MASK, PPCCOM, PPCVLE, {OBF, RA, UISIGNOPT}}, +{"cmpldi", OPL(10,1), OPL_MASK, PPC64, PPCVLE, {OBF, RA, UISIGNOPT}}, +{"cmpli", OP(10), OP_MASK, PPC, PPCVLE, {BF, L32OPT, RA, UISIGNOPT}}, +{"cmpli", OP(10), OP_MASK, PWRCOM, PPC|PPCVLE, {BF, RA, UISIGNOPT}}, + +{"cmpwi", OPL(11,0), OPL_MASK, PPCCOM, PPCVLE, {OBF, RA, SI}}, +{"cmpdi", OPL(11,1), OPL_MASK, PPC64, PPCVLE, {OBF, RA, SI}}, +{"cmpi", OP(11), OP_MASK, PPC, PPCVLE, {BF, L32OPT, RA, SI}}, +{"cmpi", OP(11), OP_MASK, PWRCOM, PPC|PPCVLE, {BF, RA, SI}}, + +{"addic", OP(12), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, +{"ai", OP(12), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, +{"subic", OP(12), OP_MASK, PPCCOM, PPCVLE, {RT, RA, NSI}}, + +{"addic.", OP(13), OP_MASK, PPCCOM, PPCVLE, {RT, RA, SI}}, +{"ai.", OP(13), OP_MASK, PWRCOM, PPCVLE, {RT, RA, SI}}, +{"subic.", OP(13), OP_MASK, PPCCOM, PPCVLE, {RT, RA, NSI}}, + +{"li", OP(14), DRA_MASK, PPCCOM, PPCVLE, {RT, SI}}, +{"lil", OP(14), DRA_MASK, PWRCOM, PPCVLE, {RT, SI}}, +{"addi", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, SI}}, +{"cal", OP(14), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, +{"subi", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, NSI}}, +{"la", OP(14), OP_MASK, PPCCOM, PPCVLE, {RT, D, RA0}}, + +{"lis", OP(15), DRA_MASK, PPCCOM, PPCVLE, {RT, SISIGNOPT}}, +{"liu", OP(15), DRA_MASK, PWRCOM, PPCVLE, {RT, SISIGNOPT}}, +{"addis", OP(15), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, SISIGNOPT}}, +{"cau", OP(15), OP_MASK, PWRCOM, PPCVLE, {RT, RA0, SISIGNOPT}}, +{"subis", OP(15), OP_MASK, PPCCOM, PPCVLE, {RT, RA0, NSISIGNOPT}}, + +{"bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, +{"bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, +{"bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BD}}, +{"bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, PPCVLE, {BD}}, +{"bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, +{"bdnzl+", BBO(16,BODNZ,0,1), 
BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, +{"bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BD}}, +{"bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, PPCVLE, {BD}}, +{"bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, +{"bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, +{"bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDA}}, +{"bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, PPCVLE, {BDA}}, +{"bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, +{"bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, +{"bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDA}}, +{"bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, PPCVLE, {BDA}}, +{"bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, +{"bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, +{"bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, PPCVLE, {BD}}, +{"bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDM}}, +{"bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDP}}, +{"bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, PPCVLE, {BD}}, +{"bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, +{"bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, +{"bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, PPCVLE, {BDA}}, +{"bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDMA}}, +{"bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, PPCVLE, {BDPA}}, +{"bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, PPCVLE, {BDA}}, + +{"bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bgel", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, 
BD}}, +{"bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"blel-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, +{"bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, +{"bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, 
PPCVLE, {CR, BDPA}}, +{"bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnua", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, +{"bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, + +{"blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bgta+", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bso-", 
BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bso", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, +{"bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bsol", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, COM, PPCVLE, {CR, BD}}, +{"bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDM}}, +{"bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDP}}, +{"bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BD}}, +{"bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, +{"bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, PPCVLE, {CR, BDA}}, +{"bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDMA}}, +{"bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDPA}}, +{"bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, PPCVLE, {CR, BDA}}, + +{"bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdnzfla-", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdnzfla+", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, 
{BI, BDPA}}, +{"bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, + +{"bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, +{"bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, +{"bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, +{"bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, +{"bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, +{"bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, +{"bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, +{"bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, +{"bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, +{"bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, +{"bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, +{"bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, + +{"bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdnztla-", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdnztla+", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDM}}, +{"bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDP}}, +{"bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDMA}}, +{"bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, ISA_V2|PPCVLE, {BI, BDPA}}, +{"bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, PPCVLE, {BI, BDA}}, + +{"bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, +{"bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, +{"bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, +{"btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDM}}, +{"btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDP}}, +{"btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BD}}, +{"bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BD}}, +{"bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, 
BDMA}}, +{"bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, +{"bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, +{"btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDMA}}, +{"btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDPA}}, +{"btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, PPCVLE, {BI, BDA}}, +{"bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, PPCVLE, {BI, BDA}}, + +{"bc-", B(16,0,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDM}}, +{"bc+", B(16,0,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDP}}, +{"bc", B(16,0,0), B_MASK, COM, PPCVLE, {BO, BI, BD}}, +{"bcl-", B(16,0,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDM}}, +{"bcl+", B(16,0,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDP}}, +{"bcl", B(16,0,1), B_MASK, COM, PPCVLE, {BO, BI, BD}}, +{"bca-", B(16,1,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDMA}}, +{"bca+", B(16,1,0), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDPA}}, +{"bca", B(16,1,0), B_MASK, COM, PPCVLE, {BO, BI, BDA}}, +{"bcla-", B(16,1,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDMA}}, +{"bcla+", B(16,1,1), B_MASK, PPCCOM, PPCVLE, {BOE, BI, BDPA}}, +{"bcla", B(16,1,1), B_MASK, COM, PPCVLE, {BO, BI, BDA}}, + +{"svc", SC(17,0,0), SC_MASK, POWER, PPCVLE, {SVC_LEV, FL1, FL2}}, +{"svcl", SC(17,0,1), SC_MASK, POWER, PPCVLE, {SVC_LEV, FL1, FL2}}, +{"sc", SC(17,1,0), SC_MASK, PPC, PPCVLE, {LEV}}, +{"svca", SC(17,1,0), SC_MASK, PWRCOM, PPCVLE, {SV}}, +{"svcla", SC(17,1,1), SC_MASK, POWER, PPCVLE, {SV}}, + +{"b", B(18,0,0), B_MASK, COM, PPCVLE, {LI}}, +{"bl", B(18,0,1), B_MASK, COM, PPCVLE, {LI}}, +{"ba", B(18,1,0), B_MASK, COM, PPCVLE, {LIA}}, +{"bla", B(18,1,1), B_MASK, COM, PPCVLE, {LIA}}, + +{"mcrf", XL(19,0), XLBB_MASK|(3<<21)|(3<<16), COM, PPCVLE, {BF, BFA}}, + +{"addpcis", DX(19,2), DX_MASK, POWER9, PPCVLE, {RT, DXD}}, +{"subpcis", DX(19,2), DX_MASK, POWER9, PPCVLE, {RT, NDXD}}, + +{"bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"bdnzlrl-", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdnzlrl+", XLO(19,BODNZP,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, PPCCOM, ISA_V2|PPCVLE, {0}}, +{"blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"br", XLO(19,BOU,16,0), XLBOBIBB_MASK, PWRCOM, PPCVLE, {0}}, +{"blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, PPCVLE, {0}}, +{"brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, PPCVLE, {0}}, +{"bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdnzlrl-", XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdnzlrl+", XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, +{"bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, 
+{"bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, ISA_V2, PPCVLE, {0}}, + +{"bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, 
ISA_V2|PPCVLE, {CR}}, +{"bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnslrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnelr+", XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, 
{CR}}, +{"bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, PPCVLE, {CR}}, +{"bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, 
+{"beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, + +{"bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdnzflr-", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnzflrl", XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnzflr+", XLO(19,BODNZFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdzflrl-", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdzflrl+", XLO(19,BODZFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, +{"bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, +{"bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdnztlr-", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnztlrl", XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdnztlrl-", XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnztlr+", XLO(19,BODNZTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdnztlrl+", XLO(19,BODNZTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bdztlrl-", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bdztlrl+", XLO(19,BODZTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, +{"btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, PPCVLE, {BI}}, +{"btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, 
{BI}}, +{"btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, + +{"bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, +{"bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, +{"bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, +{"bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, + +{"rfid", XL(19,18), 0xffffffff, PPC64, PPCVLE, {0}}, + +{"crnot", XL(19,33), XL_MASK, PPCCOM, PPCVLE, {BT, BA, BBA}}, +{"crnor", XL(19,33), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, +{"rfmci", X(19,38), 0xffffffff, PPCRFMCI|PPCA2|PPC476, PPCVLE, {0}}, + +{"rfdi", XL(19,39), 0xffffffff, E500MC, PPCVLE, {0}}, +{"rfi", XL(19,50), 0xffffffff, COM, PPCVLE, {0}}, +{"rfci", XL(19,51), 0xffffffff, PPC403|BOOKE|PPCE300|PPCA2|PPC476, PPCVLE, {0}}, + +{"rfsvc", XL(19,82), 0xffffffff, POWER, PPCVLE, {0}}, + +{"rfgi", XL(19,102), 0xffffffff, E500MC|PPCA2, PPCVLE, {0}}, + +{"crandc", XL(19,129), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"rfebb", XL(19,146), XLS_MASK, POWER8, PPCVLE, {SXL}}, + +{"isync", XL(19,150), 0xffffffff, PPCCOM, PPCVLE, {0}}, +{"ics", XL(19,150), 0xffffffff, PWRCOM, PPCVLE, {0}}, + +{"crclr", XL(19,193), XL_MASK, PPCCOM, PPCVLE, {BT, BAT, BBA}}, +{"crxor", XL(19,193), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"dnh", X(19,198), X_MASK, E500MC, PPCVLE, {DUI, DUIS}}, + +{"crnand", XL(19,225), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"crand", XL(19,257), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"hrfid", XL(19,274), 0xffffffff, POWER5|CELL, PPC476|PPCVLE, {0}}, + +{"crset", XL(19,289), XL_MASK, PPCCOM, PPCVLE, {BT, BAT, BBA}}, +{"creqv", XL(19,289), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"urfid", XL(19,306), 0xffffffff, POWER9, PPCVLE, {0}}, +{"stop", XL(19,370), 0xffffffff, POWER9, PPCVLE, {0}}, + +{"doze", XL(19,402), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, + +{"crorc", XL(19,417), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"nap", XL(19,434), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, + +{"crmove", XL(19,449), XL_MASK, PPCCOM, PPCVLE, {BT, BA, BBA}}, +{"cror", XL(19,449), XL_MASK, COM, PPCVLE, {BT, BA, BB}}, + +{"sleep", XL(19,466), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, +{"rvwinkle", XL(19,498), 0xffffffff, POWER6, POWER9|PPCVLE, {0}}, + +{"bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, PPCVLE, {0}}, +{"bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, PPCVLE, {0}}, + +{"bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"blectr-", 
XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgectr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, 
ISA_V2, PPCVLE, {CR}}, +{"bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, PPCVLE, {CR}}, +{"bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, 
ISA_V2|PPCVLE, {CR}}, +{"bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, ISA_V2|PPCVLE, {CR}}, +{"bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, +{"bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, ISA_V2, PPCVLE, {CR}}, + +{"bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, PPCVLE, {BI}}, +{"btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, PPCCOM, ISA_V2|PPCVLE, {BI}}, +{"btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, +{"btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, ISA_V2, PPCVLE, {BI}}, + +{"bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, PPCVLE, {BOE, BI}}, +{"bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, 
PPCVLE, {BOE, BI}}, +{"bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, +{"bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, +{"bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, PPCVLE, {BO, BI, BH}}, +{"bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, PPCVLE, {BO, BI}}, + +{"bctar-", XLYLK(19,560,0,0), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, +{"bctarl-", XLYLK(19,560,0,1), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, +{"bctar+", XLYLK(19,560,1,0), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, +{"bctarl+", XLYLK(19,560,1,1), XLYBB_MASK, POWER8, PPCVLE, {BOE, BI}}, +{"bctar", XLLK(19,560,0), XLBH_MASK, POWER8, PPCVLE, {BO, BI, BH}}, +{"bctarl", XLLK(19,560,1), XLBH_MASK, POWER8, PPCVLE, {BO, BI, BH}}, + +{"rlwimi", M(20,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, +{"rlimi", M(20,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, + +{"rlwimi.", M(20,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, +{"rlimi.", M(20,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, + +{"rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, SH}}, +{"clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, PPCVLE, {RA, RS, MB}}, +{"rlwinm", M(21,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, +{"rlinm", M(21,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, +{"rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, SH}}, +{"clrlwi.", MME(21,31,1), MSHME_MASK, PPCCOM, PPCVLE, {RA, RS, MB}}, +{"rlwinm.", M(21,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, +{"rlinm.", M(21,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, SH, MBE, ME}}, + +{"rlmi", M(22,0), M_MASK, M601, PPCVLE, {RA, RS, RB, MBE, ME}}, +{"rlmi.", M(22,1), M_MASK, M601, PPCVLE, {RA, RS, RB, MBE, ME}}, + +{"rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, RB}}, +{"rlwnm", M(23,0), M_MASK, PPCCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, +{"rlnm", M(23,0), M_MASK, PWRCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, +{"rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, PPCVLE, {RA, RS, RB}}, +{"rlwnm.", M(23,1), M_MASK, PPCCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, +{"rlnm.", M(23,1), M_MASK, PWRCOM, PPCVLE, {RA, RS, RB, MBE, ME}}, + +{"nop", OP(24), 0xffffffff, PPCCOM, PPCVLE, {0}}, +{"ori", OP(24), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"oril", OP(24), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"oris", OP(25), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"oriu", OP(25), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"xnop", OP(26), 0xffffffff, PPCCOM, PPCVLE, {0}}, +{"xori", OP(26), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"xoril", OP(26), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"xoris", OP(27), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"xoriu", OP(27), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"andi.", OP(28), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"andil.", OP(28), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"andis.", OP(29), OP_MASK, PPCCOM, PPCVLE, {RA, RS, UI}}, +{"andiu.", OP(29), OP_MASK, PWRCOM, PPCVLE, {RA, RS, UI}}, + +{"rotldi", MD(30,0,0), MDMB_MASK, PPC64, PPCVLE, {RA, RS, SH6}}, +{"clrldi", MD(30,0,0), MDSH_MASK, PPC64, PPCVLE, {RA, RS, MB6}}, +{"rldicl", MD(30,0,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, +{"rotldi.", MD(30,0,1), MDMB_MASK, PPC64, PPCVLE, {RA, RS, SH6}}, +{"clrldi.", MD(30,0,1), MDSH_MASK, PPC64, PPCVLE, {RA, RS, MB6}}, +{"rldicl.", MD(30,0,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, + +{"rldicr", MD(30,1,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, ME6}}, +{"rldicr.", MD(30,1,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, ME6}}, + +{"rldic", MD(30,2,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, +{"rldic.", MD(30,2,1), 
MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, + +{"rldimi", MD(30,3,0), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, +{"rldimi.", MD(30,3,1), MD_MASK, PPC64, PPCVLE, {RA, RS, SH6, MB6}}, + +{"rotld", MDS(30,8,0), MDSMB_MASK, PPC64, PPCVLE, {RA, RS, RB}}, +{"rldcl", MDS(30,8,0), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, MB6}}, +{"rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, PPCVLE, {RA, RS, RB}}, +{"rldcl.", MDS(30,8,1), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, MB6}}, + +{"rldcr", MDS(30,9,0), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, ME6}}, +{"rldcr.", MDS(30,9,1), MDS_MASK, PPC64, PPCVLE, {RA, RS, RB, ME6}}, + +{"cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, 0, {OBF, RA, RB}}, +{"cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, 0, {OBF, RA, RB}}, +{"cmp", X(31,0), XCMP_MASK, PPC, 0, {BF, L32OPT, RA, RB}}, +{"cmp", X(31,0), XCMPL_MASK, PWRCOM, PPC, {BF, RA, RB}}, + +{"twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlnl", XTO(31,4,TOLNL), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, 0, {0}}, +{"twu", XTO(31,4,TOU), XTO_MASK, PPCCOM, 0, {RA, RB}}, +{"tu", XTO(31,4,TOU), XTO_MASK, PWRCOM, 0, {RA, RB}}, +{"tw", X(31,4), X_MASK, PPCCOM, 0, {TO, RA, RB}}, +{"t", X(31,4), X_MASK, PWRCOM, 0, {TO, RA, RB}}, + +{"lvsl", X(31,6), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, +{"lvebx", X(31,7), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, +{"lbfcmx", APU(31,7,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, + +{"subfc", XO(31,8,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sf", XO(31,8,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"subc", XO(31,8,0,0), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, +{"subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sf.", XO(31,8,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"subc.", XO(31,8,0,1), XO_MASK, PPCCOM, 0, {RT, RB, RA}}, + +{"mulhdu", XO(31,9,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, + +{"addc", XO(31,10,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"a", XO(31,10,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"addc.", XO(31,10,0,1), 
XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"a.", XO(31,10,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, + +{"mulhwu", XO(31,11,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"mulhwu.", XO(31,11,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, + +{"lxsiwzx", X(31,12), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}}, + +{"isellt", X(31,15), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, + +{"tlbilxlpid", XTO(31,18,0), XTO_MASK, E500MC|PPCA2, 0, {0}}, +{"tlbilxpid", XTO(31,18,1), XTO_MASK, E500MC|PPCA2, 0, {0}}, +{"tlbilxva", XTO(31,18,3), XTO_MASK, E500MC|PPCA2, 0, {RA0, RB}}, +{"tlbilx", X(31,18), X_MASK, E500MC|PPCA2, 0, {T, RA0, RB}}, + +{"mfcr", XFXM(31,19,0,0), XFXFXM_MASK, COM, 0, {RT, FXM4}}, +{"mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, 0, {RT, FXM}}, + +{"lwarx", X(31,20), XEH_MASK, PPC, 0, {RT, RA0, RB, EH}}, + +{"ldx", X(31,21), X_MASK, PPC64, 0, {RT, RA0, RB}}, + +{"icbt", X(31,22), X_MASK, BOOKE|PPCE300|PPCA2|PPC476, 0, {CT, RA0, RB}}, + +{"lwzx", X(31,23), X_MASK, PPCCOM, 0, {RT, RA0, RB}}, +{"lx", X(31,23), X_MASK, PWRCOM, 0, {RT, RA, RB}}, + +{"slw", XRC(31,24,0), X_MASK, PPCCOM, 0, {RA, RS, RB}}, +{"sl", XRC(31,24,0), X_MASK, PWRCOM, 0, {RA, RS, RB}}, +{"slw.", XRC(31,24,1), X_MASK, PPCCOM, 0, {RA, RS, RB}}, +{"sl.", XRC(31,24,1), X_MASK, PWRCOM, 0, {RA, RS, RB}}, + +{"cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, 0, {RA, RS}}, +{"cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, 0, {RA, RS}}, +{"cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, 0, {RA, RS}}, +{"cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, 0, {RA, RS}}, + +{"sld", XRC(31,27,0), X_MASK, PPC64, 0, {RA, RS, RB}}, +{"sld.", XRC(31,27,1), X_MASK, PPC64, 0, {RA, RS, RB}}, + +{"and", XRC(31,28,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"and.", XRC(31,28,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"maskg", XRC(31,29,0), X_MASK, M601, PPCA2, {RA, RS, RB}}, +{"maskg.", XRC(31,29,1), X_MASK, M601, PPCA2, {RA, RS, RB}}, + +{"ldepx", X(31,29), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, + +{"waitasec", X(31,30), XRTRARB_MASK, POWER8, POWER9, {0}}, +{"wait", X(31,30), XWC_MASK, POWER9, 0, {WC}}, + +{"lwepx", X(31,31), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, + +{"cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, 0, {OBF, RA, RB}}, +{"cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, 0, {OBF, RA, RB}}, +{"cmpl", X(31,32), XCMP_MASK, PPC, 0, {BF, L32OPT, RA, RB}}, +{"cmpl", X(31,32), XCMPL_MASK, PWRCOM, PPC, {BF, RA, RB}}, + +{"lvsr", X(31,38), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, +{"lvehx", X(31,39), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, +{"lhfcmx", APU(31,39,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, + +{"mviwsplt", X(31,46), X_MASK, PPCVEC2, 0, {VD, RA, RB}}, + +{"iselgt", X(31,47), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, + +{"lvewx", X(31,71), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, + +{"addg6s", XO(31,74,0,0), XO_MASK, POWER6, 0, {RT, RA, RB}}, + +{"lxsiwax", X(31,76), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}}, + +{"iseleq", X(31,79), X_MASK, PPCISEL, 0, {RT, RA0, RB}}, + +{"isel", XISEL(31,15), XISEL_MASK, PPCISEL|TITAN, 0, {RT, RA0, RB, CRB}}, + +{"subf", XO(31,40,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"sub", XO(31,40,0,0), XO_MASK, PPC, 0, {RT, RB, RA}}, +{"subf.", XO(31,40,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"sub.", XO(31,40,0,1), XO_MASK, PPC, 0, {RT, RB, RA}}, + +{"mfvsrd", X(31,51), XX1RB_MASK, PPCVSX2, 0, {RA, XS6}}, +{"mffprd", X(31,51), XX1RB_MASK|1, PPCVSX2, 0, {RA, FRS}}, +{"mfvrd", X(31,51)|1, XX1RB_MASK|1, PPCVSX2, 0, {RA, VS}}, +{"eratilx", X(31,51), X_MASK, PPCA2, 0, {ERAT_T, RA, RB}}, + +{"lbarx", X(31,52), XEH_MASK, POWER8|E6500, 0, {RT, RA0, RB, EH}}, + +{"ldux", X(31,53), X_MASK, PPC64, 0, {RT, RAL, RB}}, + 
+{"dcbst", X(31,54), XRT_MASK, PPC, 0, {RA0, RB}}, + +{"lwzux", X(31,55), X_MASK, PPCCOM, 0, {RT, RAL, RB}}, +{"lux", X(31,55), X_MASK, PWRCOM, 0, {RT, RA, RB}}, + +{"cntlzd", XRC(31,58,0), XRB_MASK, PPC64, 0, {RA, RS}}, +{"cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, 0, {RA, RS}}, + +{"andc", XRC(31,60,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"andc.", XRC(31,60,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"waitrsv", X(31,62)|(1<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, +{"waitimpl", X(31,62)|(2<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, +{"wait", X(31,62), XWC_MASK, E500MC|PPCA2, 0, {WC}}, + +{"dcbstep", XRT(31,63,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, + +{"tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdng", XTO(31,68,TONG), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdne", XTO(31,68,TONE), XTO_MASK, PPC64, 0, {RA, RB}}, +{"tdu", XTO(31,68,TOU), XTO_MASK, PPC64, 0, {RA, RB}}, +{"td", X(31,68), X_MASK, PPC64, 0, {TO, RA, RB}}, + +{"lwfcmx", APU(31,71,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, +{"mulhd", XO(31,73,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"mulhd.", XO(31,73,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, + +{"mulhw", XO(31,75,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"mulhw.", XO(31,75,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, + +{"dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440|TITAN, 0, {RA, RS, RB}}, +{"dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440|TITAN, 0, {RA, RS, RB}}, + +{"mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, 0, {SR, RS}}, + +{"mfmsr", X(31,83), XRARB_MASK, COM, 0, {RT}}, + +{"ldarx", X(31,84), XEH_MASK, PPC64, 0, {RT, RA0, RB, EH}}, + +{"dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, PPC476, {RA0, RB}}, +{"dcbf", X(31,86), XLRT_MASK, PPC, 0, {RA0, RB, L2OPT}}, + +{"lbzx", X(31,87), X_MASK, COM, 0, {RT, RA0, RB}}, + +{"lbepx", X(31,95), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, + +{"dni", XRC(31,97,1), XRB_MASK, E6500, 0, {DUI, DCTL}}, + +{"lvx", X(31,103), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, +{"lqfcmx", APU(31,103,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, + +{"neg", XO(31,104,0,0), XORB_MASK, COM, 0, {RT, RA}}, +{"neg.", XO(31,104,0,1), XORB_MASK, COM, 0, {RT, RA}}, + +{"mul", XO(31,107,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"mul.", XO(31,107,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, + +{"mvidsplt", X(31,110), X_MASK, PPCVEC2, 0, {VD, RA, RB}}, + +{"mtsrdin", X(31,114), XRA_MASK, PPC64, 0, {RS, RB}}, + +{"mffprwz", X(31,115), XX1RB_MASK|1, PPCVSX2, 0, {RA, FRS}}, +{"mfvrwz", X(31,115)|1, XX1RB_MASK|1, PPCVSX2, 0, {RA, VS}}, +{"mfvsrwz", X(31,115), XX1RB_MASK, PPCVSX2, 0, {RA, XS6}}, + +{"lharx", X(31,116), XEH_MASK, POWER8|E6500, 0, {RT, RA0, RB, EH}}, + +{"clf", X(31,118), XTO_MASK, POWER, 0, {RA, RB}}, + +{"lbzux", X(31,119), X_MASK, COM, 0, {RT, RAL, RB}}, + +{"popcntb", X(31,122), XRB_MASK, POWER5, 0, {RA, RS}}, + +{"not", XRC(31,124,0), X_MASK, COM, 0, {RA, RS, RBS}}, +{"nor", XRC(31,124,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"not.", XRC(31,124,1), X_MASK, 
COM, 0, {RA, RS, RBS}}, +{"nor.", XRC(31,124,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"dcbfep", XRT(31,127,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, + +{"setb", X(31,128), XRB_MASK|(3<<16), POWER9, 0, {RT, BFA}}, + +{"wrtee", X(31,131), XRARB_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RS}}, + +{"dcbtstls", X(31,134), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, + +{"stvebx", X(31,135), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, +{"stbfcmx", APU(31,135,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, + +{"subfe", XO(31,136,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sfe", XO(31,136,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, + +{"adde", XO(31,138,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"ae", XO(31,138,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"adde.", XO(31,138,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"ae.", XO(31,138,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, + +{"stxsiwx", X(31,140), XX1_MASK, PPCVSX2, 0, {XS6, RA0, RB}}, + +{"msgsndp", XRTRA(31,142,0,0), XRTRA_MASK, POWER8, 0, {RB}}, +{"dcbtstlse", X(31,142), X_MASK, PPCCHLK, E500MC, {CT, RA0, RB}}, + +{"mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, 0, {RS}}, +{"mtcrf", XFXM(31,144,0,0), XFXFXM_MASK, COM, 0, {FXM, RS}}, +{"mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, 0, {FXM, RS}}, + +{"mtmsr", X(31,146), XRLARB_MASK, COM, 0, {RS, A_L}}, + +{"mtsle", X(31,147), XRTLRARB_MASK, POWER8, 0, {L}}, -{ "ldux", X(31,53), X_MASK, PPC64, { RT, RAL, RB } }, +{"eratsx", XRC(31,147,0), X_MASK, PPCA2, 0, {RT, RA0, RB}}, +{"eratsx.", XRC(31,147,1), X_MASK, PPCA2, 0, {RT, RA0, RB}}, -{ "dcbst", X(31,54), XRT_MASK, PPC, { RA, RB } }, +{"stdx", X(31,149), X_MASK, PPC64, 0, {RS, RA0, RB}}, -{ "lwzux", X(31,55), X_MASK, PPCCOM, { RT, RAL, RB } }, -{ "lux", X(31,55), X_MASK, PWRCOM, { RT, RA, RB } }, +{"stwcx.", XRC(31,150,1), X_MASK, PPC, 0, {RS, RA0, RB}}, -{ "dcbste", X(31,62), XRT_MASK, BOOKE64, { RA, RB } }, +{"stwx", X(31,151), X_MASK, PPCCOM, 0, {RS, RA0, RB}}, +{"stx", X(31,151), X_MASK, PWRCOM, 0, {RS, RA, RB}}, -{ "lwzuxe", X(31,63), X_MASK, BOOKE64, { RT, RAL, RB } }, +{"slq", XRC(31,152,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"slq.", XRC(31,152,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "cntlzd", XRC(31,58,0), XRB_MASK, PPC64, { RA, RS } }, -{ "cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, { RA, RS } }, +{"sle", XRC(31,153,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sle.", XRC(31,153,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "andc", XRC(31,60,0), X_MASK, COM, { RA, RS, RB } }, -{ "andc.", XRC(31,60,1), X_MASK, COM, { RA, RS, RB } }, +{"prtyw", X(31,154), XRB_MASK, POWER6|PPCA2|PPC476, 0, {RA, RS}}, -{ "tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, { RA, RB } }, -{ "tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, { RA, RB } }, -{ "tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, { RA, RB } }, -{ "tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, { RA, RB } }, -{ "tdng", XTO(31,68,TONG), XTO_MASK, PPC64, { RA, RB } }, -{ "tdne", XTO(31,68,TONE), XTO_MASK, PPC64, { RA, RB } }, -{ "td", X(31,68), X_MASK, PPC64, { TO, 
RA, RB } }, +{"stdepx", X(31,157), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, -{ "mulhd", XO(31,73,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulhd.", XO(31,73,0,1), XO_MASK, PPC64, { RT, RA, RB } }, +{"stwepx", X(31,159), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, -{ "mulhw", XO(31,75,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "mulhw.", XO(31,75,0,1), XO_MASK, PPC, { RT, RA, RB } }, +{"wrteei", X(31,163), XE_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {E}}, -{ "dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440, { RA, RS, RB } }, -{ "dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440, { RA, RS, RB } }, +{"dcbtls", X(31,166), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, -{ "mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, { SR, RS } }, +{"stvehx", X(31,167), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, +{"sthfcmx", APU(31,167,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "mfmsr", X(31,83), XRARB_MASK, COM, { RT } }, +{"addex", ZRC(31,170,0), Z2_MASK, POWER9, 0, {RT, RA, RB, CY}}, -{ "ldarx", X(31,84), XEH_MASK, PPC64, { RT, RA0, RB, EH } }, +{"msgclrp", XRTRA(31,174,0,0), XRTRA_MASK, POWER8, 0, {RB}}, +{"dcbtlse", X(31,174), X_MASK, PPCCHLK, E500MC, {CT, RA0, RB}}, -{ "dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, { RA, RB } }, -{ "dcbf", X(31,86), XLRT_MASK, PPC, { RA, RB, XRT_L } }, +{"mtmsrd", X(31,178), XRLARB_MASK, PPC64, 0, {RS, A_L}}, -{ "lbzx", X(31,87), X_MASK, COM, { RT, RA0, RB } }, +{"mtvsrd", X(31,179), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, +{"mtfprd", X(31,179), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, +{"mtvrd", X(31,179)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, +{"eratre", X(31,179), X_MASK, PPCA2, 0, {RT, RA, WS}}, -{ "dcbfe", X(31,94), XRT_MASK, BOOKE64, { RA, RB } }, +{"stdux", X(31,181), X_MASK, PPC64, 0, {RS, RAS, RB}}, -{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA0, RB } }, +{"stqcx.", XRC(31,182,1), X_MASK, POWER8, 0, {RSQ, RA0, RB}}, +{"wchkall", X(31,182), X_MASK, PPCA2, 0, {OBF}}, -{ "neg", XO(31,104,0,0), XORB_MASK, COM, { RT, RA } }, -{ "neg.", XO(31,104,0,1), XORB_MASK, COM, { RT, RA } }, -{ "nego", XO(31,104,1,0), XORB_MASK, COM, { RT, RA } }, -{ "nego.", XO(31,104,1,1), XORB_MASK, COM, { RT, RA } }, +{"stwux", X(31,183), X_MASK, PPCCOM, 0, {RS, RAS, RB}}, +{"stux", X(31,183), X_MASK, PWRCOM, 0, {RS, RA0, RB}}, -{ "mul", XO(31,107,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "mul.", XO(31,107,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "mulo", XO(31,107,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "mulo.", XO(31,107,1,1), XO_MASK, M601, { RT, RA, RB } }, +{"sliq", XRC(31,184,0), X_MASK, M601, 0, {RA, RS, SH}}, +{"sliq.", XRC(31,184,1), X_MASK, M601, 0, {RA, RS, SH}}, -{ "mtsrdin", X(31,114), XRA_MASK, PPC64, { RS, RB } }, +{"prtyd", X(31,186), XRB_MASK, POWER6|PPCA2, 0, {RA, RS}}, -{ "clf", X(31,118), XTO_MASK, POWER, { RA, RB } }, +{"cmprb", X(31,192), XCMP_MASK, POWER9, 0, {BF, L, RA, RB}}, -{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } }, +{"icblq.", XRC(31,198,1), X_MASK, E6500, 0, {CT, RA0, RB}}, -{ "popcntb", X(31,122), XRB_MASK, POWER5, { RA, RS } }, +{"stvewx", X(31,199), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, +{"stwfcmx", APU(31,199,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } }, -{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } }, -{ "not.", XRC(31,124,1), X_MASK, COM, { RA, RS, RBS } }, -{ "nor.", XRC(31,124,1), X_MASK, COM, { RA, RS, RB } }, +{"subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfze.", XO(31,200,0,1), 
XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA0, RB } }, +{"addze", XO(31,202,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"aze", XO(31,202,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "lbzuxe", X(31,127), X_MASK, BOOKE64, { RT, RAL, RB } }, +{"msgsnd", XRTRA(31,206,0,0), XRTRA_MASK, E500MC|PPCA2|POWER8, 0, {RB}}, -{ "wrtee", X(31,131), XRARB_MASK, PPC403 | BOOKE, { RS } }, +{"mtsr", X(31,210), XRB_MASK|(1<<20), COM, NON32, {SR, RS}}, -{ "dcbtstls",X(31,134), X_MASK, PPCCHLK, { CT, RA, RB }}, +{"mtfprwa", X(31,211), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, +{"mtvrwa", X(31,211)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, +{"mtvsrwa", X(31,211), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, +{"eratwe", X(31,211), X_MASK, PPCA2, 0, {RS, RA, WS}}, -{ "subfe", XO(31,136,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfe", XO(31,136,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, +{"ldawx.", XRC(31,212,1), X_MASK, PPCA2, 0, {RT, RA0, RB}}, -{ "adde", XO(31,138,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ae", XO(31,138,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "adde.", XO(31,138,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "ae.", XO(31,138,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addeo", XO(31,138,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "aeo", XO(31,138,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, +{"stdcx.", XRC(31,214,1), X_MASK, PPC64, 0, {RS, RA0, RB}}, -{ "dcbtstlse",X(31,142),X_MASK, PPCCHLK64, { CT, RA, RB }}, +{"stbx", X(31,215), X_MASK, COM, 0, {RS, RA0, RB}}, -{ "mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, { FXM, RS } }, -{ "mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, { RS }}, -{ "mtcrf", X(31,144), XFXFXM_MASK, COM, { FXM, RS } }, +{"sllq", XRC(31,216,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sllq.", XRC(31,216,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "mtmsr", X(31,146), XRARB_MASK, COM, { RS } }, +{"sleq", XRC(31,217,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sleq.", XRC(31,217,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA0, RB } }, +{"stbepx", X(31,223), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, -{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA0, RB } }, +{"cmpeqb", X(31,224), XCMPL_MASK, POWER9, 0, {BF, RA, RB}}, -{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA0, RB } }, -{ "stx", X(31,151), X_MASK, PWRCOM, { RS, RA, RB } }, +{"icblc", X(31,230), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, -{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA0, RB } }, +{"stvx", X(31,231), X_MASK, PPCVEC, 0, {VS, RA0, RB}}, +{"stqfcmx", APU(31,231,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA0, RB } }, +{"subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "slq", XRC(31,152,0), X_MASK, M601, { RA, RS, RB } }, -{ 
"slq.", XRC(31,152,1), X_MASK, M601, { RA, RS, RB } }, +{"mulld", XO(31,233,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"mulld.", XO(31,233,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, -{ "sle", XRC(31,153,0), X_MASK, M601, { RA, RS, RB } }, -{ "sle.", XRC(31,153,1), X_MASK, M601, { RA, RS, RB } }, +{"addme", XO(31,234,0,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"ame", XO(31,234,0,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "prtyw", X(31,154), XRB_MASK, POWER6, { RA, RS } }, +{"mullw", XO(31,235,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"muls", XO(31,235,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"muls.", XO(31,235,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "wrteei", X(31,163), XE_MASK, PPC403 | BOOKE, { E } }, +{"icblce", X(31,238), X_MASK, PPCCHLK, E500MC|PPCA2, {CT, RA, RB}}, +{"msgclr", XRTRA(31,238,0,0), XRTRA_MASK, E500MC|PPCA2|POWER8, 0, {RB}}, +{"mtsrin", X(31,242), XRA_MASK, PPC, NON32, {RS, RB}}, +{"mtsri", X(31,242), XRA_MASK, POWER, NON32, {RS, RB}}, -{ "dcbtls", X(31,166), X_MASK, PPCCHLK, { CT, RA, RB }}, -{ "dcbtlse", X(31,174), X_MASK, PPCCHLK64, { CT, RA, RB }}, +{"mtfprwz", X(31,243), XX1RB_MASK|1, PPCVSX2, 0, {FRT, RA}}, +{"mtvrwz", X(31,243)|1, XX1RB_MASK|1, PPCVSX2, 0, {VD, RA}}, +{"mtvsrwz", X(31,243), XX1RB_MASK, PPCVSX2, 0, {XT6, RA}}, -{ "mtmsrd", X(31,178), XRLARB_MASK, PPC64, { RS, MTMSRD_L } }, +{"dcbtstt", XRT(31,246,0x10), XRT_MASK, POWER7, 0, {RA0, RB}}, +{"dcbtst", X(31,246), X_MASK, POWER4, DCBT_EO, {RA0, RB, CT}}, +{"dcbtst", X(31,246), X_MASK, DCBT_EO, 0, {CT, RA0, RB}}, +{"dcbtst", X(31,246), X_MASK, PPC, POWER4|DCBT_EO, {RA0, RB}}, -{ "stdux", X(31,181), X_MASK, PPC64, { RS, RAS, RB } }, +{"stbux", X(31,247), X_MASK, COM, 0, {RS, RAS, RB}}, -{ "stwux", X(31,183), X_MASK, PPCCOM, { RS, RAS, RB } }, -{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA0, RB } }, +{"slliq", XRC(31,248,0), X_MASK, M601, 0, {RA, RS, SH}}, +{"slliq.", XRC(31,248,1), X_MASK, M601, 0, {RA, RS, SH}}, -{ "sliq", XRC(31,184,0), X_MASK, M601, { RA, RS, SH } }, -{ "sliq.", XRC(31,184,1), X_MASK, M601, { RA, RS, SH } }, +{"bpermd", X(31,252), X_MASK, POWER7|PPCA2, 0, {RA, RS, RB}}, -{ "prtyd", X(31,186), XRB_MASK, POWER6, { RA, RS } }, +{"dcbtstep", XRT(31,255,0), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, -{ "stwuxe", X(31,191), X_MASK, BOOKE64, { RS, RAS, RB } }, +{"mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RS, RA}}, +{"mfdcrx.", XRC(31,259,1), X_MASK, PPCA2, 0, {RS, RA}}, -{ "subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfze.", XO(31,200,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfzeo.",XO(31,200,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, { RT, RA } }, +{"lvexbx", X(31,261), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "addze", XO(31,202,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "aze", XO(31,202,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addzeo.", XO(31,202,1,1), 
XORB_MASK, PPCCOM, { RT, RA } }, -{ "azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, { RT, RA } }, +{"icbt", X(31,262), XRT_MASK, PPC403, 0, {RA, RB}}, -{ "mtsr", X(31,210), XRB_MASK|(1<<20), COM32, { SR, RS } }, +{"lvepxl", X(31,263), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA0, RB } }, +{"ldfcmx", APU(31,263,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, +{"doz", XO(31,264,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"doz.", XO(31,264,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, -{ "stbx", X(31,215), X_MASK, COM, { RS, RA0, RB } }, +{"modud", X(31,265), X_MASK, POWER9, 0, {RT, RA, RB}}, -{ "sllq", XRC(31,216,0), X_MASK, M601, { RA, RS, RB } }, -{ "sllq.", XRC(31,216,1), X_MASK, M601, { RA, RS, RB } }, +{"add", XO(31,266,0,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"cax", XO(31,266,0,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"add.", XO(31,266,0,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"cax.", XO(31,266,0,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "sleq", XRC(31,217,0), X_MASK, M601, { RA, RS, RB } }, -{ "sleq.", XRC(31,217,1), X_MASK, M601, { RA, RS, RB } }, +{"moduw", X(31,267), X_MASK, POWER9, 0, {RT, RA, RB}}, -{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA0, RB } }, +{"lxvx", X(31,268), XX1_MASK|1<<6, PPCVSX3, 0, {XT6, RA0, RB}}, +{"lxvl", X(31,269), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, -{ "icblc", X(31,230), X_MASK, PPCCHLK, { CT, RA, RB }}, +{"ehpriv", X(31,270), 0xffffffff, E500MC|PPCA2, 0, {0}}, -{ "subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "subfmeo.",XO(31,232,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, { RT, RA } }, +{"tlbiel", X(31,274), X_MASK|1<<20,POWER9, PPC476, {RB, RSO, RIC, PRS, X_R}}, +{"tlbiel", X(31,274), XRTLRA_MASK, POWER4, POWER9|PPC476, {RB, LOPT}}, -{ "mulld", XO(31,233,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulld.", XO(31,233,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulldo", XO(31,233,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "mulldo.", XO(31,233,1,1), XO_MASK, PPC64, { RT, RA, RB } }, +{"mfapidi", X(31,275), X_MASK, BOOKE, E500|TITAN, {RT, RA}}, -{ "addme", XO(31,234,0,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ame", XO(31,234,0,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ameo", XO(31,234,1,0), XORB_MASK, PWRCOM, { RT, RA } }, -{ "addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, { RT, RA } }, -{ "ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, { RT, RA } }, +{"lqarx", X(31,276), XEH_MASK, POWER8, 0, {RTQ, RAX, RBX, EH}}, -{ "mullw", XO(31,235,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "muls", XO(31,235,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "muls.", XO(31,235,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "mulso", XO(31,235,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, +{"lscbx", XRC(31,277,0), X_MASK, M601, 0, {RT, RA, RB}}, +{"lscbx.", 
XRC(31,277,1), X_MASK, M601, 0, {RT, RA, RB}}, -{ "icblce", X(31,238), X_MASK, PPCCHLK64, { CT, RA, RB }}, -{ "mtsrin", X(31,242), XRA_MASK, PPC32, { RS, RB } }, -{ "mtsri", X(31,242), XRA_MASK, POWER32, { RS, RB } }, +{"dcbtt", XRT(31,278,0x10), XRT_MASK, POWER7, 0, {RA0, RB}}, +{"dcbt", X(31,278), X_MASK, POWER4, DCBT_EO, {RA0, RB, CT}}, +{"dcbt", X(31,278), X_MASK, DCBT_EO, 0, {CT, RA0, RB}}, +{"dcbt", X(31,278), X_MASK, PPC, POWER4|DCBT_EO, {RA0, RB}}, -{ "dcbtst", X(31,246), X_MASK, PPC, { CT, RA, RB } }, +{"lhzx", X(31,279), X_MASK, COM, 0, {RT, RA0, RB}}, -{ "stbux", X(31,247), X_MASK, COM, { RS, RAS, RB } }, - -{ "slliq", XRC(31,248,0), X_MASK, M601, { RA, RS, SH } }, -{ "slliq.", XRC(31,248,1), X_MASK, M601, { RA, RS, SH } }, - -{ "dcbtste", X(31,253), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "stbuxe", X(31,255), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "mfdcrx", X(31,259), X_MASK, BOOKE, { RS, RA } }, - -{ "doz", XO(31,264,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "doz.", XO(31,264,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "dozo", XO(31,264,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "dozo.", XO(31,264,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "add", XO(31,266,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "cax", XO(31,266,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "add.", XO(31,266,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "cax.", XO(31,266,0,1), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addo", XO(31,266,1,0), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "caxo", XO(31,266,1,0), XO_MASK, PWRCOM, { RT, RA, RB } }, -{ "addo.", XO(31,266,1,1), XO_MASK, PPCCOM, { RT, RA, RB } }, -{ "caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, { RT, RA, RB } }, - -{ "tlbiel", X(31,274), XRTLRA_MASK, POWER4, { RB, L } }, - -{ "mfapidi", X(31,275), X_MASK, BOOKE, { RT, RA } }, - -{ "lscbx", XRC(31,277,0), X_MASK, M601, { RT, RA, RB } }, -{ "lscbx.", XRC(31,277,1), X_MASK, M601, { RT, RA, RB } }, - -{ "dcbt", X(31,278), X_MASK, PPC, { CT, RA, RB } }, - -{ "lhzx", X(31,279), X_MASK, COM, { RT, RA0, RB } }, - -{ "eqv", XRC(31,284,0), X_MASK, COM, { RA, RS, RB } }, -{ "eqv.", XRC(31,284,1), X_MASK, COM, { RA, RS, RB } }, - -{ "dcbte", X(31,286), X_MASK, BOOKE64, { CT, RA, RB } }, - -{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "tlbie", X(31,306), XRTLRA_MASK, PPC, { RB, L } }, -{ "tlbi", X(31,306), XRT_MASK, POWER, { RA0, RB } }, - -{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } }, - -{ "lhzux", X(31,311), X_MASK, COM, { RT, RAL, RB } }, - -{ "xor", XRC(31,316,0), X_MASK, COM, { RA, RS, RB } }, -{ "xor.", XRC(31,316,1), X_MASK, COM, { RA, RS, RB } }, - -{ "lhzuxe", X(31,319), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "mfexisr", XSPR(31,323,64), XSPR_MASK, PPC403, { RT } }, -{ "mfexier", XSPR(31,323,66), XSPR_MASK, PPC403, { RT } }, -{ "mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, { RT } }, -{ "mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, { RT } }, -{ "mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, { RT } }, -{ "mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, { RT } }, -{ "mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, { RT } }, -{ "mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, { RT } }, -{ "mfbr6", XSPR(31,323,134), XSPR_MASK, PPC403, { RT } }, -{ "mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, { RT } }, -{ "mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, { RT } }, -{ "mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, { RT } }, -{ "mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, { RT } }, -{ 
"mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa0", XSPR(31,323,195), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, { RT } }, -{ "mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, { RT } }, -{ "mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, { RT } }, -{ "mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, { RT } }, -{ "mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, { RT } }, -{ "mfdcr", X(31,323), X_MASK, PPC403 | BOOKE, { RT, SPR } }, - -{ "div", XO(31,331,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "div.", XO(31,331,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "divo", XO(31,331,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "divo.", XO(31,331,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "mfpmr", X(31,334), X_MASK, PPCPMR, { RT, PMR }}, - -{ "mfmq", XSPR(31,339,0), XSPR_MASK, M601, { RT } }, -{ "mfxer", XSPR(31,339,1), XSPR_MASK, COM, { RT } }, -{ "mfrtcu", XSPR(31,339,4), XSPR_MASK, COM, { RT } }, -{ "mfrtcl", XSPR(31,339,5), XSPR_MASK, COM, { RT } }, -{ "mfdec", XSPR(31,339,6), XSPR_MASK, MFDEC1, { RT } }, -{ "mfdec", XSPR(31,339,22), XSPR_MASK, MFDEC2, { RT } }, -{ "mflr", XSPR(31,339,8), XSPR_MASK, COM, { RT } }, -{ "mfctr", XSPR(31,339,9), XSPR_MASK, COM, { RT } }, -{ "mftid", XSPR(31,339,17), XSPR_MASK, POWER, { RT } }, -{ "mfdsisr", XSPR(31,339,18), XSPR_MASK, COM, { RT } }, -{ "mfdar", XSPR(31,339,19), XSPR_MASK, COM, { RT } }, -{ "mfsdr0", XSPR(31,339,24), XSPR_MASK, POWER, { RT } }, -{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, COM, { RT } }, -{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, COM, { RT } }, -{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, COM, { RT } }, -{ "mfcfar", XSPR(31,339,28), XSPR_MASK, POWER6, { RT } }, -{ "mfpid", XSPR(31,339,48), XSPR_MASK, BOOKE, { RT } }, -{ "mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, { RT } }, -{ "mfcsrr0", XSPR(31,339,58), XSPR_MASK, BOOKE, { RT } }, -{ "mfcsrr1", XSPR(31,339,59), XSPR_MASK, BOOKE, { RT } }, -{ "mfdear", XSPR(31,339,61), XSPR_MASK, BOOKE, { RT } }, -{ "mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, { RT } }, -{ "mfesr", XSPR(31,339,62), XSPR_MASK, BOOKE, { RT } }, -{ "mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, { RT } }, -{ "mfivpr", XSPR(31,339,63), XSPR_MASK, BOOKE, { RT } }, -{ "mfcmpa", XSPR(31,339,144), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpd", XSPR(31,339,147), XSPR_MASK, PPC860, { RT } }, -{ "mficr", XSPR(31,339,148), XSPR_MASK, PPC860, { RT } }, -{ "mfder", XSPR(31,339,149), XSPR_MASK, PPC860, { RT } }, -{ "mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, { RT } }, -{ "mfcountb", XSPR(31,339,151), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, { RT } }, -{ "mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, { RT } 
}, -{ "mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, { RT } }, -{ "mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, { RT } }, -{ "mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, { RT } }, -{ "mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, { RT } }, -{ "mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, { RT } }, -{ "mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, { RT } }, -{ "mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, { RT } }, -{ "mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, { RT } }, -{ "mftb", X(31,371), X_MASK, CLASSIC, { RT, TBR } }, -{ "mftb", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } }, -{ "mftbl", XSPR(31,371,268), XSPR_MASK, CLASSIC, { RT } }, -{ "mftbl", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } }, -{ "mftbu", XSPR(31,371,269), XSPR_MASK, CLASSIC, { RT } }, -{ "mftbu", XSPR(31,339,269), XSPR_MASK, BOOKE, { RT } }, -{ "mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, { RT, SPRG } }, -{ "mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, { RT } }, -{ "mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, { RT } }, -{ "mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, { RT } }, -{ "mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, { RT } }, -{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405 | BOOKE, { RT } }, -{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, { RT } }, -{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } }, -{ "mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, { RT } }, -{ "mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, { RT } }, -{ "mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, { RT } }, -{ "mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, { RT } }, -{ "mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, { RT } }, -{ "mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, { RT } }, -{ "mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, { RT } }, -{ "mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, { RT } }, -{ "mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, { RT } }, -{ "mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, { RT } }, -{ "mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, { RT } }, -{ "mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, { RT } }, -{ "mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, { RT } }, -{ "mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, { RT } }, -{ "mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, { RT } }, -{ "mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, { RT } }, -{ "mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, { RT } }, -{ "mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, { RT } }, -{ "mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, { RT } }, -{ "mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, { RT } }, -{ "mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, { RT } }, -{ "mftcr", XSPR(31,339,340), XSPR_MASK, BOOKE, { RT } }, -{ "mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, { RT } }, -{ "mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, { RT } }, -{ 
"mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor13", XSPR(31,339,413), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, { RT } }, -{ "mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, { RT } }, -{ "mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, { RT } }, -{ "mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, { RT } }, -{ "mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, { RT } }, -{ "mfivor32", XSPR(31,339,528), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, { RT } }, -{ "mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, { RT } }, -{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, { RT, SPRBAT } }, -{ "mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, { RT } }, -{ "mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, { RT } }, -{ "mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, { RT } }, -{ "mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, { RT } }, -{ "mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, { RT } }, -{ "mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } }, -{ "mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, { RT } }, -{ "mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, { RT } }, -{ "mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, { RT } }, -{ "mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, { RT } }, -{ "mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, { RT } }, -{ "mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbram0",XSPR(31,339,817), XSPR_MASK, PPC860, { RT } }, -{ "mfmi_dbram1",XSPR(31,339,818), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbram0",XSPR(31,339,825), XSPR_MASK, PPC860, { RT } }, -{ "mfmd_dbram1",XSPR(31,339,826), XSPR_MASK, PPC860, { RT } }, -{ "mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, { RT } }, -{ 
"mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, { RT } }, -{ "mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, { RT } }, -{ "mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, { RT } }, -{ "mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, { RT } }, -{ "mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405, { RT } }, -{ "mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, { RT } }, -{ "mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, { RT } }, -{ "mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, { RT } }, -{ "mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, { RT } }, -{ "mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, { RT } }, -{ "mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, { RT } }, -{ "mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, { RT } }, -{ "mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, { RT } }, -{ "mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, { RT } }, -{ "mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, { RT } }, -{ "mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, { RT } }, -{ "mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403, { RT } }, -{ "mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, { RT } }, -{ "mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, { RT } }, -{ "mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, { RT } }, -{ "mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, { RT } }, -{ "mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, { RT } }, -{ "mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, { RT } }, -{ "mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, { RT } }, -{ "mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, { RT } }, -{ "mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, { RT } }, -{ "mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, { RT } }, -{ "mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, { RT } }, -{ "mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, { RT } }, -{ "mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, { RT } }, -{ "mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, { RT } }, -{ "mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, { RT } }, -{ "mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, { RT } }, -{ "mfspr", X(31,339), X_MASK, COM, { RT, SPR } }, - -{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA0, RB } }, - -{ "dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, -{ "dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, - -{ "lhax", X(31,343), X_MASK, COM, { RT, RA0, RB } }, - -{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA0, RB } }, - -{ "dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, -{ "dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } }, - -{ "dccci", X(31,454), XRT_MASK, PPC403|PPC440, { RA, RB } }, - -{ "abs", XO(31,360,0,0), XORB_MASK, M601, { RT, RA } }, -{ "abs.", XO(31,360,0,1), XORB_MASK, M601, { RT, RA } }, -{ "abso", XO(31,360,1,0), XORB_MASK, M601, { RT, RA } }, -{ "abso.", XO(31,360,1,1), XORB_MASK, M601, { RT, RA } }, - -{ "divs", XO(31,363,0,0), XO_MASK, M601, { RT, RA, RB } }, -{ "divs.", XO(31,363,0,1), XO_MASK, M601, { RT, RA, RB } }, -{ "divso", XO(31,363,1,0), XO_MASK, M601, { RT, RA, RB } }, -{ "divso.", XO(31,363,1,1), XO_MASK, M601, { RT, RA, RB } }, - -{ "tlbia", X(31,370), 0xffffffff, PPC, { 0 } }, - -{ "lwaux", X(31,373), X_MASK, PPC64, { RT, RAL, RB } }, - -{ "lhaux", X(31,375), X_MASK, COM, { RT, RAL, RB } }, - -{ "lhauxe", X(31,383), X_MASK, BOOKE64, { RT, RAL, RB } }, - -{ "mtdcrx", X(31,387), X_MASK, BOOKE, { RA, RS } }, - -{ "dcblc", X(31,390), X_MASK, PPCCHLK, { CT, RA, RB 
}}, - -{ "subfe64", XO(31,392,0,0), XO_MASK, BOOKE64, { RT, RA, RB } }, -{ "subfe64o",XO(31,392,1,0), XO_MASK, BOOKE64, { RT, RA, RB } }, - -{ "adde64", XO(31,394,0,0), XO_MASK, BOOKE64, { RT, RA, RB } }, -{ "adde64o", XO(31,394,1,0), XO_MASK, BOOKE64, { RT, RA, RB } }, - -{ "dcblce", X(31,398), X_MASK, PPCCHLK64, { CT, RA, RB }}, - -{ "slbmte", X(31,402), XRA_MASK, PPC64, { RS, RB } }, - -{ "sthx", X(31,407), X_MASK, COM, { RS, RA0, RB } }, - -{ "cmpb", X(31,508), X_MASK, POWER6, { RA, RS, RB } }, - -{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } }, - -{ "lfdpx", X(31,791), X_MASK, POWER6, { FRT, RA, RB } }, - -{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } }, - -{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } }, - -{ "stfdpx", X(31,919), X_MASK, POWER6, { FRS, RA, RB } }, - -{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } }, - -{ "orc", XRC(31,412,0), X_MASK, COM, { RA, RS, RB } }, -{ "orc.", XRC(31,412,1), X_MASK, COM, { RA, RS, RB } }, - -{ "sradi", XS(31,413,0), XS_MASK, PPC64, { RA, RS, SH6 } }, -{ "sradi.", XS(31,413,1), XS_MASK, PPC64, { RA, RS, SH6 } }, - -{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA0, RB } }, - -{ "slbie", X(31,434), XRTRA_MASK, PPC64, { RB } }, - -{ "ecowx", X(31,438), X_MASK, PPC, { RT, RA, RB } }, - -{ "sthux", X(31,439), X_MASK, COM, { RS, RAS, RB } }, - -{ "sthuxe", X(31,447), X_MASK, BOOKE64, { RS, RAS, RB } }, - -{ "mr", XRC(31,444,0), X_MASK, COM, { RA, RS, RBS } }, -{ "or", XRC(31,444,0), X_MASK, COM, { RA, RS, RB } }, -{ "mr.", XRC(31,444,1), X_MASK, COM, { RA, RS, RBS } }, -{ "or.", XRC(31,444,1), X_MASK, COM, { RA, RS, RB } }, - -{ "mtexisr", XSPR(31,451,64), XSPR_MASK, PPC403, { RS } }, -{ "mtexier", XSPR(31,451,66), XSPR_MASK, PPC403, { RS } }, -{ "mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, { RS } }, -{ "mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, { RS } }, -{ "mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, { RS } }, -{ "mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, { RS } }, -{ "mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, { RS } }, -{ "mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, { RS } }, -{ "mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, { RS } }, -{ "mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, { RS } }, -{ "mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, { RS } }, -{ "mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, { RS } }, -{ "mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact1", XSPR(31,451,201), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, { RS } }, -{ "mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, { RS } }, -{ "mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, { RS } }, -{ "mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasa3", XSPR(31,451,219), XSPR_MASK, 
PPC403, { RS } }, -{ "mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, { RS } }, -{ "mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, { RS } }, -{ "mtdcr", X(31,451), X_MASK, PPC403 | BOOKE, { SPR, RS } }, - -{ "subfze64",XO(31,456,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "subfze64o",XO(31,456,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divdu", XO(31,457,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdu.", XO(31,457,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divduo", XO(31,457,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divduo.", XO(31,457,1,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addze64", XO(31,458,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "addze64o",XO(31,458,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divwu", XO(31,459,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwu.", XO(31,459,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwuo", XO(31,459,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwuo.", XO(31,459,1,1), XO_MASK, PPC, { RT, RA, RB } }, - -{ "mtmq", XSPR(31,467,0), XSPR_MASK, M601, { RS } }, -{ "mtxer", XSPR(31,467,1), XSPR_MASK, COM, { RS } }, -{ "mtlr", XSPR(31,467,8), XSPR_MASK, COM, { RS } }, -{ "mtctr", XSPR(31,467,9), XSPR_MASK, COM, { RS } }, -{ "mttid", XSPR(31,467,17), XSPR_MASK, POWER, { RS } }, -{ "mtdsisr", XSPR(31,467,18), XSPR_MASK, COM, { RS } }, -{ "mtdar", XSPR(31,467,19), XSPR_MASK, COM, { RS } }, -{ "mtrtcu", XSPR(31,467,20), XSPR_MASK, COM, { RS } }, -{ "mtrtcl", XSPR(31,467,21), XSPR_MASK, COM, { RS } }, -{ "mtdec", XSPR(31,467,22), XSPR_MASK, COM, { RS } }, -{ "mtsdr0", XSPR(31,467,24), XSPR_MASK, POWER, { RS } }, -{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, COM, { RS } }, -{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, COM, { RS } }, -{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, COM, { RS } }, -{ "mtcfar", XSPR(31,467,28), XSPR_MASK, POWER6, { RS } }, -{ "mtpid", XSPR(31,467,48), XSPR_MASK, BOOKE, { RS } }, -{ "mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, { RS } }, -{ "mtdecar", XSPR(31,467,54), XSPR_MASK, BOOKE, { RS } }, -{ "mtcsrr0", XSPR(31,467,58), XSPR_MASK, BOOKE, { RS } }, -{ "mtcsrr1", XSPR(31,467,59), XSPR_MASK, BOOKE, { RS } }, -{ "mtdear", XSPR(31,467,61), XSPR_MASK, BOOKE, { RS } }, -{ "mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, { RS } }, -{ "mtesr", XSPR(31,467,62), XSPR_MASK, BOOKE, { RS } }, -{ "mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, { RS } }, -{ "mtivpr", XSPR(31,467,63), XSPR_MASK, BOOKE, { RS } }, -{ "mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, { RS } }, -{ "mticr", XSPR(31,467,148), XSPR_MASK, PPC860, { RS } }, -{ "mtder", XSPR(31,467,149), XSPR_MASK, PPC860, { RS } }, -{ "mtcounta", XSPR(31,467,150), XSPR_MASK, PPC860, { RS } }, -{ "mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, { RS } }, -{ "mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, { RS } }, -{ "mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, { RS } }, -{ "mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, { RS } }, -{ "mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, { RS } }, -{ "mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, { RS } }, -{ "mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, { RS } }, -{ "mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, { RS } }, -{ "mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, { RS } }, -{ "mtsprg", XSPR(31,467,256), XSPRG_MASK,PPC, { SPRG, RS } }, -{ "mtsprg0", 
XSPR(31,467,272), XSPR_MASK, PPC, { RS } }, -{ "mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, { RS } }, -{ "mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, { RS } }, -{ "mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, { RS } }, -{ "mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405 | BOOKE, { RS } }, -{ "mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, { RS } }, -{ "mtear", XSPR(31,467,282), XSPR_MASK, PPC, { RS } }, -{ "mttbl", XSPR(31,467,284), XSPR_MASK, PPC, { RS } }, -{ "mttbu", XSPR(31,467,285), XSPR_MASK, PPC, { RS } }, -{ "mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, { RS } }, -{ "mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, { RS } }, -{ "mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, { RS } }, -{ "mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, { RS } }, -{ "mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, { RS } }, -{ "mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, { RS } }, -{ "mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, { RS } }, -{ "mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, { RS } }, -{ "mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, { RS } }, -{ "mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, { RS } }, -{ "mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, { RS } }, -{ "mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, { RS } }, -{ "mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, { RS } }, -{ "mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, { RS } }, -{ "mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, { RS } }, -{ "mtdvc2", XSPR(31,467,319), XSPR_MASK, BOOKE, { RS } }, -{ "mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, { RS } }, -{ "mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, { RS } }, -{ "mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, { RS } }, -{ "mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, { RS } }, -{ "mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, { RS } }, -{ "mtivor0", XSPR(31,467,400), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor7", XSPR(31,467,407), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, { RS } }, -{ "mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, { RS } }, -{ "mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, { RS } }, -{ "mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, { RS } }, -{ "mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, { RS } }, -{ "mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, { RS } }, -{ 
"mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, { RS } }, -{ "mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, { RS } }, -{ "mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, { RS } }, -{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, { SPRBAT, RS } }, -{ "mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, { RS } }, -{ "mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, { RS } }, -{ "mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, { RS } }, -{ "mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, { RS } }, -{ "mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, { RS } }, -{ "mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, { RS } }, -{ "mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405, { RS } }, -{ "mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, { RS } }, -{ "mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, { RS } }, -{ "mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, { RS } }, -{ "mtdcwr", XSPR(31,467,954), XSPR_MASK, PPC403, { RS } }, -{ "mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, { RS } }, -{ "mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, { RS } }, -{ "mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, { RS } }, -{ "mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, { RS } }, -{ "mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, { RS } }, -{ "mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, { RS } }, -{ "mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, { RS } }, -{ "mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, { RS } }, -{ "mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, { RS } }, -{ "mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, { RS } }, -{ "mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, { RS } }, -{ "mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, { RS } }, -{ "mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, { RS } }, -{ "mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, { RS } }, -{ "mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, { RS } }, -{ "mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, { RS } }, -{ "mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, { RS } }, -{ "mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, { RS } }, -{ "mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, { RS } }, -{ "mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, { RS } }, -{ "mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, { RS } }, -{ "mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, { RS } }, -{ "mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, { RS } }, -{ "mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, { RS } }, -{ "mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, { RS } }, -{ "mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, { RS } }, -{ "mtspr", X(31,467), X_MASK, COM, { SPR, RS } }, - -{ "dcbi", X(31,470), XRT_MASK, PPC, { RA, RB } }, - -{ "nand", XRC(31,476,0), X_MASK, COM, { RA, RS, RB } }, -{ "nand.", XRC(31,476,1), X_MASK, COM, { RA, RS, RB } }, - -{ "dcbie", X(31,478), XRT_MASK, BOOKE64, { RA, RB } }, - -{ "dcread", X(31,486), X_MASK, PPC403|PPC440, { RT, RA, RB }}, - -{ "mtpmr", X(31,462), X_MASK, PPCPMR, { PMR, RS }}, - -{ "icbtls", X(31,486), X_MASK, PPCCHLK, { CT, RA, RB }}, - -{ "nabs", XO(31,488,0,0), XORB_MASK, M601, { RT, RA } }, -{ 
"subfme64",XO(31,488,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "nabs.", XO(31,488,0,1), XORB_MASK, M601, { RT, RA } }, -{ "nabso", XO(31,488,1,0), XORB_MASK, M601, { RT, RA } }, -{ "subfme64o",XO(31,488,1,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "nabso.", XO(31,488,1,1), XORB_MASK, M601, { RT, RA } }, - -{ "divd", XO(31,489,0,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divd.", XO(31,489,0,1), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdo", XO(31,489,1,0), XO_MASK, PPC64, { RT, RA, RB } }, -{ "divdo.", XO(31,489,1,1), XO_MASK, PPC64, { RT, RA, RB } }, - -{ "addme64", XO(31,490,0,0), XORB_MASK, BOOKE64, { RT, RA } }, -{ "addme64o",XO(31,490,1,0), XORB_MASK, BOOKE64, { RT, RA } }, - -{ "divw", XO(31,491,0,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divw.", XO(31,491,0,1), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwo", XO(31,491,1,0), XO_MASK, PPC, { RT, RA, RB } }, -{ "divwo.", XO(31,491,1,1), XO_MASK, PPC, { RT, RA, RB } }, +{"cdtbcd", X(31,282), XRB_MASK, POWER6, 0, {RA, RS}}, -{ "icbtlse", X(31,494), X_MASK, PPCCHLK64, { CT, RA, RB }}, +{"eqv", XRC(31,284,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"eqv.", XRC(31,284,1), X_MASK, COM, 0, {RA, RS, RB}}, -{ "slbia", X(31,498), 0xffffffff, PPC64, { 0 } }, +{"lhepx", X(31,287), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, -{ "cli", X(31,502), XRB_MASK, POWER, { RT, RA } }, +{"mfdcrux", X(31,291), X_MASK, PPC464, 0, {RS, RA}}, -{ "stdcxe.", XRC(31,511,1), X_MASK, BOOKE64, { RS, RA, RB } }, +{"lvexhx", X(31,293), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, +{"lvepx", X(31,295), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "mcrxr", X(31,512), XRARB_MASK|(3<<21), COM, { BF } }, +{"lxvll", X(31,301), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, -{ "bblels", X(31,518), X_MASK, PPCBRLK, { 0 }}, -{ "mcrxr64", X(31,544), XRARB_MASK|(3<<21), BOOKE64, { BF } }, +{"mfbhrbe", X(31,302), X_MASK, POWER8, 0, {RT, BHRBE}}, + +{"tlbie", X(31,306), X_MASK|1<<20,POWER9, TITAN, {RB, RS, RIC, PRS, X_R}}, +{"tlbie", X(31,306), XRA_MASK, POWER7, POWER9|TITAN, {RB, RS}}, +{"tlbie", X(31,306), XRTLRA_MASK, PPC, E500|POWER7|TITAN, {RB, LOPT}}, +{"tlbi", X(31,306), XRT_MASK, POWER, 0, {RA0, RB}}, + +{"mfvsrld", X(31,307), XX1RB_MASK, PPCVSX3, 0, {RA, XS6}}, + +{"ldmx", X(31,309), X_MASK, POWER9, 0, {RT, RA0, RB}}, + +{"eciwx", X(31,310), X_MASK, PPC, E500|TITAN, {RT, RA0, RB}}, + +{"lhzux", X(31,311), X_MASK, COM, 0, {RT, RAL, RB}}, + +{"cbcdtd", X(31,314), XRB_MASK, POWER6, 0, {RA, RS}}, + +{"xor", XRC(31,316,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"xor.", XRC(31,316,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"dcbtep", XRT(31,319,0), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, + +{"mfexisr", XSPR(31,323, 64), XSPR_MASK, PPC403, 0, {RT}}, +{"mfexier", XSPR(31,323, 66), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr6", XSPR(31,323,134), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, 0, {RT}}, +{"mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, 0, {RT}}, +{"mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmasa0", 
XSPR(31,323,195), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdcr", X(31,323), X_MASK, PPC403|BOOKE|PPCA2|PPC476, E500|TITAN, {RT, SPR}}, +{"mfdcr.", XRC(31,323,1), X_MASK, PPCA2, 0, {RT, SPR}}, + +{"lvexwx", X(31,325), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, + +{"dcread", X(31,326), X_MASK, PPC476|TITAN, 0, {RT, RA0, RB}}, + +{"div", XO(31,331,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"div.", XO(31,331,0,1), XO_MASK, M601, 0, {RT, RA, RB}}, + +{"lxvdsx", X(31,332), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, + +{"mfpmr", X(31,334), X_MASK, PPCPMR|PPCE300, 0, {RT, PMR}}, +{"mftmr", X(31,366), X_MASK, PPCTMR|E6500, 0, {RT, TMR}}, + +{"slbsync", X(31,338), 0xffffffff, POWER9, 0, {0}}, + +{"mfmq", XSPR(31,339, 0), XSPR_MASK, M601, 0, {RT}}, +{"mfxer", XSPR(31,339, 1), XSPR_MASK, COM, 0, {RT}}, +{"mfrtcu", XSPR(31,339, 4), XSPR_MASK, COM, TITAN, {RT}}, +{"mfrtcl", XSPR(31,339, 5), XSPR_MASK, COM, TITAN, {RT}}, +{"mfdec", XSPR(31,339, 6), XSPR_MASK, MFDEC1, 0, {RT}}, +{"mflr", XSPR(31,339, 8), XSPR_MASK, COM, 0, {RT}}, +{"mfctr", XSPR(31,339, 9), XSPR_MASK, COM, 0, {RT}}, +{"mfdscr", XSPR(31,339, 17), XSPR_MASK, POWER6, 0, {RT}}, +{"mftid", XSPR(31,339, 17), XSPR_MASK, POWER, 0, {RT}}, +{"mfdsisr", XSPR(31,339, 18), XSPR_MASK, COM, TITAN, {RT}}, +{"mfdar", XSPR(31,339, 19), XSPR_MASK, COM, TITAN, {RT}}, +{"mfdec", XSPR(31,339, 22), XSPR_MASK, MFDEC2, MFDEC1, {RT}}, +{"mfsdr0", XSPR(31,339, 24), XSPR_MASK, POWER, 0, {RT}}, +{"mfsdr1", XSPR(31,339, 25), XSPR_MASK, COM, TITAN, {RT}}, +{"mfsrr0", XSPR(31,339, 26), XSPR_MASK, COM, 0, {RT}}, +{"mfsrr1", XSPR(31,339, 27), XSPR_MASK, COM, 0, {RT}}, +{"mfcfar", XSPR(31,339, 28), XSPR_MASK, POWER6, 0, {RT}}, +{"mfpid", XSPR(31,339, 48), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfcsrr0", XSPR(31,339, 58), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfcsrr1", XSPR(31,339, 59), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdear", XSPR(31,339, 61), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfesr", XSPR(31,339, 62), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivpr", XSPR(31,339, 63), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfctrl", XSPR(31,339,136), XSPR_MASK, POWER4, 0, {RT}}, +{"mfcmpa", XSPR(31,339,144), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmpd", XSPR(31,339,147), XSPR_MASK, PPC860, 0, {RT}}, +{"mficr", XSPR(31,339,148), XSPR_MASK, PPC860, 0, {RT}}, +{"mfder", XSPR(31,339,149), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcountb", XSPR(31,339,151), XSPR_MASK, 
PPC860, 0, {RT}}, +{"mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, 0, {RT}}, +{"mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, 0, {RT}}, +{"mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, 0, {RT}}, +{"mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, 0, {RT}}, +{"mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, 0, {RT}}, +{"mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, 0, {RT}}, +{"mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, 0, {RT}}, +{"mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, 0, {RT, SPRG}}, +{"mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, +{"mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, +{"mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, +{"mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405|BOOKE, 0, {RT}}, +{"mftbu", XSPR(31,339,269), XSPR_MASK, POWER4|BOOKE, 0, {RT}}, +{"mftb", X(31,339), X_MASK, POWER4|BOOKE, 0, {RT, TBR}}, +{"mftbl", XSPR(31,339,268), XSPR_MASK, POWER4|BOOKE, 0, {RT}}, +{"mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, 0, {RT}}, +{"mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, 0, {RT}}, +{"mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, 0, {RT}}, +{"mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, 0, {RT}}, +{"mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, 0, {RT}}, +{"mfear", XSPR(31,339,282), XSPR_MASK, PPC, TITAN, {RT}}, +{"mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, 0, {RT}}, +{"mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, 0, {RT}}, +{"mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, 0, {RT}}, +{"mftcr", XSPR(31,339,340), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor13", XSPR(31,339,413), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, 0, {RT}}, +{"mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, 0, {RT}}, +{"mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, 0, {RT}}, +{"mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, 0, {RT}}, +{"mfivor32", 
XSPR(31,339,528), XSPR_MASK, PPCSPE, 0, {RT}}, +{"mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, +{"mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, 0, {RT}}, +{"mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, +{"mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, 0, {RT}}, +{"mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, 0, {RT}}, +{"mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, +{"mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, TITAN, {RT, SPRBAT}}, +{"mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, 0, {RT}}, +{"mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, 0, {RT}}, +{"mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, 0, {RT}}, +{"mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, 0, {RT}}, +{"mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, 0, {RT}}, +{"mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, 0, {RT}}, +{"mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, 0, {RT}}, +{"mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, 0, {RT}}, +{"mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, TITAN, {RT}}, +{"mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, 0, {RT}}, +{"mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, 0, {RT}}, +{"mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, 0, {RT}}, +{"mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, 0, {RT}}, +{"mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_dbram0", XSPR(31,339,817), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmi_dbram1", XSPR(31,339,818), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_dbram0", XSPR(31,339,825), XSPR_MASK, PPC860, 0, {RT}}, +{"mfmd_dbram1", XSPR(31,339,826), XSPR_MASK, PPC860, 0, {RT}}, +{"mfivndx", XSPR(31,339,880), XSPR_MASK, TITAN, 0, {RT}}, +{"mfdvndx", XSPR(31,339,881), XSPR_MASK, TITAN, 0, {RT}}, +{"mfivlim", XSPR(31,339,882), XSPR_MASK, TITAN, 0, {RT}}, +{"mfdvlim", XSPR(31,339,883), XSPR_MASK, TITAN, 0, {RT}}, +{"mfclcsr", XSPR(31,339,884), XSPR_MASK, TITAN, 0, {RT}}, +{"mfccr1", XSPR(31,339,888), XSPR_MASK, TITAN, 0, {RT}}, +{"mfppr", XSPR(31,339,896), XSPR_MASK, POWER7, 0, {RT}}, +{"mfppr32", XSPR(31,339,898), XSPR_MASK, POWER7, 0, {RT}}, +{"mfrstcfg", XSPR(31,339,923), XSPR_MASK, TITAN, 0, {RT}}, +{"mfdcdbtrl", XSPR(31,339,924), XSPR_MASK, TITAN, 0, {RT}}, +{"mfdcdbtrh", XSPR(31,339,925), XSPR_MASK, TITAN, 0, {RT}}, +{"mficdbtr", XSPR(31,339,927), XSPR_MASK, TITAN, 0, {RT}}, +{"mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, 0, {RT}}, +{"mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, 0, {RT}}, +{"mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, 0, {RT}}, +{"mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, 0, {RT}}, +{"mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, 0, {RT}}, +{"mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, 0, 
{RT}}, +{"mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, 0, {RT}}, +{"mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, 0, {RT}}, +{"mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, 0, {RT}}, +{"mfmmucr", XSPR(31,339,946), XSPR_MASK, TITAN, 0, {RT}}, +{"mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405|TITAN, 0, {RT}}, +{"mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, 0, {RT}}, +{"mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, 0, {RT}}, +{"mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, 0, {RT}}, +{"mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, 0, {RT}}, +{"mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, 0, {RT}}, +{"mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, 0, {RT}}, +{"mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, 0, {RT}}, +{"mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, 0, {RT}}, +{"mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, 0, {RT}}, +{"mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, 0, {RT}}, +{"mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, 0, {RT}}, +{"mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, 0, {RT}}, +{"mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, 0, {RT}}, +{"mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403|TITAN, 0, {RT}}, +{"mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, 0, {RT}}, +{"mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, 0, {RT}}, +{"mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, 0, {RT}}, +{"mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, 0, {RT}}, +{"mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, 0, {RT}}, +{"mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, 0, {RT}}, +{"mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, 0, {RT}}, +{"mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, 0, {RT}}, +{"mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, 0, {RT}}, +{"mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, 0, {RT}}, +{"mfdbdr", XSPR(31,339,1011), XSPR_MASK, TITAN, 0, {RS}}, +{"mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, 0, {RT}}, +{"mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, 0, {RT}}, +{"mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, 0, {RT}}, +{"mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, 0, {RT}}, +{"mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, 0, {RT}}, +{"mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, 0, {RT}}, +{"mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, 0, {RT}}, +{"mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, 0, {RT}}, +{"mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, 0, {RT}}, +{"mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, 0, {RT}}, +{"mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, 0, {RT}}, +{"mfspr", X(31,339), X_MASK, COM, 0, {RT, SPR}}, + +{"lwax", X(31,341), X_MASK, PPC64, 0, {RT, RA0, RB}}, + +{"dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, + +{"lhax", X(31,343), X_MASK, COM, 0, {RT, RA0, RB}}, + +{"lvxl", X(31,359), X_MASK, PPCVEC, 0, {VD, RA0, RB}}, + +{"abs", XO(31,360,0,0), XORB_MASK, M601, 0, {RT, RA}}, +{"abs.", XO(31,360,0,1), XORB_MASK, M601, 0, {RT, RA}}, + +{"divs", XO(31,363,0,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"divs.", XO(31,363,0,1), XO_MASK, M601, 0, 
{RT, RA, RB}}, + +{"lxvwsx", X(31,364), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, + +{"tlbia", X(31,370), 0xffffffff, PPC, E500|TITAN, {0}}, + +{"mftbu", XSPR(31,371,269), XSPR_MASK, PPC, NO371|POWER4, {RT}}, +{"mftb", X(31,371), X_MASK, PPC, NO371|POWER4, {RT, TBR}}, +{"mftbl", XSPR(31,371,268), XSPR_MASK, PPC, NO371|POWER4, {RT}}, + +{"lwaux", X(31,373), X_MASK, PPC64, 0, {RT, RAL, RB}}, + +{"dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, + +{"lhaux", X(31,375), X_MASK, COM, 0, {RT, RAL, RB}}, + +{"popcntw", X(31,378), XRB_MASK, POWER7|PPCA2, 0, {RA, RS}}, + +{"mtdcrx", X(31,387), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RA, RS}}, +{"mtdcrx.", XRC(31,387,1), X_MASK, PPCA2, 0, {RA, RS}}, + +{"stvexbx", X(31,389), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, + +{"dcblc", X(31,390), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}}, +{"stdfcmx", APU(31,391,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, + +{"divdeu", XO(31,393,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divdeu.", XO(31,393,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweu", XO(31,395,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweu.", XO(31,395,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, + +{"stxvx", X(31,396), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, +{"stxvl", X(31,397), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, + +{"dcblce", X(31,398), X_MASK, PPCCHLK, E500MC, {CT, RA, RB}}, + +{"slbmte", X(31,402), XRA_MASK, PPC64, 0, {RS, RB}}, + +{"mtvsrws", X(31,403), XX1RB_MASK, PPCVSX3, 0, {XT6, RA}}, + +{"pbt.", XRC(31,404,1), X_MASK, POWER8, 0, {RS, RA0, RB}}, + +{"icswx", XRC(31,406,0), X_MASK, POWER7|PPCA2, 0, {RS, RA, RB}}, +{"icswx.", XRC(31,406,1), X_MASK, POWER7|PPCA2, 0, {RS, RA, RB}}, + +{"sthx", X(31,407), X_MASK, COM, 0, {RS, RA0, RB}}, + +{"orc", XRC(31,412,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"orc.", XRC(31,412,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"sthepx", X(31,415), X_MASK, E500MC|PPCA2, 0, {RS, RA0, RB}}, + +{"mtdcrux", X(31,419), X_MASK, PPC464, 0, {RA, RS}}, + +{"stvexhx", X(31,421), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, + +{"dcblq.", XRC(31,422,1), X_MASK, E6500, 0, {CT, RA0, RB}}, + +{"divde", XO(31,425,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divde.", XO(31,425,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divwe", XO(31,427,0,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divwe.", XO(31,427,0,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, + +{"stxvll", X(31,429), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, + +{"clrbhrb", X(31,430), 0xffffffff, POWER8, 0, {0}}, + +{"slbie", X(31,434), XRTRA_MASK, PPC64, 0, {RB}}, + +{"mtvsrdd", X(31,435), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, + +{"ecowx", X(31,438), X_MASK, PPC, E500|TITAN, {RT, RA0, RB}}, + +{"sthux", X(31,439), X_MASK, COM, 0, {RS, RAS, RB}}, + +{"mdors", 0x7f9ce378, 0xffffffff, E500MC, 0, {0}}, + +{"miso", 0x7f5ad378, 0xffffffff, E6500, 0, {0}}, + +/* The "yield", "mdoio" and "mdoom" instructions are extended mnemonics for + "or rX,rX,rX", with rX being r27, r29 and r30 respectively. 
*/ +{"yield", 0x7f7bdb78, 0xffffffff, POWER7, 0, {0}}, +{"mdoio", 0x7fbdeb78, 0xffffffff, POWER7, 0, {0}}, +{"mdoom", 0x7fdef378, 0xffffffff, POWER7, 0, {0}}, +{"mr", XRC(31,444,0), X_MASK, COM, 0, {RA, RS, RBS}}, +{"or", XRC(31,444,0), X_MASK, COM, 0, {RA, RS, RB}}, +{"mr.", XRC(31,444,1), X_MASK, COM, 0, {RA, RS, RBS}}, +{"or.", XRC(31,444,1), X_MASK, COM, 0, {RA, RS, RB}}, + +{"mtexisr", XSPR(31,451, 64), XSPR_MASK, PPC403, 0, {RS}}, +{"mtexier", XSPR(31,451, 66), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, 0, {RS}}, +{"mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, 0, {RS}}, +{"mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmact1", XSPR(31,451,201), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmasa3", XSPR(31,451,219), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdcr", X(31,451), X_MASK, PPC403|BOOKE|PPCA2|PPC476, E500|TITAN, {SPR, RS}}, +{"mtdcr.", XRC(31,451,1), X_MASK, PPCA2, 0, {SPR, RS}}, + +{"stvexwx", X(31,453), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, + +{"dccci", X(31,454), XRT_MASK, PPC403|PPC440|TITAN|PPCA2, 0, {RAOPT, RBOPT}}, +{"dci", X(31,454), XRARB_MASK, PPCA2|PPC476, 0, {CT}}, + +{"divdu", XO(31,457,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"divdu.", XO(31,457,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, + +{"divwu", XO(31,459,0,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"divwu.", XO(31,459,0,1), XO_MASK, PPC, 0, {RT, RA, RB}}, + +{"mtpmr", X(31,462), X_MASK, PPCPMR|PPCE300, 0, {PMR, RS}}, +{"mttmr", X(31,494), X_MASK, PPCTMR|E6500, 0, {TMR, RS}}, + +{"slbieg", X(31,466), XRA_MASK, POWER9, 0, {RS, RB}}, + +{"mtmq", XSPR(31,467, 0), XSPR_MASK, M601, 0, {RS}}, +{"mtxer", XSPR(31,467, 1), XSPR_MASK, COM, 0, {RS}}, +{"mtlr", XSPR(31,467, 8), XSPR_MASK, COM, 0, {RS}}, +{"mtctr", XSPR(31,467, 9), XSPR_MASK, COM, 0, {RS}}, +{"mtdscr", XSPR(31,467, 17), XSPR_MASK, POWER6, 0, {RS}}, +{"mttid", XSPR(31,467, 17), XSPR_MASK, POWER, 0, {RS}}, +{"mtdsisr", XSPR(31,467, 18), 
XSPR_MASK, COM, TITAN, {RS}}, +{"mtdar", XSPR(31,467, 19), XSPR_MASK, COM, TITAN, {RS}}, +{"mtrtcu", XSPR(31,467, 20), XSPR_MASK, COM, TITAN, {RS}}, +{"mtrtcl", XSPR(31,467, 21), XSPR_MASK, COM, TITAN, {RS}}, +{"mtdec", XSPR(31,467, 22), XSPR_MASK, COM, 0, {RS}}, +{"mtsdr0", XSPR(31,467, 24), XSPR_MASK, POWER, 0, {RS}}, +{"mtsdr1", XSPR(31,467, 25), XSPR_MASK, COM, TITAN, {RS}}, +{"mtsrr0", XSPR(31,467, 26), XSPR_MASK, COM, 0, {RS}}, +{"mtsrr1", XSPR(31,467, 27), XSPR_MASK, COM, 0, {RS}}, +{"mtcfar", XSPR(31,467, 28), XSPR_MASK, POWER6, 0, {RS}}, +{"mtpid", XSPR(31,467, 48), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdecar", XSPR(31,467, 54), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtcsrr0", XSPR(31,467, 58), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtcsrr1", XSPR(31,467, 59), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdear", XSPR(31,467, 61), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtesr", XSPR(31,467, 62), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivpr", XSPR(31,467, 63), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, 0, {RS}}, +{"mticr", XSPR(31,467,148), XSPR_MASK, PPC860, 0, {RS}}, +{"mtder", XSPR(31,467,149), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcounta", XSPR(31,467,150), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, 0, {RS}}, +{"mtctrl", XSPR(31,467,152), XSPR_MASK, POWER4, 0, {RS}}, +{"mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, 0, {RS}}, +{"mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, 0, {RS}}, +{"mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, 0, {RS}}, +{"mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, 0, {RS}}, +{"mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, 0, {RS}}, +{"mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, 0, {RS}}, +{"mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, 0, {RS}}, +{"mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtsprg", XSPR(31,467,256), XSPRG_MASK, PPC, 0, {SPRG, RS}}, +{"mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, 0, {RS}}, +{"mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, 0, {RS}}, +{"mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, 0, {RS}}, +{"mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, 0, {RS}}, +{"mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, +{"mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, +{"mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, +{"mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405|BOOKE, 0, {RS}}, +{"mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, 0, {RS}}, +{"mtear", XSPR(31,467,282), XSPR_MASK, PPC, TITAN, {RS}}, +{"mttbl", XSPR(31,467,284), XSPR_MASK, PPC, 0, {RS}}, +{"mttbu", XSPR(31,467,285), XSPR_MASK, PPC, 0, {RS}}, +{"mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtdvc2", XSPR(31,467,319), XSPR_MASK, 
BOOKE, 0, {RS}}, +{"mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, 0, {RS}}, +{"mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor0", XSPR(31,467,400), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor7", XSPR(31,467,407), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, 0, {RS}}, +{"mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, 0, {RS}}, +{"mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, 0, {RS}}, +{"mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, 0, {RS}}, +{"mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, 0, {RS}}, +{"mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, +{"mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, 0, {RS}}, +{"mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, +{"mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, 0, {RS}}, +{"mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, 0, {RS}}, +{"mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, +{"mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, TITAN, {SPRBAT, RS}}, +{"mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, 0, {RS}}, +{"mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, 0, {RS}}, +{"mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, 0, {RS}}, +{"mtivndx", XSPR(31,467,880), XSPR_MASK, TITAN, 0, {RS}}, +{"mtdvndx", XSPR(31,467,881), XSPR_MASK, TITAN, 0, {RS}}, +{"mtivlim", XSPR(31,467,882), XSPR_MASK, TITAN, 0, {RS}}, +{"mtdvlim", XSPR(31,467,883), XSPR_MASK, TITAN, 0, {RS}}, +{"mtclcsr", XSPR(31,467,884), XSPR_MASK, TITAN, 0, {RS}}, +{"mtccr1", XSPR(31,467,888), XSPR_MASK, TITAN, 0, {RS}}, +{"mtppr", XSPR(31,467,896), XSPR_MASK, POWER7, 0, {RS}}, +{"mtppr32", XSPR(31,467,898), XSPR_MASK, POWER7, 0, {RS}}, +{"mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, 0, {RS}}, +{"mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, 0, {RS}}, +{"mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, 0, {RS}}, +{"mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, 0, {RS}}, +{"mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, 0, {RS}}, +{"mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, 0, {RS}}, +{"mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, 0, {RS}}, +{"mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, 0, {RS}}, +{"mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, 0, {RS}}, +{"mtrmmucr", XSPR(31,467,946), XSPR_MASK, TITAN, 0, {RS}}, +{"mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405|TITAN, 0, {RS}}, +{"mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, 0, {RS}}, +{"mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, 0, {RS}}, +{"mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, 0, {RS}}, +{"mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, 0, {RS}}, +{"mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, 0, {RS}}, +{"mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, 0, {RS}}, +{"mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, 0, {RS}}, +{"mtdcwr", 
XSPR(31,467,954), XSPR_MASK, PPC403, 0, {RS}},
+{"mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, 0, {RS}},
+{"mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, 0, {RS}},
+{"mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, 0, {RS}},
+{"mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, 0, {RS}},
+{"mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, 0, {RS}},
+{"mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, 0, {RS}},
+{"mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, 0, {RS}},
+{"mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, 0, {RS}},
+{"mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, 0, {RS}},
+{"mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, 0, {RS}},
+{"mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, 0, {RS}},
+{"mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, 0, {RS}},
+{"mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, 0, {RS}},
+{"mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, 0, {RS}},
+{"mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, 0, {RS}},
+{"mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, 0, {RS}},
+{"mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, 0, {RS}},
+{"mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, 0, {RS}},
+{"mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, 0, {RS}},
+{"mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, 0, {RS}},
+{"mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, 0, {RS}},
+{"mtdbdr", XSPR(31,467,1011), XSPR_MASK, TITAN, 0, {RS}},
+{"mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, 0, {RS}},
+{"mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, 0, {RS}},
+{"mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, 0, {RS}},
+{"mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, 0, {RS}},
+{"mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, 0, {RS}},
+{"mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, 0, {RS}},
+{"mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, 0, {RS}},
+{"mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, 0, {RS}},
+{"mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, 0, {RS}},
+{"mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, 0, {RS}},
+{"mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, 0, {RS}},
+{"mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, 0, {RS}},
+{"mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, 0, {RS}},
+{"mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, 0, {RS}},
+{"mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, 0, {RS}},
+{"mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, 0, {RS}},
+{"mtspr", X(31,467), X_MASK, COM, 0, {SPR, RS}},
+
+{"dcbi", X(31,470), XRT_MASK, PPC, 0, {RA0, RB}},
+
+{"nand", XRC(31,476,0), X_MASK, COM, 0, {RA, RS, RB}},
+{"nand.", XRC(31,476,1), X_MASK, COM, 0, {RA, RS, RB}},
+
+{"dsn", X(31,483), XRT_MASK, E500MC, 0, {RA, RB}},
+
+{"dcread", X(31,486), X_MASK, PPC403|PPC440, PPCA2|PPC476, {RT, RA0, RB}},
+
+{"icbtls", X(31,486), X_MASK, PPCCHLK|PPC476|TITAN, 0, {CT, RA0, RB}},
+
+{"stvxl", X(31,487), X_MASK, PPCVEC, 0, {VS, RA0, RB}},
+
+{"nabs", XO(31,488,0,0), XORB_MASK, M601, 0, {RT, RA}},
+{"nabs.", XO(31,488,0,1), XORB_MASK, M601, 0, {RT, RA}},
+
+{"divd", XO(31,489,0,0), XO_MASK, PPC64, 0, {RT, RA, RB}},
+{"divd.", XO(31,489,0,1), XO_MASK, PPC64, 0, {RT, RA, RB}},
+
+{"divw", XO(31,491,0,0), XO_MASK, PPC, 0, {RT, RA, RB}},
+{"divw.", XO(31,491,0,1), XO_MASK, PPC, 0, {RT, RA, RB}},
+
+{"icbtlse", X(31,494), X_MASK, PPCCHLK, E500MC, {CT, RA, RB}},
+
+{"slbia", X(31,498), 0xff1fffff, POWER6, 0, {IH}},
+{"slbia", X(31,498), 0xffffffff, PPC64, POWER6, {0}},
+
+{"cli", X(31,502), XRB_MASK, POWER, 0, {RT, RA}},
+
+{"popcntd", X(31,506), XRB_MASK, POWER7|PPCA2, 0, {RA, RS}},
+
+{"cmpb", X(31,508), X_MASK, POWER6|PPCA2|PPC476, 0, {RA, RS, RB}},
+
+{"mcrxr", X(31,512), XBFRARB_MASK, COM, POWER7, {BF}},
+
+{"lbdcbx", X(31,514), X_MASK, E200Z4, 0, {RT, RA, RB}},
+{"lbdx", X(31,515), X_MASK, E500MC, 0, {RT, RA, RB}},
-{ "clcs", X(31,531), XRB_MASK, M601, { RT, RA } },
+{"bblels", X(31,518), X_MASK, PPCBRLK, 0, {0}},
-{ "ldbrx", X(31,532), X_MASK, CELL, { RT, RA0, RB } },
+{"lvlx", X(31,519), X_MASK, CELL, 0, {VD, RA0, RB}},
+{"lbfcmux", APU(31,519,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}},
-{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA0, RB } },
-{ "lsx", X(31,533), X_MASK, PWRCOM, { RT, RA, RB } },
+{"subfco", XO(31,8,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}},
+{"sfo", XO(31,8,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}},
+{"subco", XO(31,8,1,0), XO_MASK, PPCCOM, 0, {RT, RB, RA}},
+{"subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}},
+{"sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}},
+{"subco.", XO(31,8,1,1), XO_MASK, PPCCOM, 0, {RT, RB, RA}},
-{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA0, RB } },
-{ "lbrx", X(31,534), X_MASK, PWRCOM, { RT, RA, RB } },
+{"addco", XO(31,10,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}},
+{"ao", XO(31,10,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}},
+{"addco.", XO(31,10,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}},
+{"ao.", XO(31,10,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}},
-{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA0, RB } },
+{"lxsspx", X(31,524), XX1_MASK, PPCVSX2, 0, {XT6, RA0, RB}},
-{ "srw", XRC(31,536,0), X_MASK, PPCCOM, { RA, RS, RB } },
-{ "sr", XRC(31,536,0), X_MASK, PWRCOM, { RA, RS, RB } },
-{ "srw.", XRC(31,536,1), X_MASK, PPCCOM, { RA, RS, RB } },
-{ "sr.", XRC(31,536,1), X_MASK, PWRCOM, { RA, RS, RB } },
+{"clcs", X(31,531), XRB_MASK, M601, 0, {RT, RA}},
-{ "rrib", XRC(31,537,0), X_MASK, M601, { RA, RS, RB } },
-{ "rrib.", XRC(31,537,1), X_MASK, M601, { RA, RS, RB } },
+{"ldbrx", X(31,532), X_MASK, CELL|POWER7|PPCA2, 0, {RT, RA0, RB}},
-{ "srd", XRC(31,539,0), X_MASK, PPC64, { RA, RS, RB } },
-{ "srd.", XRC(31,539,1), X_MASK, PPC64, { RA, RS, RB } },
+{"lswx", X(31,533), X_MASK, PPCCOM, E500|E500MC, {RT, RAX, RBX}},
+{"lsx", X(31,533), X_MASK, PWRCOM, 0, {RT, RA, RB}},
-{ "maskir", XRC(31,541,0), X_MASK, M601, { RA, RS, RB } },
-{ "maskir.", XRC(31,541,1), X_MASK, M601, { RA, RS, RB } },
+{"lwbrx", X(31,534), X_MASK, PPCCOM, 0, {RT, RA0, RB}},
+{"lbrx", X(31,534), X_MASK, PWRCOM, 0, {RT, RA, RB}},
-{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA0, RB } },
+{"lfsx", X(31,535), X_MASK, COM, PPCEFS, {FRT, RA0, RB}},
-{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA0, RB } },
+{"srw", XRC(31,536,0), X_MASK, PPCCOM, 0, {RA, RS, RB}},
+{"sr", XRC(31,536,0), X_MASK, PWRCOM, 0, {RA, RS, RB}},
+{"srw.", XRC(31,536,1), X_MASK, PPCCOM, 0, {RA, RS, RB}},
+{"sr.", XRC(31,536,1), X_MASK, PWRCOM, 0, {RA, RS, RB}},
-{ "bbelr", X(31,550), X_MASK, PPCBRLK, { 0 }},
+{"rrib", XRC(31,537,0), X_MASK, M601, 0, {RA, RS, RB}},
+{"rrib.", XRC(31,537,1), X_MASK, M601, 0, {RA, RS, RB}},
-{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } },
+{"cnttzw", XRC(31,538,0), XRB_MASK, POWER9, 0, {RA, RS}},
+{"cnttzw.", XRC(31,538,1), XRB_MASK, POWER9, 0, {RA, RS}},
-{ "lfsux", X(31,567), X_MASK, COM, { FRT, RAS, RB } },
+{"srd", XRC(31,539,0), X_MASK, PPC64, 0, {RA, RS, RB}},
+{"srd.", XRC(31,539,1), X_MASK, PPC64, 0, {RA, RS, RB}},
-{ "lfsuxe", X(31,575), X_MASK, BOOKE64, { FRT, RAS, RB } },
+{"maskir", XRC(31,541,0), X_MASK, M601, 0, {RA, RS, RB}},
+{"maskir.", XRC(31,541,1), X_MASK, M601, 0, {RA, RS, RB}},
-{ "mfsr", X(31,595), XRB_MASK|(1<<20), COM32, { RT, SR } },
+{"lhdcbx", X(31,546), X_MASK, E200Z4, 0, {RT, RA, RB}},
+{"lhdx", X(31,547), X_MASK, E500MC, 0, {RT, RA, RB}},
-{ "lswi", X(31,597), X_MASK, PPCCOM, { RT, RA0, NB } },
-{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA0, NB } },
+{"lvtrx", X(31,549), X_MASK, PPCVEC2, 0, {VD, RA0, RB}},
-{ "lwsync", XSYNC(31,598,1), 0xffffffff, PPC, { 0 } },
-{ "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, { 0 } },
-{ "msync", X(31,598), 0xffffffff, BOOKE, { 0 } },
-{ "sync", X(31,598), XSYNC_MASK, PPCCOM, { LS } },
-{ "dcs", X(31,598), 0xffffffff, PWRCOM, { 0 } },
+{"bbelr", X(31,550), X_MASK, PPCBRLK, 0, {0}},
-{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA0, RB } },
+{"lvrx", X(31,551), X_MASK, CELL, 0, {VD, RA0, RB}},
+{"lhfcmux", APU(31,551,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}},
-{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA0, RB } },
+{"subfo", XO(31,40,1,0), XO_MASK, PPC, 0, {RT, RA, RB}},
+{"subo", XO(31,40,1,0), XO_MASK, PPC, 0, {RT, RB, RA}},
+{"subfo.", XO(31,40,1,1), XO_MASK, PPC, 0, {RT, RA, RB}},
+{"subo.", XO(31,40,1,1), XO_MASK, PPC, 0, {RT, RB, RA}},
-{ "mffgpr", XRC(31,607,0), XRA_MASK, POWER6, { FRT, RB } },
+{"tlbsync", X(31,566), 0xffffffff, PPC, 0, {0}},
-{ "mfsri", X(31,627), X_MASK, PWRCOM, { RT, RA, RB } },
+{"lfsux", X(31,567), X_MASK, COM, PPCEFS, {FRT, RAS, RB}},
-{ "dclst", X(31,630), XRB_MASK, PWRCOM, { RS, RA } },
+{"cnttzd", XRC(31,570,0), XRB_MASK, POWER9, 0, {RA, RS}},
+{"cnttzd.", XRC(31,570,1), XRB_MASK, POWER9, 0, {RA, RS}},
-{ "lfdux", X(31,631), X_MASK, COM, { FRT, RAS, RB } },
+{"mcrxrx", X(31,576), XBFRARB_MASK, POWER9, 0, {BF}},
-{ "lfduxe", X(31,639), X_MASK, BOOKE64, { FRT, RAS, RB } },
+{"lwdcbx", X(31,578), X_MASK, E200Z4, 0, {RT, RA, RB}},
+{"lwdx", X(31,579), X_MASK, E500MC, 0, {RT, RA, RB}},
-{ "mfsrin", X(31,659), XRA_MASK, PPC32, { RT, RB } },
+{"lvtlx", X(31,581), X_MASK, PPCVEC2, 0, {VD, RA0, RB}},
-{ "stdbrx", X(31,660), X_MASK, CELL, { RS, RA0, RB } },
+{"lwat", X(31,582), X_MASK, POWER9, 0, {RT, RA0, FC}},
-{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA0, RB } },
-{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA0, RB } },
+{"lwfcmux", APU(31,583,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}},
-{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA0, RB } },
-{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA0, RB } },
+{"lxsdx", X(31,588), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}},
-{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA0, RB } },
+{"mfsr", X(31,595), XRB_MASK|(1<<20), COM, NON32, {RT, SR}},
-{ "srq", XRC(31,664,0), X_MASK, M601, { RA, RS, RB } },
-{ "srq.", XRC(31,664,1), X_MASK, M601, { RA, RS, RB } },
+{"lswi", X(31,597), X_MASK, PPCCOM, E500|E500MC, {RT, RAX, NBI}},
+{"lsi", X(31,597), X_MASK, PWRCOM, 0, {RT, RA0, NB}},
-{ "sre", XRC(31,665,0), X_MASK, M601, { RA, RS, RB } },
-{ "sre.", XRC(31,665,1), X_MASK, M601, { RA, RS, RB } },
+{"hwsync", XSYNC(31,598,0), 0xffffffff, POWER4, BOOKE|PPC476, {0}},
+{"lwsync", XSYNC(31,598,1), 0xffffffff, PPC, E500, {0}},
+{"ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, 0, {0}},
+{"sync", X(31,598), XSYNCLE_MASK, E6500, 0, {LS, ESYNC}},
+{"sync", X(31,598), XSYNC_MASK, PPCCOM, BOOKE|PPC476, {LS}},
+{"msync", X(31,598), 0xffffffff, BOOKE|PPCA2|PPC476, 0, {0}},
+{"sync", X(31,598), 0xffffffff, BOOKE|PPC476, E6500, {0}},
+{"lwsync", X(31,598), 0xffffffff, E500, 0, {0}},
+{"dcs", X(31,598), 0xffffffff, PWRCOM, 0, {0}},
-{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA0, RB } },
+{"lfdx", X(31,599), X_MASK, COM, PPCEFS, {FRT, RA0, RB}},
-{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA0, RB } },
+{"mffgpr", XRC(31,607,0), XRA_MASK, POWER6, POWER7, {FRT, RB}},
+{"lfdepx", X(31,607), X_MASK, E500MC|PPCA2, 0, {FRT, RA0, RB}},
-{
"stfsux", X(31,695), X_MASK, COM, { FRS, RAS, RB } }, +{"lddx", X(31,611), X_MASK, E500MC, 0, {RT, RA, RB}}, -{ "sriq", XRC(31,696,0), X_MASK, M601, { RA, RS, SH } }, -{ "sriq.", XRC(31,696,1), X_MASK, M601, { RA, RS, SH } }, +{"lvswx", X(31,613), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "stfsuxe", X(31,703), X_MASK, BOOKE64, { FRS, RAS, RB } }, +{"ldat", X(31,614), X_MASK, POWER9, 0, {RT, RA0, FC}}, -{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA0, NB } }, -{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA0, NB } }, +{"lqfcmux", APU(31,615,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA0, RB } }, +{"nego", XO(31,104,1,0), XORB_MASK, COM, 0, {RT, RA}}, +{"nego.", XO(31,104,1,1), XORB_MASK, COM, 0, {RT, RA}}, -{ "srlq", XRC(31,728,0), X_MASK, M601, { RA, RS, RB } }, -{ "srlq.", XRC(31,728,1), X_MASK, M601, { RA, RS, RB } }, +{"mulo", XO(31,107,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"mulo.", XO(31,107,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, -{ "sreq", XRC(31,729,0), X_MASK, M601, { RA, RS, RB } }, -{ "sreq.", XRC(31,729,1), X_MASK, M601, { RA, RS, RB } }, +{"mfsri", X(31,627), X_MASK, M601, 0, {RT, RA, RB}}, -{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA0, RB } }, +{"dclst", X(31,630), XRB_MASK, M601, 0, {RS, RA}}, -{ "mftgpr", XRC(31,735,0), XRA_MASK, POWER6, { RT, FRB } }, +{"lfdux", X(31,631), X_MASK, COM, PPCEFS, {FRT, RAS, RB}}, -{ "dcba", X(31,758), XRT_MASK, PPC405 | BOOKE, { RA, RB } }, +{"stbdcbx", X(31,642), X_MASK, E200Z4, 0, {RS, RA, RB}}, +{"stbdx", X(31,643), X_MASK, E500MC, 0, {RS, RA, RB}}, -{ "stfdux", X(31,759), X_MASK, COM, { FRS, RAS, RB } }, +{"stvlx", X(31,647), X_MASK, CELL, 0, {VS, RA0, RB}}, +{"stbfcmux", APU(31,647,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "srliq", XRC(31,760,0), X_MASK, M601, { RA, RS, SH } }, -{ "srliq.", XRC(31,760,1), X_MASK, M601, { RA, RS, SH } }, +{"stxsspx", X(31,652), XX1_MASK, PPCVSX2, 0, {XS6, RA0, RB}}, -{ "dcbae", X(31,766), XRT_MASK, BOOKE64, { RA, RB } }, +{"tbegin.", XRC(31,654,1), XRTLRARB_MASK, PPCHTM, 0, {HTM_R}}, -{ "stfduxe", X(31,767), X_MASK, BOOKE64, { FRS, RAS, RB } }, +{"subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "tlbivax", X(31,786), XRT_MASK, BOOKE, { RA, RB } }, -{ "tlbivaxe",X(31,787), XRT_MASK, BOOKE64, { RA, RB } }, +{"addeo", XO(31,138,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"aeo", XO(31,138,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "lwzcix", X(31,789), X_MASK, POWER6, { RT, RA0, RB } }, +{"mfsrin", X(31,659), XRA_MASK, PPC, NON32, {RT, RB}}, -{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA0, RB } }, +{"stdbrx", X(31,660), X_MASK, CELL|POWER7|PPCA2, 0, {RS, RA0, RB}}, -{ "sraw", XRC(31,792,0), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sra", XRC(31,792,0), X_MASK, PWRCOM, { RA, RS, RB } }, -{ "sraw.", XRC(31,792,1), X_MASK, PPCCOM, { RA, RS, RB } }, -{ "sra.", XRC(31,792,1), X_MASK, PWRCOM, { RA, RS, RB } }, +{"stswx", X(31,661), X_MASK, PPCCOM, E500|E500MC, {RS, RA0, RB}}, +{"stsx", X(31,661), X_MASK, PWRCOM, 0, {RS, RA0, RB}}, -{ "srad", XRC(31,794,0), X_MASK, PPC64, { RA, RS, RB } }, -{ "srad.", XRC(31,794,1), X_MASK, PPC64, { RA, RS, RB } }, +{"stwbrx", X(31,662), X_MASK, PPCCOM, 0, {RS, RA0, RB}}, +{"stbrx", X(31,662), X_MASK, PWRCOM, 0, {RS, RA0, 
RB}}, -{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA0, RB } }, +{"stfsx", X(31,663), X_MASK, COM, PPCEFS, {FRS, RA0, RB}}, -{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA0, RB } }, -{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA0, RB } }, +{"srq", XRC(31,664,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"srq.", XRC(31,664,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "rac", X(31,818), X_MASK, PWRCOM, { RT, RA, RB } }, +{"sre", XRC(31,665,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sre.", XRC(31,665,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "lhzcix", X(31,821), X_MASK, POWER6, { RT, RA0, RB } }, +{"sthdcbx", X(31,674), X_MASK, E200Z4, 0, {RS, RA, RB}}, +{"sthdx", X(31,675), X_MASK, E500MC, 0, {RS, RA, RB}}, -{ "dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, { STRM } }, -{ "dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, { 0 } }, +{"stvfrx", X(31,677), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, -{ "srawi", XRC(31,824,0), X_MASK, PPCCOM, { RA, RS, SH } }, -{ "srai", XRC(31,824,0), X_MASK, PWRCOM, { RA, RS, SH } }, -{ "srawi.", XRC(31,824,1), X_MASK, PPCCOM, { RA, RS, SH } }, -{ "srai.", XRC(31,824,1), X_MASK, PWRCOM, { RA, RS, SH } }, +{"stvrx", X(31,679), X_MASK, CELL, 0, {VS, RA0, RB}}, +{"sthfcmux", APU(31,679,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "slbmfev", X(31,851), XRA_MASK, PPC64, { RT, RB } }, +{"tendall.", XRC(31,686,1)|(1<<25), XRTRARB_MASK, PPCHTM, 0, {0}}, +{"tend.", XRC(31,686,1), XRTARARB_MASK, PPCHTM, 0, {HTM_A}}, -{ "lbzcix", X(31,853), X_MASK, POWER6, { RT, RA0, RB } }, +{"stbcx.", XRC(31,694,1), X_MASK, POWER8|E6500, 0, {RS, RA0, RB}}, -{ "mbar", X(31,854), X_MASK, BOOKE, { MO } }, -{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } }, +{"stfsux", X(31,695), X_MASK, COM, PPCEFS, {FRS, RAS, RB}}, -{ "lfiwax", X(31,855), X_MASK, POWER6, { FRT, RA0, RB } }, +{"sriq", XRC(31,696,0), X_MASK, M601, 0, {RA, RS, SH}}, +{"sriq.", XRC(31,696,1), X_MASK, M601, 0, {RA, RS, SH}}, -{ "ldcix", X(31,885), X_MASK, POWER6, { RT, RA0, RB } }, +{"stwdcbx", X(31,706), X_MASK, E200Z4, 0, {RS, RA, RB}}, +{"stwdx", X(31,707), X_MASK, E500MC, 0, {RS, RA, RB}}, -{ "tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE, { RTO, RA, RB } }, -{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE, { RTO, RA, RB } }, -{ "tlbsxe", XRC(31,915,0), X_MASK, BOOKE64, { RA, RB } }, -{ "tlbsxe.", XRC(31,915,1), X_MASK, BOOKE64, { RA, RB } }, +{"stvflx", X(31,709), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, -{ "slbmfee", X(31,915), XRA_MASK, PPC64, { RT, RB } }, +{"stwat", X(31,710), X_MASK, POWER9, 0, {RS, RA0, FC}}, -{ "stwcix", X(31,917), X_MASK, POWER6, { RS, RA0, RB } }, +{"stwfcmux", APU(31,711,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA0, RB } }, +{"stxsdx", X(31,716), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, -{ "sraq", XRC(31,920,0), X_MASK, M601, { RA, RS, RB } }, -{ "sraq.", XRC(31,920,1), X_MASK, M601, { RA, RS, RB } }, +{"tcheck", X(31,718), XRTBFRARB_MASK, PPCHTM, 0, {BF}}, -{ "srea", XRC(31,921,0), X_MASK, M601, { RA, RS, RB } }, -{ "srea.", XRC(31,921,1), X_MASK, M601, { RA, RS, RB } }, +{"subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"subfzeo.", XO(31,200,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "extsh", XRC(31,922,0), XRB_MASK, PPCCOM, { RA, RS } }, -{ "exts", XRC(31,922,0), XRB_MASK, PWRCOM, { RA, RS } }, -{ "extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, { RA, RS } }, -{ "exts.", XRC(31,922,1), XRB_MASK, PWRCOM, { RA, RS } }, +{"addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, 0, {RT, 
RA}}, +{"azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"addzeo.", XO(31,202,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA0, RB } }, +{"stswi", X(31,725), X_MASK, PPCCOM, E500|E500MC, {RS, RA0, NB}}, +{"stsi", X(31,725), X_MASK, PWRCOM, 0, {RS, RA0, NB}}, -{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA0, RB } }, +{"sthcx.", XRC(31,726,1), X_MASK, POWER8|E6500, 0, {RS, RA0, RB}}, -{ "tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbre", X(31,946), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } }, +{"stfdx", X(31,727), X_MASK, COM, PPCEFS, {FRS, RA0, RB}}, -{ "sthcix", X(31,949), X_MASK, POWER6, { RS, RA0, RB } }, +{"srlq", XRC(31,728,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"srlq.", XRC(31,728,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "sraiq", XRC(31,952,0), X_MASK, M601, { RA, RS, SH } }, -{ "sraiq.", XRC(31,952,1), X_MASK, M601, { RA, RS, SH } }, +{"sreq", XRC(31,729,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sreq.", XRC(31,729,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "extsb", XRC(31,954,0), XRB_MASK, PPC, { RA, RS} }, -{ "extsb.", XRC(31,954,1), XRB_MASK, PPC, { RA, RS} }, +{"mftgpr", XRC(31,735,0), XRA_MASK, POWER6, POWER7, {RT, FRB}}, +{"stfdepx", X(31,735), X_MASK, E500MC|PPCA2, 0, {FRS, RA0, RB}}, -{ "stduxe", X(31,959), X_MASK, BOOKE64, { RS, RAS, RB } }, +{"stddx", X(31,739), X_MASK, E500MC, 0, {RS, RA, RB}}, -{ "iccci", X(31,966), XRT_MASK, PPC403|PPC440, { RA, RB } }, +{"stvswx", X(31,741), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, -{ "tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, { RT, RA } }, -{ "tlbwe", X(31,978), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } }, -{ "tlbld", X(31,978), XRTRA_MASK, PPC, { RB } }, +{"stdat", X(31,742), X_MASK, POWER9, 0, {RS, RA0, FC}}, -{ "stbcix", X(31,981), X_MASK, POWER6, { RS, RA0, RB } }, +{"stqfcmux", APU(31,743,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } }, +{"subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"subfmeo.", XO(31,232,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA0, RB } }, +{"mulldo", XO(31,233,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"mulldo.", XO(31,233,1,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, -{ "extsw", XRC(31,986,0), XRB_MASK, PPC64 | BOOKE64,{ RA, RS } }, -{ "extsw.", XRC(31,986,1), XRB_MASK, PPC64, { RA, RS } }, +{"addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"ameo", XO(31,234,1,0), XORB_MASK, PWRCOM, 0, {RT, RA}}, +{"addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, 0, {RT, RA}}, +{"ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, 0, {RT, RA}}, -{ "icread", X(31,998), XRT_MASK, PPC403|PPC440, { RA, RB } }, +{"mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"mulso", XO(31,235,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "icbie", X(31,990), XRT_MASK, BOOKE64, { RA, RB } }, -{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA0, RB } }, +{"tsuspend.", XRCL(31,750,0,1), XRTRARB_MASK,PPCHTM, 0, {0}}, +{"tresume.", XRCL(31,750,1,1), XRTRARB_MASK,PPCHTM, 0, {0}}, +{"tsr.", XRC(31,750,1), XRTLRARB_MASK,PPCHTM, 0, {L}}, -{ "tlbli", X(31,1010), 
XRTRA_MASK, PPC, { RB } }, +{"darn", X(31,755), XLRAND_MASK, POWER9, 0, {RT, LRAND}}, -{ "stdcix", X(31,1013), X_MASK, POWER6, { RS, RA0, RB } }, +{"dcba", X(31,758), XRT_MASK, PPC405|PPC7450|BOOKE|PPCA2|PPC476, 0, {RA0, RB}}, +{"dcbal", XOPL(31,758,1), XRT_MASK, E500MC, 0, {RA0, RB}}, -{ "dcbzl", XOPL(31,1014,1), XRT_MASK,POWER4, { RA, RB } }, -{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } }, -{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } }, +{"stfdux", X(31,759), X_MASK, COM, PPCEFS, {FRS, RAS, RB}}, -{ "dcbze", X(31,1022), XRT_MASK, BOOKE64, { RA, RB } }, +{"srliq", XRC(31,760,0), X_MASK, M601, 0, {RA, RS, SH}}, +{"srliq.", XRC(31,760,1), X_MASK, M601, 0, {RA, RS, SH}}, -{ "lvebx", X(31, 7), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvehx", X(31, 39), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvewx", X(31, 71), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvsl", X(31, 6), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvsr", X(31, 38), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvx", X(31, 103), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "lvxl", X(31, 359), X_MASK, PPCVEC, { VD, RA, RB } }, -{ "stvebx", X(31, 135), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvehx", X(31, 167), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvewx", X(31, 199), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvx", X(31, 231), X_MASK, PPCVEC, { VS, RA, RB } }, -{ "stvxl", X(31, 487), X_MASK, PPCVEC, { VS, RA, RB } }, +{"lvsm", X(31,773), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -/* New load/store left/right index vector instructions that are in the Cell only. */ -{ "lvlx", X(31, 519), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvlxl", X(31, 775), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvrx", X(31, 551), X_MASK, CELL, { VD, RA0, RB } }, -{ "lvrxl", X(31, 807), X_MASK, CELL, { VD, RA0, RB } }, -{ "stvlx", X(31, 647), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvlxl", X(31, 903), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvrx", X(31, 679), X_MASK, CELL, { VS, RA0, RB } }, -{ "stvrxl", X(31, 935), X_MASK, CELL, { VS, RA0, RB } }, +{"copy", XOPL(31,774,1), XRT_MASK, POWER9, 0, {RA0, RB}}, -{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA0 } }, -{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA0 } }, +{"stvepxl", X(31,775), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, +{"lvlxl", X(31,775), X_MASK, CELL, 0, {VD, RA0, RB}}, +{"ldfcmux", APU(31,775,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "lwzu", OP(33), OP_MASK, PPCCOM, { RT, D, RAL } }, -{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA0 } }, +{"dozo", XO(31,264,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"dozo.", XO(31,264,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, -{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA0 } }, +{"addo", XO(31,266,1,0), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"caxo", XO(31,266,1,0), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, +{"addo.", XO(31,266,1,1), XO_MASK, PPCCOM, 0, {RT, RA, RB}}, +{"caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, 0, {RT, RA, RB}}, -{ "lbzu", OP(35), OP_MASK, COM, { RT, D, RAL } }, +{"modsd", X(31,777), X_MASK, POWER9, 0, {RT, RA, RB}}, +{"modsw", X(31,779), X_MASK, POWER9, 0, {RT, RA, RB}}, -{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA0 } }, -{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA0 } }, +{"lxvw4x", X(31,780), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, +{"lxsibzx", X(31,781), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, -{ "stwu", OP(37), OP_MASK, PPCCOM, { RS, D, RAS } }, -{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA0 } }, +{"tabortwc.", XRC(31,782,1), X_MASK, PPCHTM, 0, {TO, RA, RB}}, -{ "stb", OP(38), OP_MASK, COM, { RS, D, RA0 } }, +{"tlbivax", X(31,786), XRT_MASK, BOOKE|PPCA2|PPC476, 0, {RA0, RB}}, -{ "stbu", OP(39), OP_MASK, COM, { 
RS, D, RAS } }, +{"lwzcix", X(31,789), X_MASK, POWER6, 0, {RT, RA0, RB}}, -{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA0 } }, +{"lhbrx", X(31,790), X_MASK, COM, 0, {RT, RA0, RB}}, -{ "lhzu", OP(41), OP_MASK, COM, { RT, D, RAL } }, +{"lfdpx", X(31,791), X_MASK, POWER6, POWER7, {FRTp, RA0, RB}}, +{"lfqx", X(31,791), X_MASK, POWER2, 0, {FRT, RA, RB}}, -{ "lha", OP(42), OP_MASK, COM, { RT, D, RA0 } }, +{"sraw", XRC(31,792,0), X_MASK, PPCCOM, 0, {RA, RS, RB}}, +{"sra", XRC(31,792,0), X_MASK, PWRCOM, 0, {RA, RS, RB}}, +{"sraw.", XRC(31,792,1), X_MASK, PPCCOM, 0, {RA, RS, RB}}, +{"sra.", XRC(31,792,1), X_MASK, PWRCOM, 0, {RA, RS, RB}}, -{ "lhau", OP(43), OP_MASK, COM, { RT, D, RAL } }, +{"srad", XRC(31,794,0), X_MASK, PPC64, 0, {RA, RS, RB}}, +{"srad.", XRC(31,794,1), X_MASK, PPC64, 0, {RA, RS, RB}}, -{ "sth", OP(44), OP_MASK, COM, { RS, D, RA0 } }, +{"lfddx", X(31,803), X_MASK, E500MC, 0, {FRT, RA, RB}}, -{ "sthu", OP(45), OP_MASK, COM, { RS, D, RAS } }, +{"lvtrxl", X(31,805), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, +{"stvepx", X(31,807), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, +{"lvrxl", X(31,807), X_MASK, CELL, 0, {VD, RA0, RB}}, -{ "lmw", OP(46), OP_MASK, PPCCOM, { RT, D, RAM } }, -{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA0 } }, +{"lxvh8x", X(31,812), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, +{"lxsihzx", X(31,813), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, -{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA0 } }, -{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA0 } }, +{"tabortdc.", XRC(31,814,1), X_MASK, PPCHTM, 0, {TO, RA, RB}}, -{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA0 } }, +{"rac", X(31,818), X_MASK, M601, 0, {RT, RA, RB}}, -{ "lfsu", OP(49), OP_MASK, COM, { FRT, D, RAS } }, +{"erativax", X(31,819), X_MASK, PPCA2, 0, {RS, RA0, RB}}, -{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA0 } }, +{"lhzcix", X(31,821), X_MASK, POWER6, 0, {RT, RA0, RB}}, -{ "lfdu", OP(51), OP_MASK, COM, { FRT, D, RAS } }, +{"dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, 0, {STRM}}, -{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA0 } }, +{"lfqux", X(31,823), X_MASK, POWER2, 0, {FRT, RA, RB}}, -{ "stfsu", OP(53), OP_MASK, COM, { FRS, D, RAS } }, +{"srawi", XRC(31,824,0), X_MASK, PPCCOM, 0, {RA, RS, SH}}, +{"srai", XRC(31,824,0), X_MASK, PWRCOM, 0, {RA, RS, SH}}, +{"srawi.", XRC(31,824,1), X_MASK, PPCCOM, 0, {RA, RS, SH}}, +{"srai.", XRC(31,824,1), X_MASK, PWRCOM, 0, {RA, RS, SH}}, -{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA0 } }, +{"sradi", XS(31,413,0), XS_MASK, PPC64, 0, {RA, RS, SH6}}, +{"sradi.", XS(31,413,1), XS_MASK, PPC64, 0, {RA, RS, SH6}}, -{ "stfdu", OP(55), OP_MASK, COM, { FRS, D, RAS } }, +{"lvtlxl", X(31,837), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "lq", OP(56), OP_MASK, POWER4, { RTQ, DQ, RAQ } }, +{"cpabort", X(31,838), XRTRARB_MASK,POWER9, 0, {0}}, -{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA0 } }, +{"divo", XO(31,331,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"divo.", XO(31,331,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, -{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA0 } }, +{"lxvd2x", X(31,844), XX1_MASK, PPCVSX, 0, {XT6, RA0, RB}}, +{"lxvx", X(31,844), XX1_MASK, POWER8, POWER9|PPCVSX3, {XT6, RA0, RB}}, -{ "lfdp", OP(57), OP_MASK, POWER6, { FRT, D, RA0 } }, +{"tabortwci.", XRC(31,846,1), X_MASK, PPCHTM, 0, {TO, RA, HTM_SI}}, -{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lbzue", DEO(58,1), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lhzue", DEO(58,3), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lhaue", 
DEO(58,5), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA0 } }, -{ "lwzue", DEO(58,7), DE_MASK, BOOKE64, { RT, DE, RAL } }, -{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "stbue", DEO(58,9), DE_MASK, BOOKE64, { RS, DE, RAS } }, -{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "sthue", DEO(58,11), DE_MASK, BOOKE64, { RS, DE, RAS } }, -{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA0 } }, -{ "stwue", DEO(58,15), DE_MASK, BOOKE64, { RS, DE, RAS } }, +{"tlbsrx.", XRC(31,850,1), XRT_MASK, PPCA2, 0, {RA0, RB}}, -{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA0 } }, +{"slbiag", X(31,850), XRARB_MASK, POWER9, 0, {RS}}, +{"slbmfev", X(31,851), XRLA_MASK, POWER9, 0, {RT, RB, A_L}}, +{"slbmfev", X(31,851), XRA_MASK, PPC64, POWER9, {RT, RB}}, -{ "ldu", DSO(58,1), DS_MASK, PPC64, { RT, DS, RAL } }, +{"lbzcix", X(31,853), X_MASK, POWER6, 0, {RT, RA0, RB}}, -{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA0 } }, +{"eieio", X(31,854), 0xffffffff, PPC, BOOKE|PPCA2|PPC476, {0}}, +{"mbar", X(31,854), X_MASK, BOOKE|PPCA2|PPC476, 0, {MO}}, +{"eieio", XMBAR(31,854,1),0xffffffff, E500, 0, {0}}, +{"eieio", X(31,854), 0xffffffff, PPCA2|PPC476, 0, {0}}, -{ "dadd", XRC(59,2,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dadd.", XRC(59,2,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"lfiwax", X(31,855), X_MASK, POWER6|PPCA2|PPC476, 0, {FRT, RA0, RB}}, -{ "dqua", ZRC(59,3,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "dqua.", ZRC(59,3,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, +{"lvswxl", X(31,869), X_MASK, PPCVEC2, 0, {VD, RA0, RB}}, -{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, +{"abso", XO(31,360,1,0), XORB_MASK, M601, 0, {RT, RA}}, +{"abso.", XO(31,360,1,1), XORB_MASK, M601, 0, {RT, RA}}, -{ "fsubs", A(59,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fsubs.", A(59,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, +{"divso", XO(31,363,1,0), XO_MASK, M601, 0, {RT, RA, RB}}, +{"divso.", XO(31,363,1,1), XO_MASK, M601, 0, {RT, RA, RB}}, -{ "fadds", A(59,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } }, -{ "fadds.", A(59,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } }, +{"lxvb16x", X(31,876), XX1_MASK, PPCVSX3, 0, {XT6, RA0, RB}}, -{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } }, -{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } }, +{"tabortdci.", XRC(31,878,1), X_MASK, PPCHTM, 0, {TO, RA, HTM_SI}}, -{ "fres", A(59,24,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, -{ "fres.", A(59,24,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, +{"rmieg", X(31,882), XRTRA_MASK, POWER9, 0, {RB}}, -{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } }, -{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } }, +{"ldcix", X(31,885), X_MASK, POWER6, 0, {RT, RA0, RB}}, -{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } }, -{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } }, +{"msgsync", X(31,886), 0xffffffff, POWER9, 0, {0}}, -{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, +{"lfiwzx", X(31,887), X_MASK, POWER7|PPCA2, 0, {FRT, RA0, RB}}, -{ "fmadds", A(59,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fmadds.", A(59,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, +{"extswsli", XS(31,445,0), XS_MASK, POWER9, 0, {RA, RS, SH6}}, +{"extswsli.", XS(31,445,1), XS_MASK, POWER9, 0, {RA, RS, SH6}}, -{ "fnmsubs", A(59,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fnmsubs.",A(59,30,1), 
A_MASK, PPC, { FRT,FRA,FRC,FRB } }, +{"paste.", XRCL(31,902,1,1),XRT_MASK, POWER9, 0, {RA0, RB}}, -{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, +{"stvlxl", X(31,903), X_MASK, CELL, 0, {VS, RA0, RB}}, +{"stdfcmux", APU(31,903,0), APU_MASK, PPC405, 0, {FCRT, RA, RB}}, -{ "dmul", XRC(59,34,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dmul.", XRC(59,34,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"divdeuo", XO(31,393,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divdeuo.", XO(31,393,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweuo", XO(31,395,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweuo.", XO(31,395,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, -{ "drrnd", ZRC(59,35,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "drrnd.", ZRC(59,35,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, +{"stxvw4x", X(31,908), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, +{"stxsibx", X(31,909), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, -{ "dscli", ZRC(59,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscli.", ZRC(59,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, +{"tabort.", XRC(31,910,1), XRTRB_MASK, PPCHTM, 0, {RA}}, -{ "dquai", ZRC(59,67,0), Z_MASK, POWER6, { TE, FRT, FRB, RMC } }, -{ "dquai.", ZRC(59,67,1), Z_MASK, POWER6, { TE, FRT, FRB, RMC } }, +{"tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RTO, RA0, RB}}, +{"tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RTO, RA0, RB}}, -{ "dscri", ZRC(59,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscri.", ZRC(59,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, +{"slbmfee", X(31,915), XRLA_MASK, POWER9, 0, {RT, RB, A_L}}, +{"slbmfee", X(31,915), XRA_MASK, PPC64, POWER9, {RT, RB}}, -{ "drintx", ZRC(59,99,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintx.", ZRC(59,99,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, +{"stwcix", X(31,917), X_MASK, POWER6, 0, {RS, RA0, RB}}, -{ "dcmpo", X(59,130), X_MASK, POWER6, { BF, FRA, FRB } }, +{"sthbrx", X(31,918), X_MASK, COM, 0, {RS, RA0, RB}}, -{ "dtstex", X(59,162), X_MASK, POWER6, { BF, FRA, FRB } }, -{ "dtstdc", Z(59,194), Z_MASK, POWER6, { BF, FRA, DCM } }, -{ "dtstdg", Z(59,226), Z_MASK, POWER6, { BF, FRA, DGM } }, +{"stfdpx", X(31,919), X_MASK, POWER6, POWER7, {FRSp, RA0, RB}}, +{"stfqx", X(31,919), X_MASK, POWER2, 0, {FRS, RA0, RB}}, -{ "drintn", ZRC(59,227,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintn.", ZRC(59,227,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, +{"sraq", XRC(31,920,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"sraq.", XRC(31,920,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "dctdp", XRC(59,258,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctdp.", XRC(59,258,1), X_MASK, POWER6, { FRT, FRB } }, +{"srea", XRC(31,921,0), X_MASK, M601, 0, {RA, RS, RB}}, +{"srea.", XRC(31,921,1), X_MASK, M601, 0, {RA, RS, RB}}, -{ "dctfix", XRC(59,290,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctfix.", XRC(59,290,1), X_MASK, POWER6, { FRT, FRB } }, +{"extsh", XRC(31,922,0), XRB_MASK, PPCCOM, 0, {RA, RS}}, +{"exts", XRC(31,922,0), XRB_MASK, PWRCOM, 0, {RA, RS}}, +{"extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, 0, {RA, RS}}, +{"exts.", XRC(31,922,1), XRB_MASK, PWRCOM, 0, {RA, RS}}, -{ "ddedpd", XRC(59,322,0), X_MASK, POWER6, { SP, FRT, FRB } }, -{ "ddedpd.", XRC(59,322,1), X_MASK, POWER6, { SP, FRT, FRB } }, +{"stfddx", X(31,931), X_MASK, E500MC, 0, {FRS, RA, RB}}, -{ "dxex", XRC(59,354,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dxex.", XRC(59,354,1), X_MASK, POWER6, { FRT, FRB } }, +{"stvfrxl", X(31,933), X_MASK, PPCVEC2, 0, {VS, 
RA0, RB}}, -{ "dsub", XRC(59,514,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dsub.", XRC(59,514,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"wclrone", XOPL2(31,934,2),XRT_MASK, PPCA2, 0, {RA0, RB}}, +{"wclrall", X(31,934), XRARB_MASK, PPCA2, 0, {L2}}, +{"wclr", X(31,934), X_MASK, PPCA2, 0, {L2, RA0, RB}}, -{ "ddiv", XRC(59,546,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "ddiv.", XRC(59,546,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"stvrxl", X(31,935), X_MASK, CELL, 0, {VS, RA0, RB}}, -{ "dcmpu", X(59,642), X_MASK, POWER6, { BF, FRA, FRB } }, +{"divdeo", XO(31,425,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divdeo.", XO(31,425,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweo", XO(31,427,1,0), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, +{"divweo.", XO(31,427,1,1), XO_MASK, POWER7|PPCA2, 0, {RT, RA, RB}}, -{ "dtstsf", X(59,674), X_MASK, POWER6, { BF, FRA, FRB } }, +{"stxvh8x", X(31,940), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, +{"stxsihx", X(31,941), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, -{ "drsp", XRC(59,770,0), X_MASK, POWER6, { FRT, FRB } }, -{ "drsp.", XRC(59,770,1), X_MASK, POWER6, { FRT, FRB } }, +{"treclaim.", XRC(31,942,1), XRTRB_MASK, PPCHTM, 0, {RA}}, -{ "dcffix", XRC(59,802,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dcffix.", XRC(59,802,1), X_MASK, POWER6, { FRT, FRB } }, +{"tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, PPCA2, {RT, RA}}, +{"tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, PPCA2, {RT, RA}}, +{"tlbre", X(31,946), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RSO, RAOPT, SHO}}, -{ "denbcd", XRC(59,834,0), X_MASK, POWER6, { S, FRT, FRB } }, -{ "denbcd.", XRC(59,834,1), X_MASK, POWER6, { S, FRT, FRB } }, +{"sthcix", X(31,949), X_MASK, POWER6, 0, {RS, RA0, RB}}, -{ "diex", XRC(59,866,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "diex.", XRC(59,866,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"icswepx", XRC(31,950,0), X_MASK, PPCA2, 0, {RS, RA, RB}}, +{"icswepx.", XRC(31,950,1), X_MASK, PPCA2, 0, {RS, RA, RB}}, -{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } }, +{"stfqux", X(31,951), X_MASK, POWER2, 0, {FRS, RA, RB}}, -{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } }, +{"sraiq", XRC(31,952,0), X_MASK, M601, 0, {RA, RS, SH}}, +{"sraiq.", XRC(31,952,1), X_MASK, M601, 0, {RA, RS, SH}}, -{ "stfdp", OP(61), OP_MASK, POWER6, { FRT, D, RA0 } }, +{"extsb", XRC(31,954,0), XRB_MASK, PPC, 0, {RA, RS}}, +{"extsb.", XRC(31,954,1), XRB_MASK, PPC, 0, {RA, RS}}, -{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA0 } }, -{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA0 } }, -{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA0 } }, -{ "lfsue", DEO(62,5), DE_MASK, BOOKE64, { FRT, DES, RAS } }, -{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA0 } }, -{ "lfdue", DEO(62,7), DE_MASK, BOOKE64, { FRT, DES, RAS } }, -{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA0 } }, -{ "stdue", DEO(62,9), DE_MASK, BOOKE64, { RS, DES, RAS } }, -{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA0 } }, -{ "stfsue", DEO(62,13), DE_MASK, BOOKE64, { FRS, DES, RAS } }, -{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA0 } }, -{ "stfdue", DEO(62,15), DE_MASK, BOOKE64, { FRS, DES, RAS } }, +{"stvflxl", X(31,965), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, -{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA0 } }, +{"iccci", X(31,966), XRT_MASK, PPC403|PPC440|TITAN|PPCA2, 0, {RAOPT, RBOPT}}, +{"ici", X(31,966), XRARB_MASK, PPCA2|PPC476, 0, {CT}}, -{ "stdu", DSO(62,1), DS_MASK, PPC64, { RS, DS, RAS } }, +{"divduo", XO(31,457,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"divduo.", XO(31,457,1,1), 
XO_MASK, PPC64, 0, {RT, RA, RB}}, -{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA0 } }, +{"divwuo", XO(31,459,1,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"divwuo.", XO(31,459,1,1), XO_MASK, PPC, 0, {RT, RA, RB}}, -{ "fcmpu", X(63,0), X_MASK|(3<<21), COM, { BF, FRA, FRB } }, +{"stxvd2x", X(31,972), XX1_MASK, PPCVSX, 0, {XS6, RA0, RB}}, +{"stxvx", X(31,972), XX1_MASK, POWER8, POWER9|PPCVSX3, {XS6, RA0, RB}}, -{ "daddq", XRC(63,2,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "daddq.", XRC(63,2,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"tlbld", X(31,978), XRTRA_MASK, PPC, PPC403|BOOKE|PPCA2|PPC476, {RB}}, +{"tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, 0, {RT, RA}}, +{"tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, 0, {RT, RA}}, +{"tlbwe", X(31,978), X_MASK, PPC403|BOOKE|PPCA2|PPC476, 0, {RSO, RAOPT, SHO}}, -{ "dquaq", ZRC(63,3,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "dquaq.", ZRC(63,3,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, +{"slbfee.", XRC(31,979,1), XRA_MASK, POWER6, 0, {RT, RB}}, -{ "fcpsgn", XRC(63,8,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "fcpsgn.", XRC(63,8,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"stbcix", X(31,981), X_MASK, POWER6, 0, {RS, RA0, RB}}, -{ "frsp", XRC(63,12,0), XRA_MASK, COM, { FRT, FRB } }, -{ "frsp.", XRC(63,12,1), XRA_MASK, COM, { FRT, FRB } }, +{"icbi", X(31,982), XRT_MASK, PPC, 0, {RA0, RB}}, -{ "fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcir", XRC(63,14,0), XRA_MASK, POWER2, { FRT, FRB } }, -{ "fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcir.", XRC(63,14,1), XRA_MASK, POWER2, { FRT, FRB } }, +{"stfiwx", X(31,983), X_MASK, PPC, PPCEFS, {FRS, RA0, RB}}, -{ "fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcirz", XRC(63,15,0), XRA_MASK, POWER2, { FRT, FRB } }, -{ "fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, { FRT, FRB } }, -{ "fcirz.", XRC(63,15,1), XRA_MASK, POWER2, { FRT, FRB } }, +{"extsw", XRC(31,986,0), XRB_MASK, PPC64, 0, {RA, RS}}, +{"extsw.", XRC(31,986,1), XRB_MASK, PPC64, 0, {RA, RS}}, -{ "fdiv", A(63,18,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fd", A(63,18,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fd.", A(63,18,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, +{"icbiep", XRT(31,991,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, -{ "fsub", A(63,20,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fs", A(63,20,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fsub.", A(63,20,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fs.", A(63,20,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, +{"stvswxl", X(31,997), X_MASK, PPCVEC2, 0, {VS, RA0, RB}}, -{ "fadd", A(63,21,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fa", A(63,21,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, -{ "fadd.", A(63,21,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } }, -{ "fa.", A(63,21,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } }, +{"icread", X(31,998), XRT_MASK, PPC403|PPC440|PPC476|TITAN, 0, {RA0, RB}}, -{ "fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } }, -{ "fsqrt.", A(63,22,1), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } }, +{"nabso", XO(31,488,1,0), XORB_MASK, M601, 0, {RT, RA}}, +{"nabso.", XO(31,488,1,1), XORB_MASK, M601, 0, {RT, RA}}, -{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, -{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } }, +{"divdo", XO(31,489,1,0), XO_MASK, PPC64, 0, {RT, RA, RB}}, +{"divdo.", XO(31,489,1,1), XO_MASK, PPC64, 0, {RT, RA, RB}}, -{ "fre", A(63,24,0), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } }, -{ "fre.", A(63,24,1), 
AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } }, +{"divwo", XO(31,491,1,0), XO_MASK, PPC, 0, {RT, RA, RB}}, +{"divwo.", XO(31,491,1,1), XO_MASK, PPC, 0, {RT, RA, RB}}, -{ "fmul", A(63,25,0), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } }, -{ "fm", A(63,25,0), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } }, -{ "fmul.", A(63,25,1), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } }, -{ "fm.", A(63,25,1), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } }, +{"stxvb16x", X(31,1004), XX1_MASK, PPCVSX3, 0, {XS6, RA0, RB}}, -{ "frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, -{ "frsqrte.",A(63,26,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } }, +{"trechkpt.", XRC(31,1006,1), XRTRARB_MASK,PPCHTM, 0, {0}}, -{ "fmsub", A(63,28,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fms", A(63,28,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fmsub.", A(63,28,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fms.", A(63,28,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, +{"tlbli", X(31,1010), XRTRA_MASK, PPC, TITAN, {RB}}, -{ "fmadd", A(63,29,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fma", A(63,29,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fmadd.", A(63,29,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fma.", A(63,29,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, +{"stdcix", X(31,1013), X_MASK, POWER6, 0, {RS, RA0, RB}}, -{ "fnmsub", A(63,30,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnms", A(63,30,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fnmsub.", A(63,30,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnms.", A(63,30,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, +{"dcbz", X(31,1014), XRT_MASK, PPC, 0, {RA0, RB}}, +{"dclz", X(31,1014), XRT_MASK, PPC, 0, {RA0, RB}}, -{ "fnmadd", A(63,31,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnma", A(63,31,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, -{ "fnmadd.", A(63,31,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } }, -{ "fnma.", A(63,31,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } }, +{"dcbzep", XRT(31,1023,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, -{ "fcmpo", X(63,32), X_MASK|(3<<21), COM, { BF, FRA, FRB } }, +{"dcbzl", XOPL(31,1014,1), XRT_MASK, POWER4|E500MC, PPC476, {RA0, RB}}, -{ "dmulq", XRC(63,34,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dmulq.", XRC(63,34,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"cctpl", 0x7c210b78, 0xffffffff, CELL, 0, {0}}, +{"cctpm", 0x7c421378, 0xffffffff, CELL, 0, {0}}, +{"cctph", 0x7c631b78, 0xffffffff, CELL, 0, {0}}, -{ "drrndq", ZRC(63,35,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, -{ "drrndq.", ZRC(63,35,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, +{"dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, +{"dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, 0, {RA, RB, STRM}}, +{"dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, 0, {0}}, -{ "mtfsb1", XRC(63,38,0), XRARB_MASK, COM, { BT } }, -{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, { BT } }, +{"db8cyc", 0x7f9ce378, 0xffffffff, CELL, 0, {0}}, +{"db10cyc", 0x7fbdeb78, 0xffffffff, CELL, 0, {0}}, +{"db12cyc", 0x7fdef378, 0xffffffff, CELL, 0, {0}}, +{"db16cyc", 0x7ffffb78, 0xffffffff, CELL, 0, {0}}, -{ "fneg", XRC(63,40,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fneg.", XRC(63,40,1), XRA_MASK, COM, { FRT, FRB } }, +{"lwz", OP(32), OP_MASK, PPCCOM, PPCVLE, {RT, D, RA0}}, +{"l", OP(32), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, -{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, { BF, BFA } }, +{"lwzu", OP(33), OP_MASK, PPCCOM, PPCVLE, {RT, D, RAL}}, +{"lu", OP(33), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, -{ "dscliq", ZRC(63,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 
} }, +{"lbz", OP(34), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, -{ "dquaiq", ZRC(63,67,0), Z_MASK, POWER6, { TE, FRT, FRB, RMC } }, -{ "dquaiq.", ZRC(63,67,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } }, +{"lbzu", OP(35), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, -{ "mtfsb0", XRC(63,70,0), XRARB_MASK, COM, { BT } }, -{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, { BT } }, +{"stw", OP(36), OP_MASK, PPCCOM, PPCVLE, {RS, D, RA0}}, +{"st", OP(36), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, -{ "fmr", XRC(63,72,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fmr.", XRC(63,72,1), XRA_MASK, COM, { FRT, FRB } }, +{"stwu", OP(37), OP_MASK, PPCCOM, PPCVLE, {RS, D, RAS}}, +{"stu", OP(37), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, -{ "dscriq", ZRC(63,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } }, -{ "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } }, +{"stb", OP(38), OP_MASK, COM, PPCVLE, {RS, D, RA0}}, -{ "drintxq", ZRC(63,99,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintxq.",ZRC(63,99,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, +{"stbu", OP(39), OP_MASK, COM, PPCVLE, {RS, D, RAS}}, -{ "dcmpoq", X(63,130), X_MASK, POWER6, { BF, FRA, FRB } }, +{"lhz", OP(40), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, -{ "mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } }, -{ "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } }, +{"lhzu", OP(41), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, -{ "fnabs", XRC(63,136,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fnabs.", XRC(63,136,1), XRA_MASK, COM, { FRT, FRB } }, +{"lha", OP(42), OP_MASK, COM, PPCVLE, {RT, D, RA0}}, -{ "dtstexq", X(63,162), X_MASK, POWER6, { BF, FRA, FRB } }, -{ "dtstdcq", Z(63,194), Z_MASK, POWER6, { BF, FRA, DCM } }, -{ "dtstdgq", Z(63,226), Z_MASK, POWER6, { BF, FRA, DGM } }, +{"lhau", OP(43), OP_MASK, COM, PPCVLE, {RT, D, RAL}}, -{ "drintnq", ZRC(63,227,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, -{ "drintnq.",ZRC(63,227,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } }, +{"sth", OP(44), OP_MASK, COM, PPCVLE, {RS, D, RA0}}, -{ "dctqpq", XRC(63,258,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctqpq.", XRC(63,258,1), X_MASK, POWER6, { FRT, FRB } }, +{"sthu", OP(45), OP_MASK, COM, PPCVLE, {RS, D, RAS}}, -{ "fabs", XRC(63,264,0), XRA_MASK, COM, { FRT, FRB } }, -{ "fabs.", XRC(63,264,1), XRA_MASK, COM, { FRT, FRB } }, +{"lmw", OP(46), OP_MASK, PPCCOM, PPCVLE, {RT, D, RAM}}, +{"lm", OP(46), OP_MASK, PWRCOM, PPCVLE, {RT, D, RA0}}, -{ "dctfixq", XRC(63,290,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dctfixq.",XRC(63,290,1), X_MASK, POWER6, { FRT, FRB } }, +{"stmw", OP(47), OP_MASK, PPCCOM, PPCVLE, {RS, D, RA0}}, +{"stm", OP(47), OP_MASK, PWRCOM, PPCVLE, {RS, D, RA0}}, -{ "ddedpdq", XRC(63,322,0), X_MASK, POWER6, { SP, FRT, FRB } }, -{ "ddedpdq.",XRC(63,322,1), X_MASK, POWER6, { SP, FRT, FRB } }, +{"lfs", OP(48), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RA0}}, -{ "dxexq", XRC(63,354,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dxexq.", XRC(63,354,1), X_MASK, POWER6, { FRT, FRB } }, +{"lfsu", OP(49), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RAS}}, -{ "frin", XRC(63,392,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frin.", XRC(63,392,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "friz", XRC(63,424,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "friz.", XRC(63,424,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frip", XRC(63,456,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frip.", XRC(63,456,1), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frim", XRC(63,488,0), XRA_MASK, POWER5, { FRT, FRB } }, -{ "frim.", XRC(63,488,1), XRA_MASK, POWER5, { FRT, FRB } }, +{"lfd", OP(50), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RA0}}, -{ 
"dsubq", XRC(63,514,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "dsubq.", XRC(63,514,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"lfdu", OP(51), OP_MASK, COM, PPCEFS|PPCVLE, {FRT, D, RAS}}, -{ "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"stfs", OP(52), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RA0}}, -{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } }, -{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } }, +{"stfsu", OP(53), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RAS}}, -{ "dcmpuq", X(63,642), X_MASK, POWER6, { BF, FRA, FRB } }, +{"stfd", OP(54), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RA0}}, -{ "dtstsfq", X(63,674), X_MASK, POWER6, { BF, FRA, FRB } }, +{"stfdu", OP(55), OP_MASK, COM, PPCEFS|PPCVLE, {FRS, D, RAS}}, -{ "mtfsf", XFL(63,711,0), XFL_MASK, COM, { FLM, FRB } }, -{ "mtfsf.", XFL(63,711,1), XFL_MASK, COM, { FLM, FRB } }, +{"lq", OP(56), OP_MASK, POWER4, PPC476|PPCVLE, {RTQ, DQ, RAQ}}, +{"psq_l", OP(56), OP_MASK, PPCPS, PPCVLE, {FRT,PSD,RA,PSW,PSQ}}, +{"lfq", OP(56), OP_MASK, POWER2, PPCVLE, {FRT, D, RA0}}, -{ "drdpq", XRC(63,770,0), X_MASK, POWER6, { FRT, FRB } }, -{ "drdpq.", XRC(63,770,1), X_MASK, POWER6, { FRT, FRB } }, +{"lxsd", DSO(57,2), DS_MASK, PPCVSX3, PPCVLE, {VD, DS, RA0}}, +{"lxssp", DSO(57,3), DS_MASK, PPCVSX3, PPCVLE, {VD, DS, RA0}}, +{"lfdp", OP(57), OP_MASK, POWER6, POWER7|PPCVLE, {FRTp, DS, RA0}}, +{"psq_lu", OP(57), OP_MASK, PPCPS, PPCVLE, {FRT,PSD,RA,PSW,PSQ}}, +{"lfqu", OP(57), OP_MASK, POWER2, PPCVLE, {FRT, D, RA0}}, -{ "dcffixq", XRC(63,802,0), X_MASK, POWER6, { FRT, FRB } }, -{ "dcffixq.",XRC(63,802,1), X_MASK, POWER6, { FRT, FRB } }, +{"ld", DSO(58,0), DS_MASK, PPC64, PPCVLE, {RT, DS, RA0}}, +{"ldu", DSO(58,1), DS_MASK, PPC64, PPCVLE, {RT, DS, RAL}}, +{"lwa", DSO(58,2), DS_MASK, PPC64, PPCVLE, {RT, DS, RA0}}, -{ "fctid", XRC(63,814,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fctid.", XRC(63,814,1), XRA_MASK, PPC64, { FRT, FRB } }, +{"dadd", XRC(59,2,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, +{"dadd.", XRC(59,2,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, -{ "fctidz", XRC(63,815,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC64, { FRT, FRB } }, +{"dqua", ZRC(59,3,0), Z2_MASK, POWER6, PPCVLE, {FRT,FRA,FRB,RMC}}, +{"dqua.", ZRC(59,3,1), Z2_MASK, POWER6, PPCVLE, {FRT,FRA,FRB,RMC}}, -{ "denbcdq", XRC(63,834,0), X_MASK, POWER6, { S, FRT, FRB } }, -{ "denbcdq.",XRC(63,834,1), X_MASK, POWER6, { S, FRT, FRB } }, +{"fdivs", A(59,18,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fdivs.", A(59,18,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, -{ "fcfid", XRC(63,846,0), XRA_MASK, PPC64, { FRT, FRB } }, -{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC64, { FRT, FRB } }, +{"fsubs", A(59,20,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fsubs.", A(59,20,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, -{ "diexq", XRC(63,866,0), X_MASK, POWER6, { FRT, FRA, FRB } }, -{ "diexq.", XRC(63,866,1), X_MASK, POWER6, { FRT, FRA, FRB } }, +{"fadds", A(59,21,0), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fadds.", A(59,21,1), AFRC_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, TITAN|PPCVLE, {FRT, FRB}}, +{"fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, TITAN|PPCVLE, {FRT, FRB}}, + +{"fres", A(59,24,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fres", A(59,24,0), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, +{"fres.", A(59,24,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fres.", A(59,24,1), 
AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, + +{"fmuls", A(59,25,0), AFRB_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, +{"fmuls.", A(59,25,1), AFRB_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, + +{"frsqrtes", A(59,26,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"frsqrtes", A(59,26,0), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, +{"frsqrtes.", A(59,26,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"frsqrtes.", A(59,26,1), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, + +{"fmsubs", A(59,28,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fmsubs.", A(59,28,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fmadds", A(59,29,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fmadds.", A(59,29,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fnmsubs", A(59,30,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnmsubs.", A(59,30,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fnmadds", A(59,31,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnmadds.", A(59,31,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"dmul", XRC(59,34,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, +{"dmul.", XRC(59,34,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, + +{"drrnd", ZRC(59,35,0), Z2_MASK, POWER6, PPCVLE, {FRT, FRA, FRB, RMC}}, +{"drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, PPCVLE, {FRT, FRA, FRB, RMC}}, + +{"dscli", ZRC(59,66,0), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, +{"dscli.", ZRC(59,66,1), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, + +{"dquai", ZRC(59,67,0), Z2_MASK, POWER6, PPCVLE, {TE, FRT,FRB,RMC}}, +{"dquai.", ZRC(59,67,1), Z2_MASK, POWER6, PPCVLE, {TE, FRT,FRB,RMC}}, + +{"dscri", ZRC(59,98,0), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, +{"dscri.", ZRC(59,98,1), Z_MASK, POWER6, PPCVLE, {FRT, FRA, SH16}}, + +{"drintx", ZRC(59,99,0), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, +{"drintx.", ZRC(59,99,1), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, + +{"dcmpo", X(59,130), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, + +{"dtstex", X(59,162), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, +{"dtstdc", Z(59,194), Z_MASK, POWER6, PPCVLE, {BF, FRA, DCM}}, +{"dtstdg", Z(59,226), Z_MASK, POWER6, PPCVLE, {BF, FRA, DGM}}, + +{"drintn", ZRC(59,227,0), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, +{"drintn.", ZRC(59,227,1), Z2_MASK, POWER6, PPCVLE, {R, FRT, FRB, RMC}}, + +{"dctdp", XRC(59,258,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, +{"dctdp.", XRC(59,258,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, + +{"dctfix", XRC(59,290,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, +{"dctfix.", XRC(59,290,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, + +{"ddedpd", XRC(59,322,0), X_MASK, POWER6, PPCVLE, {SP, FRT, FRB}}, +{"ddedpd.", XRC(59,322,1), X_MASK, POWER6, PPCVLE, {SP, FRT, FRB}}, + +{"dxex", XRC(59,354,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, +{"dxex.", XRC(59,354,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, + +{"dsub", XRC(59,514,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, +{"dsub.", XRC(59,514,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, + +{"ddiv", XRC(59,546,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, +{"ddiv.", XRC(59,546,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, + +{"dcmpu", X(59,642), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, + +{"dtstsf", X(59,674), X_MASK, POWER6, PPCVLE, {BF, FRA, FRB}}, +{"dtstsfi", X(59,675), X_MASK|1<<22,POWER9, PPCVLE, {BF, UIM6, FRB}}, + +{"drsp", XRC(59,770,0), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, +{"drsp.", XRC(59,770,1), X_MASK, POWER6, PPCVLE, {FRT, FRB}}, + +{"dcffix", XRC(59,802,0), 
X_MASK|FRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"dcffix.", XRC(59,802,1), X_MASK|FRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, + +{"denbcd", XRC(59,834,0), X_MASK, POWER6, PPCVLE, {S, FRT, FRB}}, +{"denbcd.", XRC(59,834,1), X_MASK, POWER6, PPCVLE, {S, FRT, FRB}}, + +{"fcfids", XRC(59,846,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +{"fcfids.", XRC(59,846,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, + +{"diex", XRC(59,866,0), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, +{"diex.", XRC(59,866,1), X_MASK, POWER6, PPCVLE, {FRT, FRA, FRB}}, + +{"fcfidus", XRC(59,974,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +{"fcfidus.", XRC(59,974,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, + +{"xsaddsp", XX3(60,0), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xsmaddasp", XX3(60,1), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxsldwi", XX3(60,2), XX3SHW_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, SHW}}, +{"xscmpeqdp", XX3(60,3), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsrsqrtesp", XX2(60,10), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xssqrtsp", XX2(60,11), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xxsel", XX4(60,3), XX4_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, XC6}}, +{"xssubsp", XX3(60,8), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xsmaddmsp", XX3(60,9), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxspltd", XX3(60,10), XX3DM_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S, DMEX}}, +{"xxmrghd", XX3(60,10), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxswapd", XX3(60,10)|(2<<8), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, +{"xxmrgld", XX3(60,10)|(3<<8), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxpermdi", XX3(60,10), XX3DM_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6, DM}}, +{"xscmpgtdp", XX3(60,11), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsresp", XX2(60,26), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xsmulsp", XX3(60,16), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xsmsubasp", XX3(60,17), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxmrghw", XX3(60,18), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscmpgedp", XX3(60,19), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsdivsp", XX3(60,24), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xsmsubmsp", XX3(60,25), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxperm", XX3(60,26), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsadddp", XX3(60,32), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsmaddadp", XX3(60,33), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscmpudp", XX3(60,35), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, +{"xscvdpuxws", XX2(60,72), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsrdpi", XX2(60,73), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsrsqrtedp", XX2(60,74), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xssqrtdp", XX2(60,75), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xssubdp", XX3(60,40), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsmaddmdp", XX3(60,41), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscmpodp", XX3(60,43), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, +{"xscvdpsxws", XX2(60,88), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsrdpiz", XX2(60,89), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsredp", XX2(60,90), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsmuldp", XX3(60,48), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsmsubadp", XX3(60,49), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxmrglw", XX3(60,50), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsrdpip", XX2(60,105), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xstsqrtdp", XX2(60,106), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, 
+{"xsrdpic", XX2(60,107), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsdivdp", XX3(60,56), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsmsubmdp", XX3(60,57), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxpermr", XX3(60,58), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xscmpexpdp", XX3(60,59), XX3BF_MASK, PPCVSX3, PPCVLE, {BF, XA6, XB6}}, +{"xsrdpim", XX2(60,121), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xstdivdp", XX3(60,61), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, +{"xvaddsp", XX3(60,64), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmaddasp", XX3(60,65), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpeqsp", XX3RC(60,67,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpeqsp.", XX3RC(60,67,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvspuxws", XX2(60,136), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrspi", XX2(60,137), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrsqrtesp", XX2(60,138), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvsqrtsp", XX2(60,139), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvsubsp", XX3(60,72), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmaddmsp", XX3(60,73), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgtsp", XX3RC(60,75,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgtsp.", XX3RC(60,75,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvspsxws", XX2(60,152), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrspiz", XX2(60,153), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvresp", XX2(60,154), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvmulsp", XX3(60,80), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmsubasp", XX3(60,81), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxspltw", XX2(60,164), XX2UIM_MASK, PPCVSX, PPCVLE, {XT6, XB6, UIM}}, +{"xxextractuw", XX2(60,165), XX2UIM4_MASK, PPCVSX3, PPCVLE, {XT6, XB6, UIMM4}}, +{"xvcmpgesp", XX3RC(60,83,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgesp.", XX3RC(60,83,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvuxwsp", XX2(60,168), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrspip", XX2(60,169), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtsqrtsp", XX2(60,170), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, +{"xvrspic", XX2(60,171), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvdivsp", XX3(60,88), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmsubmsp", XX3(60,89), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxspltib", X(60,360), XX1_MASK|3<<19, PPCVSX3, PPCVLE, {XT6, IMM8}}, +{"xxinsertw", XX2(60,181), XX2UIM4_MASK, PPCVSX3, PPCVLE, {XT6, XB6, UIMM4}}, +{"xvcvsxwsp", XX2(60,184), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrspim", XX2(60,185), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtdivsp", XX3(60,93), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, +{"xvadddp", XX3(60,96), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmaddadp", XX3(60,97), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpeqdp", XX3RC(60,99,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpeqdp.", XX3RC(60,99,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvdpuxws", XX2(60,200), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrdpi", XX2(60,201), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrsqrtedp", XX2(60,202), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvsqrtdp", XX2(60,203), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvsubdp", XX3(60,104), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmaddmdp", XX3(60,105), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgtdp", XX3RC(60,107,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgtdp.", XX3RC(60,107,1), XX3_MASK, 
PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvdpsxws", XX2(60,216), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrdpiz", XX2(60,217), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvredp", XX2(60,218), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvmuldp", XX3(60,112), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmsubadp", XX3(60,113), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgedp", XX3RC(60,115,0), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcmpgedp.", XX3RC(60,115,1), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvuxwdp", XX2(60,232), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrdpip", XX2(60,233), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtsqrtdp", XX2(60,234), XX2BF_MASK, PPCVSX, PPCVLE, {BF, XB6}}, +{"xvrdpic", XX2(60,235), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvdivdp", XX3(60,120), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvmsubmdp", XX3(60,121), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvsxwdp", XX2(60,248), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvrdpim", XX2(60,249), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtdivdp", XX3(60,125), XX3BF_MASK, PPCVSX, PPCVLE, {BF, XA6, XB6}}, +{"xsmaxcdp", XX3(60,128), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmaddasp", XX3(60,129), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxland", XX3(60,130), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscvdpsp", XX2(60,265), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xscvdpspn", XX2(60,267), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xsmincdp", XX3(60,136), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmaddmsp", XX3(60,137), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxlandc", XX3(60,138), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsrsp", XX2(60,281), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xsmaxjdp", XX3(60,144), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmsubasp", XX3(60,145), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxlor", XX3(60,146), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscvuxdsp", XX2(60,296), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xststdcsp", XX2(60,298), XX2BFD_MASK, PPCVSX3, PPCVLE, {BF, XB6, DCMX}}, +{"xsminjdp", XX3(60,152), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmsubmsp", XX3(60,153), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xxlxor", XX3(60,154), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscvsxdsp", XX2(60,312), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xsmaxdp", XX3(60,160), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmaddadp", XX3(60,161), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxlnor", XX3(60,162), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xscvdpuxds", XX2(60,328), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xscvspdp", XX2(60,329), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xscvspdpn", XX2(60,331), XX2_MASK, PPCVSX2, PPCVLE, {XT6, XB6}}, +{"xsmindp", XX3(60,168), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmaddmdp", XX3(60,169), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxlorc", XX3(60,170), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xscvdpsxds", XX2(60,344), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsabsdp", XX2(60,345), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsxexpdp", XX2VA(60,347,0),XX2_MASK|1, PPCVSX3, PPCVLE, {RT, XB6}}, +{"xsxsigdp", XX2VA(60,347,1),XX2_MASK|1, PPCVSX3, PPCVLE, {RT, XB6}}, +{"xscvhpdp", XX2VA(60,347,16),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xscvdphp", XX2VA(60,347,17),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xscpsgndp", XX3(60,176), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xsnmsubadp", XX3(60,177), 
XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxlnand", XX3(60,178), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xscvuxddp", XX2(60,360), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsnabsdp", XX2(60,361), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xststdcdp", XX2(60,362), XX2BFD_MASK, PPCVSX3, PPCVLE, {BF, XB6, DCMX}}, +{"xsnmsubmdp", XX3(60,185), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xxleqv", XX3(60,186), XX3_MASK, PPCVSX2, PPCVLE, {XT6, XA6, XB6}}, +{"xscvsxddp", XX2(60,376), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsnegdp", XX2(60,377), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvmaxsp", XX3(60,192), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmaddasp", XX3(60,193), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvspuxds", XX2(60,392), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvcvdpsp", XX2(60,393), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvminsp", XX3(60,200), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmaddmsp", XX3(60,201), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvspsxds", XX2(60,408), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvabssp", XX2(60,409), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvmovsp", XX3(60,208), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, +{"xvcpsgnsp", XX3(60,208), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmsubasp", XX3(60,209), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvuxdsp", XX2(60,424), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvnabssp", XX2(60,425), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtstdcsp", XX2(60,426), XX2DCMXS_MASK, PPCVSX3, PPCVLE, {XT6, XB6, DCMXS}}, +{"xviexpsp", XX3(60,216), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmsubmsp", XX3(60,217), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvsxdsp", XX2(60,440), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvnegsp", XX2(60,441), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvmaxdp", XX3(60,224), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmaddadp", XX3(60,225), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvdpuxds", XX2(60,456), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvcvspdp", XX2(60,457), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xsiexpdp", X(60,918), XX1_MASK, PPCVSX3, PPCVLE, {XT6, RA, RB}}, +{"xvmindp", XX3(60,232), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmaddmdp", XX3(60,233), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvdpsxds", XX2(60,472), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvabsdp", XX2(60,473), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvxexpdp", XX2VA(60,475,0),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvxsigdp", XX2VA(60,475,1),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xxbrh", XX2VA(60,475,7),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvxexpsp", XX2VA(60,475,8),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvxsigsp", XX2VA(60,475,9),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xxbrw", XX2VA(60,475,15),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xxbrd", XX2VA(60,475,23),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvcvhpsp", XX2VA(60,475,24),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvcvsphp", XX2VA(60,475,25),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xxbrq", XX2VA(60,475,31),XX2_MASK, PPCVSX3, PPCVLE, {XT6, XB6}}, +{"xvmovdp", XX3(60,240), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6S}}, +{"xvcpsgndp", XX3(60,240), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmsubadp", XX3(60,241), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvuxddp", XX2(60,488), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvnabsdp", XX2(60,489), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvtstdcdp", XX2(60,490), 
XX2DCMXS_MASK, PPCVSX3, PPCVLE, {XT6, XB6, DCMXS}}, +{"xviexpdp", XX3(60,248), XX3_MASK, PPCVSX3, PPCVLE, {XT6, XA6, XB6}}, +{"xvnmsubmdp", XX3(60,249), XX3_MASK, PPCVSX, PPCVLE, {XT6, XA6, XB6}}, +{"xvcvsxddp", XX2(60,504), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, +{"xvnegdp", XX2(60,505), XX2_MASK, PPCVSX, PPCVLE, {XT6, XB6}}, + +{"psq_st", OP(60), OP_MASK, PPCPS, PPCVLE, {FRS,PSD,RA,PSW,PSQ}}, +{"stfq", OP(60), OP_MASK, POWER2, PPCVLE, {FRS, D, RA}}, + +{"lxv", DQX(61,1), DQX_MASK, PPCVSX3, PPCVLE, {XTQ6, DQ, RA0}}, +{"stxv", DQX(61,5), DQX_MASK, PPCVSX3, PPCVLE, {XSQ6, DQ, RA0}}, +{"stxsd", DSO(61,2), DS_MASK, PPCVSX3, PPCVLE, {VS, DS, RA0}}, +{"stxssp", DSO(61,3), DS_MASK, PPCVSX3, PPCVLE, {VS, DS, RA0}}, +{"stfdp", OP(61), OP_MASK, POWER6, POWER7|PPCVLE, {FRSp, DS, RA0}}, +{"psq_stu", OP(61), OP_MASK, PPCPS, PPCVLE, {FRS,PSD,RA,PSW,PSQ}}, +{"stfqu", OP(61), OP_MASK, POWER2, PPCVLE, {FRS, D, RA}}, + +{"std", DSO(62,0), DS_MASK, PPC64, PPCVLE, {RS, DS, RA0}}, +{"stdu", DSO(62,1), DS_MASK, PPC64, PPCVLE, {RS, DS, RAS}}, +{"stq", DSO(62,2), DS_MASK, POWER4, PPC476|PPCVLE, {RSQ, DS, RA0}}, + +{"fcmpu", X(63,0), XBF_MASK, COM, PPCEFS|PPCVLE, {BF, FRA, FRB}}, + +{"daddq", XRC(63,2,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, +{"daddq.", XRC(63,2,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, + +{"dquaq", ZRC(63,3,0), Z2_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp, RMC}}, +{"dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp, RMC}}, + +{"xsaddqp", XRC(63,4,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsaddqpo", XRC(63,4,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"xsrqpi", ZRC(63,5,0), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, +{"xsrqpix", ZRC(63,5,1), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, + +{"fcpsgn", XRC(63,8,0), X_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FRT, FRA, FRB}}, +{"fcpsgn.", XRC(63,8,1), X_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FRT, FRA, FRB}}, + +{"frsp", XRC(63,12,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"frsp.", XRC(63,12,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, + +{"fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fcir", XRC(63,14,0), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, +{"fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fcir.", XRC(63,14,1), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, + +{"fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fcirz", XRC(63,15,0), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, +{"fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fcirz.", XRC(63,15,1), XRA_MASK, PWR2COM, PPCVLE, {FRT, FRB}}, + +{"fdiv", A(63,18,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fd", A(63,18,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, +{"fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fd.", A(63,18,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, + +{"fsub", A(63,20,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fs", A(63,20,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, +{"fsub.", A(63,20,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fs.", A(63,20,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, + +{"fadd", A(63,21,0), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fa", A(63,21,0), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, +{"fadd.", A(63,21,1), AFRC_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRB}}, +{"fa.", A(63,21,1), AFRC_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRB}}, + +{"fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, TITAN|PPCVLE, {FRT, FRB}}, +{"fsqrt.", A(63,22,1), 
AFRAFRC_MASK, PPCPWR2, TITAN|PPCVLE, {FRT, FRB}}, + +{"fsel", A(63,23,0), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fsel.", A(63,23,1), A_MASK, PPC, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fre", A(63,24,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fre", A(63,24,0), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, +{"fre.", A(63,24,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fre.", A(63,24,1), AFRALFRC_MASK, POWER5, POWER7|PPCVLE, {FRT, FRB, A_L}}, + +{"fmul", A(63,25,0), AFRB_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, +{"fm", A(63,25,0), AFRB_MASK, PWRCOM, PPCVLE|PPCVLE, {FRT, FRA, FRC}}, +{"fmul.", A(63,25,1), AFRB_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC}}, +{"fm.", A(63,25,1), AFRB_MASK, PWRCOM, PPCVLE|PPCVLE, {FRT, FRA, FRC}}, + +{"frsqrte", A(63,26,0), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, +{"frsqrte.", A(63,26,1), AFRAFRC_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"frsqrte.", A(63,26,1), AFRALFRC_MASK, PPC, POWER7|PPCVLE, {FRT, FRB, A_L}}, + +{"fmsub", A(63,28,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fms", A(63,28,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fmsub.", A(63,28,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fms.", A(63,28,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fmadd", A(63,29,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fma", A(63,29,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fmadd.", A(63,29,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fma.", A(63,29,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fnmsub", A(63,30,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnms", A(63,30,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnmsub.", A(63,30,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnms.", A(63,30,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fnmadd", A(63,31,0), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnma", A(63,31,0), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnmadd.", A(63,31,1), A_MASK, PPCCOM, PPCEFS|PPCVLE, {FRT, FRA, FRC, FRB}}, +{"fnma.", A(63,31,1), A_MASK, PWRCOM, PPCVLE, {FRT, FRA, FRC, FRB}}, + +{"fcmpo", X(63,32), XBF_MASK, COM, PPCEFS|PPCVLE, {BF, FRA, FRB}}, + +{"dmulq", XRC(63,34,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, +{"dmulq.", XRC(63,34,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, + +{"drrndq", ZRC(63,35,0), Z2_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp, RMC}}, +{"drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp, RMC}}, + +{"xsmulqp", XRC(63,36,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsmulqpo", XRC(63,36,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"xsrqpxp", Z(63,37), Z2_MASK, PPCVSX3, PPCVLE, {R, VD, VB, RMC}}, + +{"mtfsb1", XRC(63,38,0), XRARB_MASK, COM, PPCVLE, {BT}}, +{"mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, PPCVLE, {BT}}, + +{"fneg", XRC(63,40,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fneg.", XRC(63,40,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, + +{"mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, PPCVLE, {BF, BFA}}, + +{"dscliq", ZRC(63,66,0), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, +{"dscliq.", ZRC(63,66,1), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, + +{"dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, PPCVLE, {TE, FRTp, FRBp, RMC}}, +{"dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, PPCVLE, {TE, FRTp, FRBp, RMC}}, + +{"mtfsb0", XRC(63,70,0), XRARB_MASK, COM, PPCVLE, {BT}}, +{"mtfsb0.", XRC(63,70,1), 
XRARB_MASK, COM, PPCVLE, {BT}}, + +{"fmr", XRC(63,72,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fmr.", XRC(63,72,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, + +{"dscriq", ZRC(63,98,0), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, +{"dscriq.", ZRC(63,98,1), Z_MASK, POWER6, PPCVLE, {FRTp, FRAp, SH16}}, + +{"drintxq", ZRC(63,99,0), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, +{"drintxq.", ZRC(63,99,1), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, + +{"xscpsgnqp", X(63,100), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"ftdiv", X(63,128), XBF_MASK, POWER7, PPCVLE, {BF, FRA, FRB}}, + +{"dcmpoq", X(63,130), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, + +{"xscmpoqp", X(63,132), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, + +{"mtfsfi", XRC(63,134,0), XWRA_MASK|(3<<21)|(1<<11), POWER6|PPCA2|PPC476, PPCVLE, {BFF, U, W}}, +{"mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, POWER6|PPCA2|PPC476|PPCVLE, {BFF, U}}, +{"mtfsfi.", XRC(63,134,1), XWRA_MASK|(3<<21)|(1<<11), POWER6|PPCA2|PPC476, PPCVLE, {BFF, U, W}}, +{"mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, POWER6|PPCA2|PPC476|PPCVLE, {BFF, U}}, + +{"fnabs", XRC(63,136,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fnabs.", XRC(63,136,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, + +{"fctiwu", XRC(63,142,0), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fctiwu.", XRC(63,142,1), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fctiwuz", XRC(63,143,0), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, +{"fctiwuz.", XRC(63,143,1), XRA_MASK, POWER7, PPCVLE, {FRT, FRB}}, + +{"ftsqrt", X(63,160), XBF_MASK|FRA_MASK, POWER7, PPCVLE, {BF, FRB}}, + +{"dtstexq", X(63,162), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, + +{"xscmpexpqp", X(63,164), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, + +{"dtstdcq", Z(63,194), Z_MASK, POWER6, PPCVLE, {BF, FRAp, DCM}}, +{"dtstdgq", Z(63,226), Z_MASK, POWER6, PPCVLE, {BF, FRAp, DGM}}, + +{"drintnq", ZRC(63,227,0), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, +{"drintnq.", ZRC(63,227,1), Z2_MASK, POWER6, PPCVLE, {R, FRTp, FRBp, RMC}}, + +{"dctqpq", XRC(63,258,0), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, +{"dctqpq.", XRC(63,258,1), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, + +{"fabs", XRC(63,264,0), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, +{"fabs.", XRC(63,264,1), XRA_MASK, COM, PPCEFS|PPCVLE, {FRT, FRB}}, + +{"dctfixq", XRC(63,290,0), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, +{"dctfixq.", XRC(63,290,1), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, + +{"ddedpdq", XRC(63,322,0), X_MASK, POWER6, PPCVLE, {SP, FRTp, FRBp}}, +{"ddedpdq.", XRC(63,322,1), X_MASK, POWER6, PPCVLE, {SP, FRTp, FRBp}}, + +{"dxexq", XRC(63,354,0), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, +{"dxexq.", XRC(63,354,1), X_MASK, POWER6, PPCVLE, {FRT, FRBp}}, + +{"xsmaddqp", XRC(63,388,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsmaddqpo", XRC(63,388,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"frin", XRC(63,392,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, +{"frin.", XRC(63,392,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, + +{"xsmsubqp", XRC(63,420,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsmsubqpo", XRC(63,420,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"friz", XRC(63,424,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, +{"friz.", XRC(63,424,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, + +{"xsnmaddqp", XRC(63,452,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsnmaddqpo", XRC(63,452,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"frip", XRC(63,456,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, +{"frip.", XRC(63,456,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, + 
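/* Illustration of the table's naming convention (assuming the final argument of the XRC encoding macro is the Rc bit): a base mnemonic and its "." form differ only in that bit, as in the pair just above,

     {"frip",  XRC(63,456,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}},
     {"frip.", XRC(63,456,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}},

   where the "." (record) form additionally updates CR1.  */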
+{"xsnmsubqp", XRC(63,484,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsnmsubqpo", XRC(63,484,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"frim", XRC(63,488,0), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, +{"frim.", XRC(63,488,1), XRA_MASK, POWER5, PPCVLE, {FRT, FRB}}, + +{"dsubq", XRC(63,514,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, +{"dsubq.", XRC(63,514,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, + +{"xssubqp", XRC(63,516,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xssubqpo", XRC(63,516,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"ddivq", XRC(63,546,0), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, +{"ddivq.", XRC(63,546,1), X_MASK, POWER6, PPCVLE, {FRTp, FRAp, FRBp}}, + +{"xsdivqp", XRC(63,548,0), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, +{"xsdivqpo", XRC(63,548,1), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"mffs", XRC(63,583,0), XRARB_MASK, COM, PPCEFS|PPCVLE, {FRT}}, +{"mffs.", XRC(63,583,1), XRARB_MASK, COM, PPCEFS|PPCVLE, {FRT}}, + +{"mffsce", XMMF(63,583,0,1), XMMF_MASK|RB_MASK, POWER9, PPCVLE, {FRT}}, +{"mffscdrn", XMMF(63,583,2,4), XMMF_MASK, POWER9, PPCVLE, {FRT, FRB}}, +{"mffscdrni", XMMF(63,583,2,5), XMMF_MASK|(3<<14), POWER9, PPCVLE, {FRT, DRM}}, +{"mffscrn", XMMF(63,583,2,6), XMMF_MASK, POWER9, PPCVLE, {FRT, FRB}}, +{"mffscrni", XMMF(63,583,2,7), XMMF_MASK|(7<<13), POWER9, PPCVLE, {FRT, RM}}, +{"mffsl", XMMF(63,583,3,0), XMMF_MASK|RB_MASK, POWER9, PPCVLE, {FRT}}, + +{"dcmpuq", X(63,642), X_MASK, POWER6, PPCVLE, {BF, FRAp, FRBp}}, + +{"xscmpuqp", X(63,644), XBF_MASK, PPCVSX3, PPCVLE, {BF, VA, VB}}, + +{"dtstsfq", X(63,674), X_MASK, POWER6, PPCVLE, {BF, FRA, FRBp}}, +{"dtstsfiq", X(63,675), X_MASK|1<<22,POWER9, PPCVLE, {BF, UIM6, FRBp}}, + +{"xststdcqp", X(63,708), X_MASK, PPCVSX3, PPCVLE, {BF, VB, DCMX}}, + +{"mtfsf", XFL(63,711,0), XFL_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FLM, FRB, XFL_L, W}}, +{"mtfsf", XFL(63,711,0), XFL_MASK, COM, POWER6|PPCA2|PPC476|PPCEFS|PPCVLE, {FLM, FRB}}, +{"mtfsf.", XFL(63,711,1), XFL_MASK, POWER6|PPCA2|PPC476, PPCVLE, {FLM, FRB, XFL_L, W}}, +{"mtfsf.", XFL(63,711,1), XFL_MASK, COM, POWER6|PPCA2|PPC476|PPCEFS|PPCVLE, {FLM, FRB}}, + +{"drdpq", XRC(63,770,0), X_MASK, POWER6, PPCVLE, {FRTp, FRBp}}, +{"drdpq.", XRC(63,770,1), X_MASK, POWER6, PPCVLE, {FRTp, FRBp}}, + +{"dcffixq", XRC(63,802,0), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, +{"dcffixq.", XRC(63,802,1), X_MASK, POWER6, PPCVLE, {FRTp, FRB}}, + +{"xsabsqp", XVA(63,804,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xsxexpqp", XVA(63,804,2), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xsnabsqp", XVA(63,804,8), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xsnegqp", XVA(63,804,16), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xsxsigqp", XVA(63,804,18), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xssqrtqp", XVARC(63,804,27,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xssqrtqpo", XVARC(63,804,27,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, + +{"fctid", XRC(63,814,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fctid", XRC(63,814,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, +{"fctid.", XRC(63,814,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fctid.", XRC(63,814,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, + +{"fctidz", XRC(63,815,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fctidz", XRC(63,815,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, +{"fctidz.", XRC(63,815,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fctidz.", XRC(63,815,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, + +{"denbcdq", XRC(63,834,0), X_MASK, POWER6, PPCVLE, {S, FRTp, FRBp}}, +{"denbcdq.", XRC(63,834,1), X_MASK, POWER6, PPCVLE, {S, FRTp, FRBp}}, + 
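/* A minimal sketch of how a table in this format is typically consumed, using a hypothetical lookup_insn helper and assuming the struct powerpc_opcode layout from ppc.h (opcode, mask, flags, deprecated, operands); a real disassembler would index by major opcode rather than scanning linearly, so treat this only as an illustration:

   static const struct powerpc_opcode *
   lookup_insn (unsigned long insn, ppc_cpu_t dialect)
   {
     int i;

     for (i = 0; i < powerpc_num_opcodes; i++)
       {
         const struct powerpc_opcode *op = &powerpc_opcodes[i];

         if ((insn & op->mask) == op->opcode
             && (op->flags & dialect) != 0
             && (op->deprecated & dialect) == 0)
           return op;
       }
     return NULL;
   }  */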
+{"xscvqpuwz", XVA(63,836,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvudqp", XVA(63,836,2), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvqpswz", XVA(63,836,9), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvsdqp", XVA(63,836,10), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvqpudz", XVA(63,836,17), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvqpdp", XVARC(63,836,20,0), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvqpdpo", XVARC(63,836,20,1), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvdpqp", XVA(63,836,22), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, +{"xscvqpsdz", XVA(63,836,25), XVA_MASK, PPCVSX3, PPCVLE, {VD, VB}}, + +{"fmrgow", X(63,838), X_MASK, PPCVSX2, PPCVLE, {FRT, FRA, FRB}}, + +{"fcfid", XRC(63,846,0), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fcfid", XRC(63,846,0), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, +{"fcfid.", XRC(63,846,1), XRA_MASK, PPC64, PPCVLE, {FRT, FRB}}, +{"fcfid.", XRC(63,846,1), XRA_MASK, PPC476, PPCVLE, {FRT, FRB}}, + +{"diexq", XRC(63,866,0), X_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp}}, +{"diexq.", XRC(63,866,1), X_MASK, POWER6, PPCVLE, {FRTp, FRA, FRBp}}, + +{"xsiexpqp", X(63,868), X_MASK, PPCVSX3, PPCVLE, {VD, VA, VB}}, + +{"fctidu", XRC(63,942,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +{"fctidu.", XRC(63,942,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, + +{"fctiduz", XRC(63,943,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +{"fctiduz.", XRC(63,943,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, + +{"fmrgew", X(63,966), X_MASK, PPCVSX2, PPCVLE, {FRT, FRA, FRB}}, + +{"fcfidu", XRC(63,974,0), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +{"fcfidu.", XRC(63,974,1), XRA_MASK, POWER7|PPCA2, PPCVLE, {FRT, FRB}}, +}; + +const int powerpc_num_opcodes = + sizeof (powerpc_opcodes) / sizeof (powerpc_opcodes[0]); + +/* The VLE opcode table. + + The format of this opcode table is the same as the main opcode table. 
*/ + +const struct powerpc_opcode vle_opcodes[] = { +{"se_illegal", C(0), C_MASK, PPCVLE, 0, {}}, +{"se_isync", C(1), C_MASK, PPCVLE, 0, {}}, +{"se_sc", C(2), C_MASK, PPCVLE, 0, {}}, +{"se_blr", C_LK(2,0), C_LK_MASK, PPCVLE, 0, {}}, +{"se_blrl", C_LK(2,1), C_LK_MASK, PPCVLE, 0, {}}, +{"se_bctr", C_LK(3,0), C_LK_MASK, PPCVLE, 0, {}}, +{"se_bctrl", C_LK(3,1), C_LK_MASK, PPCVLE, 0, {}}, +{"se_rfi", C(8), C_MASK, PPCVLE, 0, {}}, +{"se_rfci", C(9), C_MASK, PPCVLE, 0, {}}, +{"se_rfdi", C(10), C_MASK, PPCVLE, 0, {}}, +{"se_rfmci", C(11), C_MASK, PPCRFMCI|PPCVLE, 0, {}}, +{"se_not", SE_R(0,2), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_neg", SE_R(0,3), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_mflr", SE_R(0,8), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_mtlr", SE_R(0,9), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_mfctr", SE_R(0,10), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_mtctr", SE_R(0,11), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_extzb", SE_R(0,12), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_extsb", SE_R(0,13), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_extzh", SE_R(0,14), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_extsh", SE_R(0,15), SE_R_MASK, PPCVLE, 0, {RX}}, +{"se_mr", SE_RR(0,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_mtar", SE_RR(0,2), SE_RR_MASK, PPCVLE, 0, {ARX, RY}}, +{"se_mfar", SE_RR(0,3), SE_RR_MASK, PPCVLE, 0, {RX, ARY}}, +{"se_add", SE_RR(1,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_mullw", SE_RR(1,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_sub", SE_RR(1,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_subf", SE_RR(1,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_cmp", SE_RR(3,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_cmpl", SE_RR(3,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_cmph", SE_RR(3,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_cmphl", SE_RR(3,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, + +{"e_cmpi", SCI8BF(6,0,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, +{"e_cmpwi", SCI8BF(6,0,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, +{"e_cmpli", SCI8BF(6,1,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, +{"e_cmplwi", SCI8BF(6,1,21), SCI8BF_MASK, PPCVLE, 0, {CRD32, RA, SCLSCI8}}, +{"e_addi", SCI8(6,16), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_subi", SCI8(6,16), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, +{"e_addi.", SCI8(6,17), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_addic", SCI8(6,18), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_subic", SCI8(6,18), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, +{"e_addic.", SCI8(6,19), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_subic.", SCI8(6,19), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8N}}, +{"e_mulli", SCI8(6,20), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_subfic", SCI8(6,22), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_subfic.", SCI8(6,23), SCI8_MASK, PPCVLE, 0, {RT, RA, SCLSCI8}}, +{"e_andi", SCI8(6,24), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_andi.", SCI8(6,25), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_nop", SCI8(6,26), 0xffffffff, PPCVLE, 0, {0}}, +{"e_ori", SCI8(6,26), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_ori.", SCI8(6,27), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_xori", SCI8(6,28), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_xori.", SCI8(6,29), SCI8_MASK, PPCVLE, 0, {RA, RS, SCLSCI8}}, +{"e_lbzu", OPVUP(6,0), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_lhau", OPVUP(6,3), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_lhzu", OPVUP(6,1), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_lmw", OPVUP(6,8), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_lwzu", OPVUP(6,2), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_stbu", OPVUP(6,4), OPVUP_MASK, PPCVLE, 0, {RT, 
D8, RA0}}, +{"e_sthu", OPVUP(6,5), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_stwu", OPVUP(6,6), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_stmw", OPVUP(6,9), OPVUP_MASK, PPCVLE, 0, {RT, D8, RA0}}, +{"e_ldmvgprw", OPVUPRT(6,16,0),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_stmvgprw", OPVUPRT(6,17,0),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_ldmvsprw", OPVUPRT(6,16,1),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_stmvsprw", OPVUPRT(6,17,1),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_ldmvsrrw", OPVUPRT(6,16,4),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_stmvsrrw", OPVUPRT(6,17,4),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_ldmvcsrrw", OPVUPRT(6,16,5),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_stmvcsrrw", OPVUPRT(6,17,5),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_ldmvdsrrw", OPVUPRT(6,16,6),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_stmvdsrrw", OPVUPRT(6,17,6),OPVUPRT_MASK, PPCVLE, 0, {D8, RA0}}, +{"e_add16i", OP(7), OP_MASK, PPCVLE, 0, {RT, RA, SI}}, +{"e_la", OP(7), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_sub16i", OP(7), OP_MASK, PPCVLE, 0, {RT, RA, NSI}}, + +{"se_addi", SE_IM5(8,0), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, +{"se_cmpli", SE_IM5(8,1), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, +{"se_subi", SE_IM5(9,0), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, +{"se_subi.", SE_IM5(9,1), SE_IM5_MASK, PPCVLE, 0, {RX, OIMM5}}, +{"se_cmpi", SE_IM5(10,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_bmaski", SE_IM5(11,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_andi", SE_IM5(11,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, + +{"e_lbz", OP(12), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_stb", OP(13), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_lha", OP(14), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, + +{"se_srw", SE_RR(16,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_sraw", SE_RR(16,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_slw", SE_RR(16,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_nop", SE_RR(17,0), 0xffff, PPCVLE, 0, {0}}, +{"se_or", SE_RR(17,0), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_andc", SE_RR(17,1), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_and", SE_RR(17,2), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_and.", SE_RR(17,3), SE_RR_MASK, PPCVLE, 0, {RX, RY}}, +{"se_li", IM7(9), IM7_MASK, PPCVLE, 0, {RX, UI7}}, + +{"e_lwz", OP(20), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_stw", OP(21), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_lhz", OP(22), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, +{"e_sth", OP(23), OP_MASK, PPCVLE, 0, {RT, D, RA0}}, + +{"se_bclri", SE_IM5(24,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_bgeni", SE_IM5(24,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_bseti", SE_IM5(25,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_btsti", SE_IM5(25,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_srwi", SE_IM5(26,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_srawi", SE_IM5(26,1), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, +{"se_slwi", SE_IM5(27,0), SE_IM5_MASK, PPCVLE, 0, {RX, UI5}}, + +{"e_lis", I16L(28,28), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, +{"e_and2is.", I16L(28,29), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, +{"e_or2is", I16L(28,26), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, +{"e_and2i.", I16L(28,25), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, +{"e_or2i", I16L(28,24), I16L_MASK, PPCVLE, 0, {RD, VLEUIMML}}, +{"e_cmphl16i", IA16(28,23), IA16_MASK, PPCVLE, 0, {RA, VLEUIMM}}, +{"e_cmph16i", IA16(28,22), IA16_MASK, PPCVLE, 0, {RA, VLESIMM}}, +{"e_cmpl16i", I16A(28,21), I16A_MASK, PPCVLE, 0, {RA, VLEUIMM}}, +{"e_mull2i", I16A(28,20), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, +{"e_cmp16i", IA16(28,19), IA16_MASK, PPCVLE, 0, {RA, VLESIMM}}, +{"e_sub2is", I16A(28,18), I16A_MASK, 
PPCVLE, 0, {RA, VLENSIMM}}, +{"e_add2is", I16A(28,18), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, +{"e_sub2i.", I16A(28,17), I16A_MASK, PPCVLE, 0, {RA, VLENSIMM}}, +{"e_add2i.", I16A(28,17), I16A_MASK, PPCVLE, 0, {RA, VLESIMM}}, +{"e_li", LI20(28,0), LI20_MASK, PPCVLE, 0, {RT, IMM20}}, +{"e_rlwimi", M(29,0), M_MASK, PPCVLE, 0, {RA, RS, SH, MB, ME}}, +{"e_rlwinm", M(29,1), M_MASK, PPCVLE, 0, {RA, RT, SH, MBE, ME}}, +{"e_b", BD24(30,0,0), BD24_MASK, PPCVLE, 0, {B24}}, +{"e_bl", BD24(30,0,1), BD24_MASK, PPCVLE, 0, {B24}}, +{"e_bdnz", EBD15(30,8,BO32DNZ,0), EBD15_MASK, PPCVLE, 0, {B15}}, +{"e_bdnzl", EBD15(30,8,BO32DNZ,1), EBD15_MASK, PPCVLE, 0, {B15}}, +{"e_bdz", EBD15(30,8,BO32DZ,0), EBD15_MASK, PPCVLE, 0, {B15}}, +{"e_bdzl", EBD15(30,8,BO32DZ,1), EBD15_MASK, PPCVLE, 0, {B15}}, +{"e_bge", EBD15BI(30,8,BO32F,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bgel", EBD15BI(30,8,BO32F,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnl", EBD15BI(30,8,BO32F,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnll", EBD15BI(30,8,BO32F,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_blt", EBD15BI(30,8,BO32T,CBLT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bltl", EBD15BI(30,8,BO32T,CBLT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bgt", EBD15BI(30,8,BO32T,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bgtl", EBD15BI(30,8,BO32T,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_ble", EBD15BI(30,8,BO32F,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_blel", EBD15BI(30,8,BO32F,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bng", EBD15BI(30,8,BO32F,CBGT,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bngl", EBD15BI(30,8,BO32F,CBGT,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bne", EBD15BI(30,8,BO32F,CBEQ,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnel", EBD15BI(30,8,BO32F,CBEQ,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_beq", EBD15BI(30,8,BO32T,CBEQ,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_beql", EBD15BI(30,8,BO32T,CBEQ,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bso", EBD15BI(30,8,BO32T,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bsol", EBD15BI(30,8,BO32T,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bun", EBD15BI(30,8,BO32T,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bunl", EBD15BI(30,8,BO32T,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bns", EBD15BI(30,8,BO32F,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnsl", EBD15BI(30,8,BO32F,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnu", EBD15BI(30,8,BO32F,CBSO,0), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bnul", EBD15BI(30,8,BO32F,CBSO,1), EBD15BI_MASK, PPCVLE, 0, {CRS,B15}}, +{"e_bc", BD15(30,8,0), BD15_MASK, PPCVLE, 0, {BO32, BI32, B15}}, +{"e_bcl", BD15(30,8,1), BD15_MASK, PPCVLE, 0, {BO32, BI32, B15}}, + +{"e_bf", EBD15(30,8,BO32F,0), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, +{"e_bfl", EBD15(30,8,BO32F,1), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, +{"e_bt", EBD15(30,8,BO32T,0), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, +{"e_btl", EBD15(30,8,BO32T,1), EBD15_MASK, PPCVLE, 0, {BI32,B15}}, + +{"e_cmph", X(31,14), X_MASK, PPCVLE, 0, {CRD, RA, RB}}, +{"e_cmphl", X(31,46), X_MASK, PPCVLE, 0, {CRD, RA, RB}}, +{"e_crandc", XL(31,129), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, +{"e_crnand", XL(31,225), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, +{"e_crnot", XL(31,33), XL_MASK, PPCVLE, 0, {BT, BA, BBA}}, +{"e_crnor", XL(31,33), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, +{"e_crclr", XL(31,193), XL_MASK, PPCVLE, 0, {BT, BAT, BBA}}, +{"e_crxor", XL(31,193), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, +{"e_mcrf", XL(31,16), XL_MASK, PPCVLE, 0, {CRD, 
CR}}, +{"e_slwi", EX(31,112), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, +{"e_slwi.", EX(31,113), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, + +{"e_crand", XL(31,257), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, + +{"e_rlw", EX(31,560), EX_MASK, PPCVLE, 0, {RA, RS, RB}}, +{"e_rlw.", EX(31,561), EX_MASK, PPCVLE, 0, {RA, RS, RB}}, + +{"e_crset", XL(31,289), XL_MASK, PPCVLE, 0, {BT, BAT, BBA}}, +{"e_creqv", XL(31,289), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, + +{"e_rlwi", EX(31,624), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, +{"e_rlwi.", EX(31,625), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, + +{"e_crorc", XL(31,417), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, + +{"e_crmove", XL(31,449), XL_MASK, PPCVLE, 0, {BT, BA, BBA}}, +{"e_cror", XL(31,449), XL_MASK, PPCVLE, 0, {BT, BA, BB}}, + +{"mtmas1", XSPR(31,467,625), XSPR_MASK, PPCVLE, 0, {RS}}, + +{"e_srwi", EX(31,1136), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, +{"e_srwi.", EX(31,1137), EX_MASK, PPCVLE, 0, {RA, RS, SH}}, + +{"se_lbz", SD4(8), SD4_MASK, PPCVLE, 0, {RZ, SE_SD, RX}}, + +{"se_stb", SD4(9), SD4_MASK, PPCVLE, 0, {RZ, SE_SD, RX}}, + +{"se_lhz", SD4(10), SD4_MASK, PPCVLE, 0, {RZ, SE_SDH, RX}}, + +{"se_sth", SD4(11), SD4_MASK, PPCVLE, 0, {RZ, SE_SDH, RX}}, + +{"se_lwz", SD4(12), SD4_MASK, PPCVLE, 0, {RZ, SE_SDW, RX}}, + +{"se_stw", SD4(13), SD4_MASK, PPCVLE, 0, {RZ, SE_SDW, RX}}, + +{"se_bge", EBD8IO(28,0,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bnl", EBD8IO(28,0,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_ble", EBD8IO(28,0,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bng", EBD8IO(28,0,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bne", EBD8IO(28,0,2), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bns", EBD8IO(28,0,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bnu", EBD8IO(28,0,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bf", EBD8IO(28,0,0), EBD8IO2_MASK, PPCVLE, 0, {BI16, B8}}, +{"se_blt", EBD8IO(28,1,0), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bgt", EBD8IO(28,1,1), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_beq", EBD8IO(28,1,2), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bso", EBD8IO(28,1,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bun", EBD8IO(28,1,3), EBD8IO3_MASK, PPCVLE, 0, {B8}}, +{"se_bt", EBD8IO(28,1,0), EBD8IO2_MASK, PPCVLE, 0, {BI16, B8}}, +{"se_bc", BD8IO(28), BD8IO_MASK, PPCVLE, 0, {BO16, BI16, B8}}, +{"se_b", BD8(58,0,0), BD8_MASK, PPCVLE, 0, {B8}}, +{"se_bl", BD8(58,0,1), BD8_MASK, PPCVLE, 0, {B8}}, }; -const int powerpc_num_opcodes = ARRAY_SIZE(powerpc_opcodes); +const int vle_num_opcodes = + sizeof (vle_opcodes) / sizeof (vle_opcodes[0]); /* The macro table. This is only used by the assembler. */ @@ -4949,45 +7235,58 @@ const int powerpc_num_opcodes = ARRAY_SIZE(powerpc_opcodes); support extracting the whole word (32 bits in this case). */ const struct powerpc_macro powerpc_macros[] = { -{ "extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1" }, -{ "extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1" }, -{ "extrdi", 4, PPC64, "rldicl %0,%1,(%2)+(%3),64-(%2)" }, -{ "extrdi.", 4, PPC64, "rldicl. %0,%1,(%2)+(%3),64-(%2)" }, -{ "insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3" }, -{ "insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3" }, -{ "rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0" }, -{ "rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0" }, -{ "sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)" }, -{ "sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)" }, -{ "srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2" }, -{ "srdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),%2" }, -{ "clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)" }, -{ "clrrdi.", 3, PPC64, "rldicr. 
%0,%1,0,63-(%2)" }, -{ "clrlsldi",4, PPC64, "rldic %0,%1,%3,(%2)-(%3)" }, -{ "clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)" }, - -{ "extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1" }, -{ "extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1" }, -{ "extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" }, -{ "extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" }, -{ "inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1" }, -{ "inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, -{ "insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1" }, -{ "insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, -{ "rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31" }, -{ "rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31" }, -{ "slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)" }, -{ "sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)" }, -{ "slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)" }, -{ "sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)" }, -{ "srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "srwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" }, -{ "clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)" }, -{ "clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)" }, -{ "clrlslwi",4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)" }, -{ "clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)" }, +{"extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1"}, +{"extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1"}, +{"extrdi", 4, PPC64, "rldicl %0,%1,((%2)+(%3))&((%2)+(%3)<>64),64-(%2)"}, +{"extrdi.", 4, PPC64, "rldicl. %0,%1,((%2)+(%3))&((%2)+(%3)<>64),64-(%2)"}, +{"insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3"}, +{"insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3"}, +{"rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0"}, +{"rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0"}, +{"sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)"}, +{"sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)"}, +{"srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2"}, +{"srdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),%2"}, +{"clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)"}, +{"clrrdi.", 3, PPC64, "rldicr. %0,%1,0,63-(%2)"}, +{"clrlsldi", 4, PPC64, "rldic %0,%1,%3,(%2)-(%3)"}, +{"clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)"}, + +{"extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1"}, +{"extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1"}, +{"extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, +{"extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, +{"inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, +{"inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, +{"insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, +{"insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, +{"rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31"}, +{"rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31"}, +{"slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)"}, +{"sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)"}, +{"slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)"}, +{"sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)"}, +{"srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, +{"sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, +{"srwi.", 3, PPCCOM, "rlwinm. 
%0,%1,(-(%2)!31)&((%2)|31),%2,31"}, +{"sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, +{"clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)"}, +{"clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)"}, +{"clrlslwi", 4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)"}, +{"clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)"}, + +{"e_extlwi", 4, PPCVLE, "e_rlwinm %0,%1,%3,0,(%2)-1"}, +{"e_extrwi", 4, PPCVLE, "e_rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31"}, +{"e_inslwi", 4, PPCVLE, "e_rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"}, +{"e_insrwi", 4, PPCVLE, "e_rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"}, +{"e_rotlwi", 3, PPCVLE, "e_rlwinm %0,%1,%2,0,31"}, +{"e_rotrwi", 3, PPCVLE, "e_rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31"}, +{"e_slwi", 3, PPCVLE, "e_rlwinm %0,%1,%2,0,31-(%2)"}, +{"e_srwi", 3, PPCVLE, "e_rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31"}, +{"e_clrlwi", 3, PPCVLE, "e_rlwinm %0,%1,0,%2,31"}, +{"e_clrrwi", 3, PPCVLE, "e_rlwinm %0,%1,0,0,31-(%2)"}, +{"e_clrlslwi",4, PPCVLE, "e_rlwinm %0,%1,%3,(%2)-(%3),31-(%3)"}, }; -const int powerpc_num_macros = ARRAY_SIZE(powerpc_macros); +const int powerpc_num_macros = + sizeof (powerpc_macros) / sizeof (powerpc_macros[0]); diff --git a/arch/powerpc/xmon/ppc.h b/arch/powerpc/xmon/ppc.h index 110df96354b4..d00f33dcf192 100644 --- a/arch/powerpc/xmon/ppc.h +++ b/arch/powerpc/xmon/ppc.h @@ -1,6 +1,5 @@ /* ppc.h -- Header file for PowerPC opcode table - Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 - Free Software Foundation, Inc. + Copyright (C) 1994-2016 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. @@ -22,6 +21,12 @@ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, US #ifndef PPC_H #define PPC_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint64_t ppc_cpu_t; + /* The opcode table is an array of struct powerpc_opcode. */ struct powerpc_opcode @@ -42,7 +47,12 @@ struct powerpc_opcode /* One bit flags for the opcode. These are used to indicate which specific processors support the instructions. The defined values are listed below. */ - unsigned long flags; + ppc_cpu_t flags; + + /* One bit flags for the opcode. These are used to indicate which + specific processors no longer support the instructions. The defined + values are listed below. */ + ppc_cpu_t deprecated; /* An array of operand codes. Each code is an index into the operand table. They appear in the order which the operands must @@ -55,6 +65,8 @@ struct powerpc_opcode instructions. */ extern const struct powerpc_opcode powerpc_opcodes[]; extern const int powerpc_num_opcodes; +extern const struct powerpc_opcode vle_opcodes[]; +extern const int vle_num_opcodes; /* Values defined for the flags field of a struct powerpc_opcode. */ @@ -67,106 +79,178 @@ extern const int powerpc_num_opcodes; /* Opcode is defined for the POWER2 (Rios 2) architecture. */ #define PPC_OPCODE_POWER2 4 -/* Opcode is only defined on 32 bit architectures. */ -#define PPC_OPCODE_32 8 - -/* Opcode is only defined on 64 bit architectures. */ -#define PPC_OPCODE_64 0x10 - /* Opcode is supported by the Motorola PowerPC 601 processor. The 601 is assumed to support all PowerPC (PPC_OPCODE_PPC) instructions, but it also supports many additional POWER instructions. */ -#define PPC_OPCODE_601 0x20 +#define PPC_OPCODE_601 8 /* Opcode is supported in both the Power and PowerPC architectures - (ie, compiler's -mcpu=common or assembler's -mcom). 
*/ -#define PPC_OPCODE_COMMON 0x40 + (ie, compiler's -mcpu=common or assembler's -mcom). More than just + the intersection of PPC_OPCODE_PPC with the union of PPC_OPCODE_POWER + and PPC_OPCODE_POWER2 because many instructions changed mnemonics + between POWER and POWERPC. */ +#define PPC_OPCODE_COMMON 0x10 /* Opcode is supported for any Power or PowerPC platform (this is for the assembler's -many option, and it eliminates duplicates). */ -#define PPC_OPCODE_ANY 0x80 +#define PPC_OPCODE_ANY 0x20 + +/* Opcode is only defined on 64 bit architectures. */ +#define PPC_OPCODE_64 0x40 /* Opcode is supported as part of the 64-bit bridge. */ -#define PPC_OPCODE_64_BRIDGE 0x100 +#define PPC_OPCODE_64_BRIDGE 0x80 /* Opcode is supported by Altivec Vector Unit */ -#define PPC_OPCODE_ALTIVEC 0x200 +#define PPC_OPCODE_ALTIVEC 0x100 /* Opcode is supported by PowerPC 403 processor. */ -#define PPC_OPCODE_403 0x400 +#define PPC_OPCODE_403 0x200 /* Opcode is supported by PowerPC BookE processor. */ -#define PPC_OPCODE_BOOKE 0x800 - -/* Opcode is only supported by 64-bit PowerPC BookE processor. */ -#define PPC_OPCODE_BOOKE64 0x1000 +#define PPC_OPCODE_BOOKE 0x400 /* Opcode is supported by PowerPC 440 processor. */ -#define PPC_OPCODE_440 0x2000 +#define PPC_OPCODE_440 0x800 /* Opcode is only supported by Power4 architecture. */ -#define PPC_OPCODE_POWER4 0x4000 - -/* Opcode isn't supported by Power4 architecture. */ -#define PPC_OPCODE_NOPOWER4 0x8000 +#define PPC_OPCODE_POWER4 0x1000 -/* Opcode is only supported by POWERPC Classic architecture. */ -#define PPC_OPCODE_CLASSIC 0x10000 +/* Opcode is only supported by Power7 architecture. */ +#define PPC_OPCODE_POWER7 0x2000 /* Opcode is only supported by e500x2 Core. */ -#define PPC_OPCODE_SPE 0x20000 +#define PPC_OPCODE_SPE 0x4000 /* Opcode is supported by e500x2 Integer select APU. */ -#define PPC_OPCODE_ISEL 0x40000 +#define PPC_OPCODE_ISEL 0x8000 /* Opcode is an e500 SPE floating point instruction. */ -#define PPC_OPCODE_EFS 0x80000 +#define PPC_OPCODE_EFS 0x10000 /* Opcode is supported by branch locking APU. */ -#define PPC_OPCODE_BRLOCK 0x100000 +#define PPC_OPCODE_BRLOCK 0x20000 /* Opcode is supported by performance monitor APU. */ -#define PPC_OPCODE_PMR 0x200000 +#define PPC_OPCODE_PMR 0x40000 /* Opcode is supported by cache locking APU. */ -#define PPC_OPCODE_CACHELCK 0x400000 +#define PPC_OPCODE_CACHELCK 0x80000 /* Opcode is supported by machine check APU. */ -#define PPC_OPCODE_RFMCI 0x800000 +#define PPC_OPCODE_RFMCI 0x100000 /* Opcode is only supported by Power5 architecture. */ -#define PPC_OPCODE_POWER5 0x1000000 +#define PPC_OPCODE_POWER5 0x200000 /* Opcode is supported by PowerPC e300 family. */ -#define PPC_OPCODE_E300 0x2000000 +#define PPC_OPCODE_E300 0x400000 /* Opcode is only supported by Power6 architecture. */ -#define PPC_OPCODE_POWER6 0x4000000 +#define PPC_OPCODE_POWER6 0x800000 /* Opcode is only supported by PowerPC Cell family. */ -#define PPC_OPCODE_CELL 0x8000000 +#define PPC_OPCODE_CELL 0x1000000 + +/* Opcode is supported by CPUs with paired singles support. */ +#define PPC_OPCODE_PPCPS 0x2000000 + +/* Opcode is supported by Power E500MC */ +#define PPC_OPCODE_E500MC 0x4000000 + +/* Opcode is supported by PowerPC 405 processor. */ +#define PPC_OPCODE_405 0x8000000 + +/* Opcode is supported by Vector-Scalar (VSX) Unit */ +#define PPC_OPCODE_VSX 0x10000000 + +/* Opcode is supported by A2. */ +#define PPC_OPCODE_A2 0x20000000 + +/* Opcode is supported by PowerPC 476 processor. 
*/ +#define PPC_OPCODE_476 0x40000000 + +/* Opcode is supported by AppliedMicro Titan core */ +#define PPC_OPCODE_TITAN 0x80000000 + +/* Opcode which is supported by the e500 family */ +#define PPC_OPCODE_E500 0x100000000ull + +/* Opcode is supported by Extended Altivec Vector Unit */ +#define PPC_OPCODE_ALTIVEC2 0x200000000ull + +/* Opcode is supported by Power E6500 */ +#define PPC_OPCODE_E6500 0x400000000ull + +/* Opcode is supported by Thread management APU */ +#define PPC_OPCODE_TMR 0x800000000ull + +/* Opcode which is supported by the VLE extension. */ +#define PPC_OPCODE_VLE 0x1000000000ull + +/* Opcode is only supported by Power8 architecture. */ +#define PPC_OPCODE_POWER8 0x2000000000ull + +/* Opcode which is supported by the Hardware Transactional Memory extension. */ +/* Currently, this is the same as the POWER8 mask. If another cpu comes out + that isn't a superset of POWER8, we can define this to its own mask. */ +#define PPC_OPCODE_HTM PPC_OPCODE_POWER8 + +/* Opcode is supported by ppc750cl. */ +#define PPC_OPCODE_750 0x4000000000ull + +/* Opcode is supported by ppc7450. */ +#define PPC_OPCODE_7450 0x8000000000ull + +/* Opcode is supported by ppc821/850/860. */ +#define PPC_OPCODE_860 0x10000000000ull + +/* Opcode is only supported by Power9 architecture. */ +#define PPC_OPCODE_POWER9 0x20000000000ull + +/* Opcode is supported by Vector-Scalar (VSX) Unit from ISA 2.08. */ +#define PPC_OPCODE_VSX3 0x40000000000ull + + /* Opcode is supported by e200z4. */ +#define PPC_OPCODE_E200Z4 0x80000000000ull /* A macro to extract the major opcode from an instruction. */ #define PPC_OP(i) (((i) >> 26) & 0x3f) + +/* A macro to determine if the instruction is a 2-byte VLE insn. */ +#define PPC_OP_SE_VLE(m) ((m) <= 0xffff) + +/* A macro to extract the major opcode from a VLE instruction. */ +#define VLE_OP(i,m) (((i) >> ((m) <= 0xffff ? 10 : 26)) & 0x3f) + +/* A macro to convert a VLE opcode to a VLE opcode segment. */ +#define VLE_OP_TO_SEG(i) ((i) >> 1) /* The operands table is an array of struct powerpc_operand. */ struct powerpc_operand { - /* The number of bits in the operand. */ - int bits; - - /* How far the operand is left shifted in the instruction. */ + /* A bitmask of bits in the operand. */ + unsigned int bitm; + + /* The shift operation to be applied to the operand. No shift + is made if this is zero. For positive values, the operand + is shifted left by SHIFT. For negative values, the operand + is shifted right by -SHIFT. Use PPC_OPSHIFT_INV to indicate + that BITM and SHIFT cannot be used to determine where the + operand goes in the insn. */ int shift; /* Insertion function. This is used by the assembler. To insert an operand value into an instruction, check this field. If it is NULL, execute - i |= (op & ((1 << o->bits) - 1)) << o->shift; + if (o->shift >= 0) + i |= (op & o->bitm) << o->shift; + else + i |= (op & o->bitm) >> -o->shift; (i is the instruction which we are filling in, o is a pointer to - this structure, and op is the opcode value; this assumes twos - complement arithmetic). + this structure, and op is the operand value). If this field is not NULL, then simply call it with the instruction and the operand value. It will return the new value @@ -176,18 +260,20 @@ struct powerpc_operand operand value is legal, *ERRMSG will be unchanged (most operands can accept any value). 
*/ unsigned long (*insert) - (unsigned long instruction, long op, int dialect, const char **errmsg); + (unsigned long instruction, long op, ppc_cpu_t dialect, const char **errmsg); /* Extraction function. This is used by the disassembler. To extract this operand type from an instruction, check this field. If it is NULL, compute - op = ((i) >> o->shift) & ((1 << o->bits) - 1); - if ((o->flags & PPC_OPERAND_SIGNED) != 0 - && (op & (1 << (o->bits - 1))) != 0) - op -= 1 << o->bits; + if (o->shift >= 0) + op = (i >> o->shift) & o->bitm; + else + op = (i << -o->shift) & o->bitm; + if ((o->flags & PPC_OPERAND_SIGNED) != 0) + sign_extend (op); (i is the instruction, o is a pointer to this structure, and op - is the result; this assumes twos complement arithmetic). + is the result). If this field is not NULL, then simply call it with the instruction value. It will return the value of the operand. If @@ -195,7 +281,7 @@ struct powerpc_operand non-zero if this operand type can not actually be extracted from this operand (i.e., the instruction does not match). If the operand is valid, *INVALID will not be changed. */ - long (*extract) (unsigned long instruction, int dialect, int *invalid); + long (*extract) (unsigned long instruction, ppc_cpu_t dialect, int *invalid); /* One bit syntax flags. */ unsigned long flags; @@ -205,17 +291,23 @@ struct powerpc_operand the operands field of the powerpc_opcodes table. */ extern const struct powerpc_operand powerpc_operands[]; +extern const unsigned int num_powerpc_operands; + +/* Use with the shift field of a struct powerpc_operand to indicate + that BITM and SHIFT cannot be used to determine where the operand + goes in the insn. */ +#define PPC_OPSHIFT_INV (-1U << 31) /* Values defined for the flags field of a struct powerpc_operand. */ /* This operand takes signed values. */ -#define PPC_OPERAND_SIGNED (01) +#define PPC_OPERAND_SIGNED (0x1) /* This operand takes signed values, but also accepts a full positive range of values when running in 32 bit mode. That is, if bits is 16, it takes any value from -0x8000 to 0xffff. In 64 bit mode, this flag is ignored. */ -#define PPC_OPERAND_SIGNOPT (02) +#define PPC_OPERAND_SIGNOPT (0x2) /* This operand does not actually exist in the assembler input. This is used to support extended mnemonics such as mr, for which two @@ -223,14 +315,14 @@ extern const struct powerpc_operand powerpc_operands[]; insert function with any op value. The disassembler should call the extract function, ignore the return value, and check the value placed in the valid argument. */ -#define PPC_OPERAND_FAKE (04) +#define PPC_OPERAND_FAKE (0x4) /* The next operand should be wrapped in parentheses rather than separated from this one by a comma. This is used for the load and store instructions which want their operands to look like reg,displacement(reg) */ -#define PPC_OPERAND_PARENS (010) +#define PPC_OPERAND_PARENS (0x8) /* This operand may use the symbolic names for the CR fields, which are @@ -239,26 +331,26 @@ extern const struct powerpc_operand powerpc_operands[]; cr4 4 cr5 5 cr6 6 cr7 7 These may be combined arithmetically, as in cr2*4+gt. These are only supported on the PowerPC, not the POWER. */ -#define PPC_OPERAND_CR (020) +#define PPC_OPERAND_CR_BIT (0x10) /* This operand names a register. The disassembler uses this to print register names with a leading 'r'. */ -#define PPC_OPERAND_GPR (040) +#define PPC_OPERAND_GPR (0x20) /* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0. 
-#define PPC_OPERAND_GPR_0 (0100) +#define PPC_OPERAND_GPR_0 (0x40) /* This operand names a floating point register. The disassembler prints these with a leading 'f'. */ -#define PPC_OPERAND_FPR (0200) +#define PPC_OPERAND_FPR (0x80) /* This operand is a relative branch displacement. The disassembler prints these symbolically if possible. */ -#define PPC_OPERAND_RELATIVE (0400) +#define PPC_OPERAND_RELATIVE (0x100) /* This operand is an absolute branch address. The disassembler prints these symbolically if possible. */ -#define PPC_OPERAND_ABSOLUTE (01000) +#define PPC_OPERAND_ABSOLUTE (0x200) /* This operand is optional, and is zero if omitted. This is used for example, in the optional BF field in the comparison instructions. The @@ -266,7 +358,7 @@ extern const struct powerpc_operand powerpc_operands[]; and the number of operands remaining for the opcode, and decide whether this operand is present or not. The disassembler should print this operand out only if it is not zero. */ -#define PPC_OPERAND_OPTIONAL (02000) +#define PPC_OPERAND_OPTIONAL (0x400) /* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand is omitted, then for the next operand use this operand value plus @@ -274,24 +366,48 @@ extern const struct powerpc_operand powerpc_operands[]; hack is needed because the Power rotate instructions can take either 4 or 5 operands. The disassembler should print this operand out regardless of the PPC_OPERAND_OPTIONAL field. */ -#define PPC_OPERAND_NEXT (04000) +#define PPC_OPERAND_NEXT (0x800) /* This operand should be regarded as a negative number for the purposes of overflow checking (i.e., the normal most negative number is disallowed and one more than the normal most positive number is allowed). This flag will only be set for a signed operand. */ -#define PPC_OPERAND_NEGATIVE (010000) +#define PPC_OPERAND_NEGATIVE (0x1000) /* This operand names a vector unit register. The disassembler prints these with a leading 'v'. */ -#define PPC_OPERAND_VR (020000) +#define PPC_OPERAND_VR (0x2000) /* This operand is for the DS field in a DS form instruction. */ -#define PPC_OPERAND_DS (040000) +#define PPC_OPERAND_DS (0x4000) /* This operand is for the DQ field in a DQ form instruction. */ -#define PPC_OPERAND_DQ (0100000) +#define PPC_OPERAND_DQ (0x8000) + +/* Valid range of operand is 0..n rather than 0..n-1. */ +#define PPC_OPERAND_PLUS1 (0x10000) + +/* Xilinx APU and FSL related operands */ +#define PPC_OPERAND_FSL (0x20000) +#define PPC_OPERAND_FCR (0x40000) +#define PPC_OPERAND_UDI (0x80000) + +/* This operand names a vector-scalar unit register. The disassembler + prints these with a leading 'vs'. */ +#define PPC_OPERAND_VSR (0x100000) + +/* This is a CR FIELD that does not use symbolic names. */ +#define PPC_OPERAND_CR_REG (0x200000) + +/* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand + is omitted, then the value it should use for the operand is stored + in the SHIFT field of the immediately following operand field. */ +#define PPC_OPERAND_OPTIONAL_VALUE (0x400000) + +/* This flag is only used with PPC_OPERAND_OPTIONAL. The operand is + only optional when generating 32-bit code. */ +#define PPC_OPERAND_OPTIONAL32 (0x800000) /* The POWER and PowerPC assemblers use a few macros. We keep them with the operands table for simplicity. The macro table is an @@ -308,7 +424,7 @@ struct powerpc_macro /* One bit flags for the opcode. These are used to indicate which specific processors support the instructions.
The values are the same as those for the struct powerpc_opcode flags field. */ - unsigned long flags; + ppc_cpu_t flags; /* A format string to turn the macro into a normal instruction. Each %N in the string is replaced with operand number N (zero @@ -319,4 +435,18 @@ struct powerpc_macro extern const struct powerpc_macro powerpc_macros[]; extern const int powerpc_num_macros; +extern ppc_cpu_t ppc_parse_cpu (ppc_cpu_t, ppc_cpu_t *, const char *); + +static inline long +ppc_optional_operand_value (const struct powerpc_operand *operand) +{ + if ((operand->flags & PPC_OPERAND_OPTIONAL_VALUE) != 0) + return (operand+1)->shift; + return 0; +} + +#ifdef __cplusplus +} +#endif + #endif /* PPC_H */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 5720236d0266..16321ad9e70c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -13,7 +13,7 @@ #include <linux/kernel.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/reboot.h> @@ -212,6 +212,10 @@ Commands:\n\ "\ C checksum\n\ d dump bytes\n\ + d1 dump 1 byte values\n\ + d2 dump 2 byte values\n\ + d4 dump 4 byte values\n\ + d8 dump 8 byte values\n\ di dump instructions\n\ df dump float values\n\ dd dump double values\n\ @@ -2334,9 +2338,42 @@ static void dump_pacas(void) } #endif +static void dump_by_size(unsigned long addr, long count, int size) +{ + unsigned char temp[16]; + int i, j; + u64 val; + + count = ALIGN(count, 16); + + for (i = 0; i < count; i += 16, addr += 16) { + printf(REG, addr); + + if (mread(addr, temp, 16) != 16) { + printf("\nFaulted reading %d bytes from 0x"REG"\n", 16, addr); + return; + } + + for (j = 0; j < 16; j += size) { + putchar(' '); + switch (size) { + case 1: val = temp[j]; break; + case 2: val = *(u16 *)&temp[j]; break; + case 4: val = *(u32 *)&temp[j]; break; + case 8: val = *(u64 *)&temp[j]; break; + default: val = 0; + } + + printf("%0*lx", size * 2, val); + } + printf("\n"); + } +} + static void dump(void) { + static char last[] = { "d?\n" }; int c; c = inchar(); @@ -2350,8 +2387,9 @@ dump(void) } #endif - if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n') + if (c == '\n') termch = c; + scanhex((void *)&adrs); if (termch != '\n') termch = 0; @@ -2383,9 +2421,23 @@ dump(void) ndump = 64; else if (ndump > MAX_DUMP) ndump = MAX_DUMP; - prdump(adrs, ndump); + + switch (c) { + case '8': + case '4': + case '2': + case '1': + ndump = ALIGN(ndump, 16); + dump_by_size(adrs, ndump, c - '0'); + last[1] = c; + last_cmd = last; + break; + default: + prdump(adrs, ndump); + last_cmd = "d\n"; + } + adrs += ndump; - last_cmd = "d\n"; } } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ef031bee7ab..a2dcef0aacc7 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -134,9 +134,11 @@ config S390 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS + select DMA_NOOP_OPS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 5a8dfa22da7c..ef3fb1b9201f 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> +#include <linux/sched/stat.h> #include 
<linux/init.h> #include <linux/slab.h> #include <linux/errno.h> diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 08b9e942a262..45b3178200ab 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -17,6 +17,8 @@ #include <linux/kernel_stat.h> #include <linux/netdevice.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> #include <asm/appldata.h> #include <asm/smp.h> diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index e00975361fec..143b1e00b818 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -678,6 +678,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m +CONFIG_PKEY=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 2cf87343b590..2358bf33c5ef 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -628,6 +628,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m +CONFIG_PKEY=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index d1033de4c4ee..402c530c6da5 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o -obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o +obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o obj-$(CONFIG_S390_PRNG) += prng.o obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c new file mode 100644 index 000000000000..d69ea495c4d7 --- /dev/null +++ b/arch/s390/crypto/paes_s390.c @@ -0,0 +1,619 @@ +/* + * Cryptographic API. + * + * s390 implementation of the AES Cipher Algorithm with protected keys. + * + * s390 Version: + * Copyright IBM Corp. 2017 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + * Harald Freudenberger <freude@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
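For context (a hedged sketch, not part of the patch): the "paes" algorithms registered by this new file take a 64-byte CCA secure key blob as their crypto API key instead of a clear AES key, and convert it to a CPACF protected key internally. In-kernel use would look roughly like any other block cipher:

struct crypto_skcipher *tfm;
u8 blob[64];	/* SECKEYBLOBSIZE secure key blob, e.g. obtained through the pkey interface */
int err;

tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);
err = crypto_skcipher_setkey(tfm, blob, sizeof(blob));
/* ... build a skcipher_request and en/decrypt as usual, then ... */
crypto_free_skcipher(tfm);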
+ * + */ + +#define KMSG_COMPONENT "paes_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/cpufeature.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <crypto/xts.h> +#include <asm/cpacf.h> +#include <asm/pkey.h> + +static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); + +static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; + +struct s390_paes_ctx { + struct pkey_seckey sk; + struct pkey_protkey pk; + unsigned long fc; +}; + +struct s390_pxts_ctx { + struct pkey_seckey sk[2]; + struct pkey_protkey pk[2]; + unsigned long fc; +}; + +static inline int __paes_convert_key(struct pkey_seckey *sk, + struct pkey_protkey *pk) +{ + int i, ret; + + /* try three times in case of failure */ + for (i = 0; i < 3; i++) { + ret = pkey_skey2pkey(sk, pk); + if (ret == 0) + break; + } + + return ret; +} + +static int __paes_set_key(struct s390_paes_ctx *ctx) +{ + unsigned long fc; + + if (__paes_convert_key(&ctx->sk, &ctx->pk)) + return -EINVAL; + + /* Pick the correct function code based on the protected key type */ + fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : + (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : + (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0; + + /* Check if the function code is available */ + ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + + return ctx->fc ? 0 : -EINVAL; +} + +static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (key_len != SECKEYBLOBSIZE) + return -EINVAL; + + memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE); + if (__paes_set_key(ctx)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + return 0; +} + +static int ecb_paes_crypt(struct blkcipher_desc *desc, + unsigned long modifier, + struct blkcipher_walk *walk) +{ + struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + unsigned int nbytes, n, k; + int ret; + + ret = blkcipher_walk_virt(desc, walk); + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey, + walk->dst.virt.addr, walk->src.virt.addr, n); + if (k) + ret = blkcipher_walk_done(desc, walk, nbytes - k); + if (k < n) { + if (__paes_set_key(ctx) != 0) + return blkcipher_walk_done(desc, walk, -EIO); + } + } + return ret; +} + +static int ecb_paes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk); +} + +static int ecb_paes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk); +} + +static struct crypto_alg ecb_paes_alg = { + .cra_name = "ecb(paes)", + .cra_driver_name = "ecb-paes-s390", + .cra_priority = 400, /* combo: aes + ecb */ + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_paes_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list), + .cra_u = { + 
.blkcipher = { + .min_keysize = SECKEYBLOBSIZE, + .max_keysize = SECKEYBLOBSIZE, + .setkey = ecb_paes_set_key, + .encrypt = ecb_paes_encrypt, + .decrypt = ecb_paes_decrypt, + } + } +}; + +static int __cbc_paes_set_key(struct s390_paes_ctx *ctx) +{ + unsigned long fc; + + if (__paes_convert_key(&ctx->sk, &ctx->pk)) + return -EINVAL; + + /* Pick the correct function code based on the protected key type */ + fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 : + (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 : + (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0; + + /* Check if the function code is available */ + ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0; + + return ctx->fc ? 0 : -EINVAL; +} + +static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + + memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE); + if (__cbc_paes_set_key(ctx)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + return 0; +} + +static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, + struct blkcipher_walk *walk) +{ + struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + unsigned int nbytes, n, k; + int ret; + struct { + u8 iv[AES_BLOCK_SIZE]; + u8 key[MAXPROTKEYSIZE]; + } param; + + ret = blkcipher_walk_virt(desc, walk); + memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + k = cpacf_kmc(ctx->fc | modifier, &param, + walk->dst.virt.addr, walk->src.virt.addr, n); + if (k) + ret = blkcipher_walk_done(desc, walk, nbytes - k); + if (k < n) { + if (__cbc_paes_set_key(ctx) != 0) + return blkcipher_walk_done(desc, walk, -EIO); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + } + } + memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); + return ret; +} + +static int cbc_paes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_paes_crypt(desc, 0, &walk); +} + +static int cbc_paes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk); +} + +static struct crypto_alg cbc_paes_alg = { + .cra_name = "cbc(paes)", + .cra_driver_name = "cbc-paes-s390", + .cra_priority = 400, /* combo: aes + cbc */ + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_paes_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = SECKEYBLOBSIZE, + .max_keysize = SECKEYBLOBSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_paes_set_key, + .encrypt = cbc_paes_encrypt, + .decrypt = cbc_paes_decrypt, + } + } +}; + +static int __xts_paes_set_key(struct s390_pxts_ctx *ctx) +{ + unsigned long fc; + + if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) || + __paes_convert_key(&ctx->sk[1], &ctx->pk[1])) + return -EINVAL; + + if (ctx->pk[0].type != ctx->pk[1].type) + return -EINVAL; + + /* Pick the correct function code based on the protected key type */ + fc = (ctx->pk[0].type ==
PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 : + (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ? + CPACF_KM_PXTS_256 : 0; + + /* Check if the function code is available */ + ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + + return ctx->fc ? 0 : -EINVAL; +} + +static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm); + u8 ckey[2 * AES_MAX_KEY_SIZE]; + unsigned int ckey_len; + + memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE); + memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE); + if (__xts_paes_set_key(ctx)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * xts_check_key verifies the key length is not odd and makes + * sure that the two keys are not the same. This can be done + * on the two protected keys as well + */ + ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? + AES_KEYSIZE_128 : AES_KEYSIZE_256; + memcpy(ckey, ctx->pk[0].protkey, ckey_len); + memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); + return xts_check_key(tfm, ckey, 2*ckey_len); +} + +static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, + struct blkcipher_walk *walk) +{ + struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + unsigned int keylen, offset, nbytes, n, k; + int ret; + struct { + u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */ + u8 tweak[16]; + u8 block[16]; + u8 bit[16]; + u8 xts[16]; + } pcc_param; + struct { + u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */ + u8 init[16]; + } xts_param; + + ret = blkcipher_walk_virt(desc, walk); + keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; + offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0; +retry: + memset(&pcc_param, 0, sizeof(pcc_param)); + memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); + cpacf_pcc(ctx->fc, pcc_param.key + offset); + + memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); + memcpy(xts_param.init, pcc_param.xts, 16); + + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + k = cpacf_km(ctx->fc | modifier, xts_param.key + offset, + walk->dst.virt.addr, walk->src.virt.addr, n); + if (k) + ret = blkcipher_walk_done(desc, walk, nbytes - k); + if (k < n) { + if (__xts_paes_set_key(ctx) != 0) + return blkcipher_walk_done(desc, walk, -EIO); + goto retry; + } + } + return ret; +} + +static int xts_paes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return xts_paes_crypt(desc, 0, &walk); +} + +static int xts_paes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return xts_paes_crypt(desc, CPACF_DECRYPT, &walk); +} + +static struct crypto_alg xts_paes_alg = { + .cra_name = "xts(paes)", + .cra_driver_name = "xts-paes-s390", + .cra_priority = 400, /* combo: aes + xts */ + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_pxts_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = 2 * SECKEYBLOBSIZE, + 
.max_keysize = 2 * SECKEYBLOBSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = xts_paes_set_key, + .encrypt = xts_paes_encrypt, + .decrypt = xts_paes_decrypt, + } + } +}; + +static int __ctr_paes_set_key(struct s390_paes_ctx *ctx) +{ + unsigned long fc; + + if (__paes_convert_key(&ctx->sk, &ctx->pk)) + return -EINVAL; + + /* Pick the correct function code based on the protected key type */ + fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 : + (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 : + (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? + CPACF_KMCTR_PAES_256 : 0; + + /* Check if the function code is available */ + ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; + + return ctx->fc ? 0 : -EINVAL; +} + +static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + + memcpy(ctx->sk.seckey, in_key, key_len); + if (__ctr_paes_set_key(ctx)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + return 0; +} + +static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + memcpy(ctrptr, iv, AES_BLOCK_SIZE); + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { + memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); + crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); + ctrptr += AES_BLOCK_SIZE; + } + return n; +} + +static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, + struct blkcipher_walk *walk) +{ + struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + u8 buf[AES_BLOCK_SIZE], *ctrptr; + unsigned int nbytes, n, k; + int ret, locked; + + locked = spin_trylock(&ctrblk_lock); + + ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + n = AES_BLOCK_SIZE; + if (nbytes >= 2*AES_BLOCK_SIZE && locked) + n = __ctrblk_init(ctrblk, walk->iv, nbytes); + ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk->iv; + k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey, + walk->dst.virt.addr, walk->src.virt.addr, + n, ctrptr); + if (k) { + if (ctrptr == ctrblk) + memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + ret = blkcipher_walk_done(desc, walk, nbytes - n); + } + if (k < n) { + if (__ctr_paes_set_key(ctx) != 0) + return blkcipher_walk_done(desc, walk, -EIO); + } + } + if (locked) + spin_unlock(&ctrblk_lock); + /* + * final block may be < AES_BLOCK_SIZE, copy only nbytes + */ + if (nbytes) { + while (1) { + if (cpacf_kmctr(ctx->fc | modifier, + ctx->pk.protkey, buf, + walk->src.virt.addr, AES_BLOCK_SIZE, + walk->iv) == AES_BLOCK_SIZE) + break; + if (__ctr_paes_set_key(ctx) != 0) + return blkcipher_walk_done(desc, walk, -EIO); + } + memcpy(walk->dst.virt.addr, buf, nbytes); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + ret = blkcipher_walk_done(desc, walk, 0); + } + + return ret; +} + +static int ctr_paes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return ctr_paes_crypt(desc, 0, &walk); +} + +static int ctr_paes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk); +} + +static struct crypto_alg ctr_paes_alg = { + .cra_name = "ctr(paes)", + .cra_driver_name = "ctr-paes-s390", + .cra_priority = 400, /* combo: aes + ctr */ + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct s390_paes_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = SECKEYBLOBSIZE, + .max_keysize = SECKEYBLOBSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_paes_set_key, + .encrypt = ctr_paes_encrypt, + .decrypt = ctr_paes_decrypt, + } + } +}; + +static inline void __crypto_unregister_alg(struct crypto_alg *alg) +{ + if (!list_empty(&alg->cra_list)) + crypto_unregister_alg(alg); +} + +static void paes_s390_fini(void) +{ + if (ctrblk) + free_page((unsigned long) ctrblk); + __crypto_unregister_alg(&ctr_paes_alg); + __crypto_unregister_alg(&xts_paes_alg); + __crypto_unregister_alg(&cbc_paes_alg); + __crypto_unregister_alg(&ecb_paes_alg); +} + +static int __init paes_s390_init(void) +{ + int ret; + + /* Query available functions for KM, KMC and KMCTR */ + cpacf_query(CPACF_KM, &km_functions); + cpacf_query(CPACF_KMC, &kmc_functions); + cpacf_query(CPACF_KMCTR, &kmctr_functions); + + if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) || + cpacf_test_func(&km_functions, CPACF_KM_PAES_192) || + cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) { + ret = crypto_register_alg(&ecb_paes_alg); + if (ret) + goto out_err; + } + + if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || + cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || + cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) { + ret = crypto_register_alg(&cbc_paes_alg); + if (ret) + goto out_err; + } + + if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) || + cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) { + ret = crypto_register_alg(&xts_paes_alg); + if (ret) + goto out_err; + } + + if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || + cpacf_test_func(&kmctr_functions, 
CPACF_KMCTR_PAES_192) || + cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { + ret = crypto_register_alg(&ctr_paes_alg); + if (ret) + goto out_err; + ctrblk = (u8 *) __get_free_page(GFP_KERNEL); + if (!ctrblk) { + ret = -ENOMEM; + goto out_err; + } + } + + return 0; +out_err: + paes_s390_fini(); + return ret; +} + +module_init(paes_s390_init); +module_exit(paes_s390_fini); + +MODULE_ALIAS_CRYPTO("aes-all"); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 85b7f5efe06a..5a3ec04a7082 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -20,6 +20,8 @@ #include <linux/cpufeature.h> #include <linux/random.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <asm/debug.h> #include <linux/uaccess.h> #include <asm/timex.h> diff --git a/arch/s390/defconfig b/arch/s390/defconfig index d00e368fb5e6..68bfd09f1b02 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -229,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_ZCRYPT=m +CONFIG_PKEY=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 352f7bdaf11f..0ddd37e6c29d 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -5,6 +5,7 @@ */ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/thread_info.h> #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64)) diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 2c680db7e5c1..e2dfbf280d12 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -28,8 +28,9 @@ #define CPACF_PPNO 0xb93c /* MSA5 */ /* - * Decryption modifier bit + * En/decryption modifier bits */ +#define CPACF_ENCRYPT 0x00 #define CPACF_DECRYPT 0x80 /* @@ -42,8 +43,13 @@ #define CPACF_KM_AES_128 0x12 #define CPACF_KM_AES_192 0x13 #define CPACF_KM_AES_256 0x14 +#define CPACF_KM_PAES_128 0x1a +#define CPACF_KM_PAES_192 0x1b +#define CPACF_KM_PAES_256 0x1c #define CPACF_KM_XTS_128 0x32 #define CPACF_KM_XTS_256 0x34 +#define CPACF_KM_PXTS_128 0x3a +#define CPACF_KM_PXTS_256 0x3c /* * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) @@ -56,6 +62,9 @@ #define CPACF_KMC_AES_128 0x12 #define CPACF_KMC_AES_192 0x13 #define CPACF_KMC_AES_256 0x14 +#define CPACF_KMC_PAES_128 0x1a +#define CPACF_KMC_PAES_192 0x1b +#define CPACF_KMC_PAES_256 0x1c #define CPACF_KMC_PRNG 0x43 /* @@ -69,6 +78,9 @@ #define CPACF_KMCTR_AES_128 0x12 #define CPACF_KMCTR_AES_192 0x13 #define CPACF_KMCTR_AES_256 0x14 +#define CPACF_KMCTR_PAES_128 0x1a +#define CPACF_KMCTR_PAES_192 0x1b +#define CPACF_KMCTR_PAES_256 0x1c /* * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) @@ -99,6 +111,18 @@ #define CPACF_KMAC_TDEA_192 0x03 /* + * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) + * instruction + */ +#define CPACF_PCKMO_QUERY 0x00 +#define CPACF_PCKMO_ENC_DES_KEY 0x01 +#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 +#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 +#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 +#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 +#define CPACF_PCKMO_ENC_AES_256_KEY 0x14 + +/* * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION) * instruction */ @@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long 
func, void *param) : "cc", "memory"); } +/** + * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY + * MANAGEMENT) instruction + * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines + * @param: address of parameter block; see POP for details on each func + * + * Returns 0. + */ +static inline void cpacf_pckmo(long func, void *param) +{ + register unsigned long r0 asm("0") = (unsigned long) func; + register unsigned long r1 asm("1") = (unsigned long) param; + + asm volatile( + " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */ + : + : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO) + : "cc", "memory"); +} + #endif /* _ASM_S390_CPACF_H */ diff --git a/arch/s390/include/asm/device.h b/arch/s390/include/asm/device.h index 4a9f35e0973f..5203fc87f080 100644 --- a/arch/s390/include/asm/device.h +++ b/arch/s390/include/asm/device.h @@ -4,7 +4,6 @@ * This file is released under the GPLv2 */ struct dev_archdata { - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index ffaba07f50ab..3108b8dbe266 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -10,12 +10,10 @@ #define DMA_ERROR_CODE (~(dma_addr_t) 0x0) -extern struct dma_map_ops s390_pci_dma_ops; +extern const struct dma_map_ops s390_pci_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; return &dma_noop_ops; } diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 83aaefed2a7b..1d48880b3cc1 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -132,7 +132,7 @@ typedef s390_fp_regs compat_elf_fpregset_t; typedef s390_compat_regs compat_elf_gregset_t; #include <linux/compat.h> -#include <linux/sched.h> /* for task_struct */ +#include <linux/sched/mm.h> /* for task_struct */ #include <asm/mmu_context.h> #include <asm/vdso.h> diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 591e5a5279b0..1293c4066cfc 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -27,9 +27,15 @@ * 2005-Dec Used as a template for s390 by Mike Grundy * <grundym@us.ibm.com> */ +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0x0002 + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> +#include <linux/sched/task_stack.h> #define __ARCH_WANT_KPROBES_INSN_SLOT @@ -37,7 +43,6 @@ struct pt_regs; struct kprobe; typedef u16 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0x0002 /* Maximum instruction size is 3 (16bit) halfwords: */ #define MAX_INSN_SIZE 0x0003 @@ -91,4 +96,5 @@ int probe_is_insn_relative_long(u16 *insn); #define flush_insn_slot(p) do { } while (0) +#endif /* CONFIG_KPROBES */ #endif /* _ASM_S390_KPROBES_H */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 67f7a991c929..6e31d87fb669 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -9,6 +9,7 @@ #include <asm/pgalloc.h> #include <linux/uaccess.h> +#include <linux/mm_types.h> #include <asm/tlbflush.h> #include <asm/ctl_reg.h> @@ -63,7 +64,7 @@ static inline void set_user_asce(struct mm_struct *mm) S390_lowcore.user_asce = mm->context.asce; if (current->thread.mm_segment.ar4) __ctl_load(S390_lowcore.user_asce, 7, 
7); - set_cpu_flag(CIF_ASCE); + set_cpu_flag(CIF_ASCE_PRIMARY); } static inline void clear_user_asce(void) @@ -81,7 +82,7 @@ static inline void load_kernel_asce(void) __ctl_store(asce, 1, 1); if (asce != S390_lowcore.kernel_asce) __ctl_load(S390_lowcore.kernel_asce, 1, 1); - set_cpu_flag(CIF_ASCE); + set_cpu_flag(CIF_ASCE_PRIMARY); } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 52511866fb14..7ed1972b1920 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud) static inline int pmd_present(pmd_t pmd) { - return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; + return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; } static inline int pmd_none(pmd_t pmd) { - return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID; + return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; } static inline unsigned long pmd_pfn(pmd_t pmd) @@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud) static inline void pmd_clear(pmd_t *pmdp) { - pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID; + pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); + return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); } #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL @@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, { if (full) { pmd_t pmd = *pmdp; - *pmdp = __pmd(_SEGMENT_ENTRY_INVALID); + *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); return pmd; } - return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); + return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); } #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH @@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); + pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); } #define __HAVE_ARCH_PMDP_SET_WRPROTECT diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h new file mode 100644 index 000000000000..b48aef4188f6 --- /dev/null +++ b/arch/s390/include/asm/pkey.h @@ -0,0 +1,90 @@ +/* + * Kernelspace interface to the pkey device driver + * + * Copyright IBM Corp. 2016 + * + * Author: Harald Freudenberger <freude@de.ibm.com> + * + */ + +#ifndef _KAPI_PKEY_H +#define _KAPI_PKEY_H + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <uapi/asm/pkey.h> + +/* + * Generate (AES) random secure key. + * @param cardnr may be -1 (use default card) + * @param domain may be -1 (use default domain) + * @param keytype one of the PKEY_KEYTYPE values + * @param seckey pointer to buffer receiving the secure key + * @return 0 on success, negative errno value on failure + */ +int pkey_genseckey(__u16 cardnr, __u16 domain, + __u32 keytype, struct pkey_seckey *seckey); + +/* + * Generate (AES) secure key with given key value. 
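Taken together, the declarations in this header give kernel code a path from no key material at all to a CPACF protected key. A minimal sketch (not part of the patch), assuming the default card/domain and ignoring error details:

struct pkey_seckey sk;
struct pkey_protkey pk;

/* generate a random AES-256 secure key on any available card/domain ... */
if (pkey_genseckey(-1, -1, PKEY_KEYTYPE_AES_256, &sk))
	return -EIO;
/* ... and convert it into a protected key usable by the CPACF instructions */
if (pkey_skey2pkey(&sk, &pk))
	return -EIO;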
+ * @param cardnr may be -1 (use default card) + * @param domain may be -1 (use default domain) + * @param keytype one of the PKEY_KEYTYPE values + * @param clrkey pointer to buffer with clear key data + * @param seckey pointer to buffer receiving the secure key + * @return 0 on success, negative errno value on failure + */ +int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_seckey *seckey); + +/* + * Derive (AES) protected key from the (AES) secure key blob. + * @param cardnr may be -1 (use default card) + * @param domain may be -1 (use default domain) + * @param seckey pointer to buffer with the input secure key + * @param protkey pointer to buffer receiving the protected key and + * additional info (type, length) + * @return 0 on success, negative errno value on failure + */ +int pkey_sec2protkey(__u16 cardnr, __u16 domain, + const struct pkey_seckey *seckey, + struct pkey_protkey *protkey); + +/* + * Derive (AES) protected key from a given clear key value. + * @param keytype one of the PKEY_KEYTYPE values + * @param clrkey pointer to buffer with clear key data + * @param protkey pointer to buffer receiving the protected key and + * additional info (type, length) + * @return 0 on success, negative errno value on failure + */ +int pkey_clr2protkey(__u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_protkey *protkey); + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key. + * @param seckey pointer to buffer with the input secure key + * @param cardnr pointer to cardnr, receives the card number on success + * @param domain pointer to domain, receives the domain number on success + * @param verify if set, always verify by fetching verification pattern + * from card + * @return 0 on success, negative errno value on failure. If no card could be + * found, -ENODEV is returned. + */ +int pkey_findcard(const struct pkey_seckey *seckey, + __u16 *cardnr, __u16 *domain, int verify); + +/* + * Find card and transform secure key to protected key.
+ * @param seckey pointer to buffer with the input secure key + * @param protkey pointer to buffer receiving the protected key and + * additional info (type, length) + * @return 0 on success, negative errno value on failure + */ +int pkey_skey2pkey(const struct pkey_seckey *seckey, + struct pkey_protkey *protkey); + +#endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index dacba341e475..e4988710aa86 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,14 +14,16 @@ #include <linux/const.h> #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ -#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ -#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ -#define CIF_FPU 3 /* restore FPU registers */ -#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */ -#define CIF_ENABLED_WAIT 5 /* in enabled wait state */ +#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */ +#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */ +#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */ +#define CIF_FPU 4 /* restore FPU registers */ +#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */ +#define CIF_ENABLED_WAIT 6 /* in enabled wait state */ #define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) -#define _CIF_ASCE _BITUL(CIF_ASCE) +#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY) +#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY) #define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY) #define _CIF_FPU _BITUL(CIF_FPU) #define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) @@ -89,7 +91,8 @@ extern void execve_tail(void); * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. */ -#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) +#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \ + (tsk)->mm->context.asce_limit : TASK_MAX_SIZE) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ (1UL << 30) : (1UL << 41)) #define TASK_SIZE TASK_SIZE_OF(current) @@ -200,10 +203,12 @@ struct stack_frame { struct task_struct; struct mm_struct; struct seq_file; +struct pt_regs; typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable); void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task, unsigned long sp); +void show_registers(struct pt_regs *regs); void show_cacheinfo(struct seq_file *m); diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index b2988fc60f65..136932ff4250 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -14,6 +14,7 @@ */ #include <linux/sched.h> #include <linux/errno.h> +#include <asm/processor.h> #include <asm/ctl_reg.h> #define VERIFY_READ 0 @@ -36,18 +37,20 @@ #define get_ds() (KERNEL_DS) #define get_fs() (current->thread.mm_segment) - -#define set_fs(x) \ -do { \ - unsigned long __pto; \ - current->thread.mm_segment = (x); \ - __pto = current->thread.mm_segment.ar4 ? 
\ - S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ - __ctl_load(__pto, 7, 7); \ -} while (0) - #define segment_eq(a,b) ((a).ar4 == (b).ar4) +static inline void set_fs(mm_segment_t fs) +{ + current->thread.mm_segment = fs; + if (segment_eq(fs, KERNEL_DS)) { + set_cpu_flag(CIF_ASCE_SECONDARY); + __ctl_load(S390_lowcore.kernel_asce, 7, 7); + } else { + clear_cpu_flag(CIF_ASCE_SECONDARY); + __ctl_load(S390_lowcore.user_asce, 7, 7); + } +} + static inline int __range_ok(unsigned long addr, unsigned long size) { return 1; diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index bf736e764cb4..6848ba5c1454 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -24,6 +24,7 @@ header-y += mman.h header-y += monwriter.h header-y += msgbuf.h header-y += param.h +header-y += pkey.h header-y += poll.h header-y += posix_types.h header-y += ptrace.h diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h new file mode 100644 index 000000000000..ed7f19c27ce5 --- /dev/null +++ b/arch/s390/include/uapi/asm/pkey.h @@ -0,0 +1,112 @@ +/* + * Userspace interface to the pkey device driver + * + * Copyright IBM Corp. 2017 + * + * Author: Harald Freudenberger <freude@de.ibm.com> + * + */ + +#ifndef _UAPI_PKEY_H +#define _UAPI_PKEY_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * Ioctl calls supported by the pkey device driver + */ + +#define PKEY_IOCTL_MAGIC 'p' + +#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */ +#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */ +#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */ + +/* defines for the type field within the pkey_protkey struct */ +#define PKEY_KEYTYPE_AES_128 1 +#define PKEY_KEYTYPE_AES_192 2 +#define PKEY_KEYTYPE_AES_256 3 + +/* Struct to hold a secure key blob */ +struct pkey_seckey { + __u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */ +}; + +/* Struct to hold protected key and length info */ +struct pkey_protkey { + __u32 type; /* key type, one of the PKEY_KEYTYPE values */ + __u32 len; /* bytes actually stored in protkey[] */ + __u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ +}; + +/* Struct to hold a clear key value */ +struct pkey_clrkey { + __u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */ +}; + +/* + * Generate secure key + */ +struct pkey_genseck { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + __u32 keytype; /* in: key type to generate */ + struct pkey_seckey seckey; /* out: the secure key blob */ +}; +#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck) + +/* + * Construct secure key from clear key value + */ +struct pkey_clr2seck { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + __u32 keytype; /* in: key type to generate */ + struct pkey_clrkey clrkey; /* in: the clear key value */ + struct pkey_seckey seckey; /* out: the secure key blob */ +}; +#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck) + +/* + * Fabricate protected key from a secure key + */ +struct pkey_sec2protk { + __u16 cardnr; /* in: card to use or FFFF for any */ + __u16 domain; /* in: domain or FFFF for any */ + struct pkey_seckey seckey; /* in: the secure key blob */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk) + +/* + * 
Fabricate protected key from a clear key value + */ +struct pkey_clr2protk { + __u32 keytype; /* in: key type to generate */ + struct pkey_clrkey clrkey; /* in: the clear key value */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk) + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key. + */ +struct pkey_findcard { + struct pkey_seckey seckey; /* in: the secure key blob */ + __u16 cardnr; /* out: card number */ + __u16 domain; /* out: domain number */ +}; +#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard) + +/* + * Combined together: findcard + sec2prot + */ +struct pkey_skey2pkey { + struct pkey_seckey seckey; /* in: the secure key blob */ + struct pkey_protkey protkey; /* out: the protected key */ +}; +#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) + +#endif /* _UAPI_PKEY_H */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 362350cc485c..c620049c61f2 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -10,6 +10,7 @@ #include <linux/compat.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 55d4fe174fd9..829e1c53005c 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -14,6 +14,8 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <asm/processor.h> #include <asm/debug.h> #include <asm/dis.h> diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index db469fa11462..dff2152350a7 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -50,7 +50,8 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_UPROBE) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) -_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU) +_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ + _CIF_ASCE_SECONDARY | _CIF_FPU) _PIF_WORK = (_PIF_PER_TRAP) #define BASED(name) name-cleanup_critical(%r13) @@ -339,8 +340,8 @@ ENTRY(system_call) jo .Lsysc_notify_resume TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsysc_vxrs - TSTMSK __LC_CPU_FLAGS,_CIF_ASCE - jo .Lsysc_uaccess + TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) + jnz .Lsysc_asce j .Lsysc_return # beware of critical section cleanup # @@ -358,12 +359,15 @@ ENTRY(system_call) jg s390_handle_mcck # TIF bit will be cleared by handler # -# _CIF_ASCE is set, load user space asce +# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce # -.Lsysc_uaccess: - ni __LC_CPU_FLAGS+7,255-_CIF_ASCE +.Lsysc_asce: + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j .Lsysc_return + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY + jz .Lsysc_return + larl %r14,.Lsysc_return + jg set_fs_fixup # # CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -661,8 +665,8 @@ ENTRY(io_int_handler) jo .Lio_notify_resume TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lio_vxrs - TSTMSK __LC_CPU_FLAGS,_CIF_ASCE - jo .Lio_uaccess + TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) + jnz .Lio_asce j .Lio_return # beware of critical section cleanup # @@ -675,12 +679,15 @@ ENTRY(io_int_handler) j .Lio_return # -# _CIF_ASCE is set, load user space asce +# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce # -.Lio_uaccess: - ni __LC_CPU_FLAGS+7,255-_CIF_ASCE +.Lio_asce: + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j .Lio_return + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY + jz .Lio_return + larl %r14,.Lio_return + jg set_fs_fixup # # CIF_FPU is set, restore floating-point controls and floating-point registers. diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index e79f030dd276..33f901865326 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); DECLARE_PER_CPU(u64, mt_cycles[8]); void verify_facilities(void); +void set_fs_fixup(void); #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index fb07a70820af..9340b2a07935 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -12,7 +12,7 @@ #include <linux/notifier.h> #include <linux/init.h> #include <linux/cpu.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/nmi.h> #include <asm/smp.h> #include "entry.h" diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 56e14d073167..9bf8327154ee 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -13,6 +13,9 @@ #include <linux/errno.h> #include <linux/hardirq.h> #include <linux/time.h> +#include <linux/module.h> +#include <linux/sched/signal.h> + #include <linux/export.h> #include <asm/lowcore.h> #include <asm/smp.h> @@ -116,6 +119,19 @@ static int notrace s390_validate_registers(union mci mci, int umode) s390_handle_damage(); kill_task = 1; } + /* Validate control registers */ + if (!mci.cr) { + /* + * Control registers have unknown contents. + * Can't recover and therefore stopping machine. + */ + s390_handle_damage(); + } else { + asm volatile( + " lctlg 0,15,0(%0)\n" + " ptlb\n" + : : "a" (&S390_lowcore.cregs_save_area) : "memory"); + } if (!mci.fp) { /* * Floating point registers can't be restored. If the @@ -208,18 +224,6 @@ static int notrace s390_validate_registers(union mci mci, int umode) */ kill_task = 1; } - /* Validate control registers */ - if (!mci.cr) { - /* - * Control registers have unknown contents. - * Can't recover and therefore stopping machine. - */ - s390_handle_damage(); - } else { - asm volatile( - " lctlg 0,15,0(%0)" - : : "a" (&S390_lowcore.cregs_save_area) : "memory"); - } /* * We don't even try to validate the TOD register, since we simply * can't write something sensible into that register. 
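The uaccess.h and entry.S changes above split the old CIF_ASCE flag into CIF_ASCE_PRIMARY and CIF_ASCE_SECONDARY so that set_fs() only has to manage the secondary ASCE in %cr7, and set_fs_fixup() (added to process.c below) warns when a kernel path returns to user space with KERNEL_DS still active. The pattern being policed is the usual balanced get_fs()/set_fs() sequence; a sketch with a placeholder helper, not taken from the patch:

mm_segment_t old_fs = get_fs();

set_fs(KERNEL_DS);		/* sets CIF_ASCE_SECONDARY, loads the kernel asce into %cr7 */
ret = access_kernel_buffer();	/* placeholder for code doing uaccess on a kernel pointer */
set_fs(old_fs);			/* leaving this out now triggers the WARN in set_fs_fixup() */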
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index c5b86b4a1a8b..20cd339e11ae 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -11,6 +11,9 @@ #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> @@ -100,8 +103,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread(unsigned long clone_flags, unsigned long new_stackp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct fake_frame { @@ -156,7 +159,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) { - unsigned long tls = frame->childregs.gprs[6]; if (is_compat_task()) { p->thread.acrs[0] = (unsigned int)tls; } else { @@ -234,3 +236,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) ret = PAGE_ALIGN(mm->brk + brk_rnd()); return (ret > mm->brk) ? ret : mm->brk; } + +void set_fs_fixup(void) +{ + struct pt_regs *regs = current_pt_regs(); + static bool warned; + + set_fs(USER_DS); + if (warned) + return; + WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code); + show_registers(regs); + warned = true; +} diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 21004aaac69b..928b929a6261 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -8,10 +8,13 @@ #include <linux/cpufeature.h> #include <linux/kernel.h> +#include <linux/sched/mm.h> #include <linux/init.h> #include <linux/seq_file.h> +#include <linux/mm_types.h> #include <linux/delay.h> #include <linux/cpu.h> + #include <asm/diag.h> #include <asm/facility.h> #include <asm/elf.h> @@ -73,7 +76,7 @@ void cpu_init(void) get_cpu_id(id); if (machine_has_cpu_mhz) update_cpu_mhz(NULL); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 12020b55887b..c14df0a1ec3c 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index fffa0e5462af..429d3a782f1c 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -11,6 +11,8 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel_stat.h> +#include <linux/sched/task_stack.h> + #include <asm/runtime_instr.h> #include <asm/cpu_mf.h> #include <asm/irq.h> diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e4d811f17971..911dc0b49be0 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -18,6 +18,8 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/cpu.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/mm.h> diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 62a4c263e887..289dd50f9744 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -10,6 
+10,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index d0a74d7ce433..47a973b5b4f1 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -31,6 +31,8 @@ #include <linux/irqflags.h> #include <linux/cpu.h> #include <linux/slab.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/crash_dump.h> #include <linux/memblock.h> #include <asm/asm-offsets.h> diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 0085b2d8ed7d..e66687dc6144 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -6,6 +6,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <linux/kallsyms.h> #include <linux/export.h> diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index de66abb479c9..c31da46bc037 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -18,6 +18,7 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 2cd5f4f1013c..17660e800e74 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/topology.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 283ad7840335..f787b9d8f54c 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -17,6 +17,7 @@ #include <linux/extable.h> #include <linux/ptrace.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 66956c09d5bf..314e0ee3016a 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -9,6 +9,8 @@ #include <linux/uprobes.h> #include <linux/compat.h> #include <linux/kdebug.h> +#include <linux/sched/task_stack.h> + #include <asm/switch_to.h> #include <asm/facility.h> #include <asm/kprobes.h> diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index b4a3e9e06ef2..c14fc9029912 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -6,7 +6,7 @@ */ #include <linux/kernel_stat.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/timex.h> @@ -350,7 +350,7 @@ static void __add_vtimer(struct vtimer_list *timer, int periodic) } /* - * add_virt_timer - add an oneshot virtual CPU timer + * add_virt_timer - add a oneshot virtual CPU timer */ void add_virt_timer(struct vtimer_list *timer) { diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 4492c9363178..d55c829a5944 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -6,7 +6,9 @@ */ #include <linux/vmalloc.h> +#include <linux/mm_types.h> #include <linux/err.h> + #include <asm/pgtable.h> #include <asm/gmap.h> #include "kvm-s390.h" diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f5694838234d..fd6cd05bb6a7 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -29,6 +29,8 @@ #include <linux/timer.h> 
#include <linux/vmalloc.h> #include <linux/bitmap.h> +#include <linux/sched/signal.h> + #include <asm/asm-offsets.h> #include <asm/lowcore.h> #include <asm/stp.h> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index fb4b494cde9b..64b6a309f2c4 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -15,6 +15,8 @@ #include <linux/gfp.h> #include <linux/errno.h> #include <linux/compat.h> +#include <linux/mm_types.h> + #include <asm/asm-offsets.h> #include <asm/facility.h> #include <asm/current.h> diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 38556e395915..5491be39776b 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -14,6 +14,8 @@ #include <linux/bug.h> #include <linux/list.h> #include <linux/bitmap.h> +#include <linux/sched/signal.h> + #include <asm/gmap.h> #include <asm/mmu_context.h> #include <asm/sclp.h> diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index bb5560eb2435..5845d3028ffc 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -12,6 +12,7 @@ #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 59ac93714fa4..a07b1ec1391d 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr) spin_lock(&gmap->guest_table_lock); entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT); if (entry) { - flush = (*entry != _SEGMENT_ENTRY_INVALID); - *entry = _SEGMENT_ENTRY_INVALID; + flush = (*entry != _SEGMENT_ENTRY_EMPTY); + *entry = _SEGMENT_ENTRY_EMPTY; } spin_unlock(&gmap->guest_table_lock); return flush; @@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) return rc; ptl = pmd_lock(mm, pmd); spin_lock(&gmap->guest_table_lock); - if (*table == _SEGMENT_ENTRY_INVALID) { + if (*table == _SEGMENT_ENTRY_EMPTY) { rc = radix_tree_insert(&gmap->host_to_guest, vmaddr >> PMD_SHIFT, table); if (!rc) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index a03816227719..9b4050caa4e9 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte) rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC, _SEGMENT_ENTRY_NOEXEC); } else - rste = _SEGMENT_ENTRY_INVALID; + rste = _SEGMENT_ENTRY_EMPTY; return rste; } diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 7ae1282d5be9..50618614881f 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -26,6 +26,8 @@ #include <linux/personality.h> #include <linux/mm.h> #include <linux/mman.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/random.h> #include <linux/compat.h> #include <linux/security.h> diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 4c0fa9b3b2a0..364b9d824be3 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -641,7 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev) int i; pdev->dev.groups = zpci_attr_groups; - pdev->dev.archdata.dma_ops = &s390_pci_dma_ops; + pdev->dev.dma_ops = &s390_pci_dma_ops; zpci_map_resources(pdev); for (i = 0; i < PCI_BAR_COUNT; i++) { diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 1d7a9c71944a..9081a57fa340 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -650,7 +650,7 @@ static int __init 
dma_debug_do_init(void) } fs_initcall(dma_debug_do_init); -struct dma_map_ops s390_pci_dma_ops = { +const struct dma_map_ops s390_pci_dma_ops = { .alloc = s390_dma_alloc, .free = s390_dma_free, .map_sg = s390_dma_map_sg, diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index db3e28ca3ae2..926943a49ea5 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -13,3 +13,4 @@ generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h generic-y += word-at-a-time.h +generic-y += kprobes.h diff --git a/arch/score/include/asm/mmu_context.h b/arch/score/include/asm/mmu_context.h index 2644577c96e8..073f95d350de 100644 --- a/arch/score/include/asm/mmu_context.h +++ b/arch/score/include/asm/mmu_context.h @@ -3,7 +3,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <linux/slab.h> + #include <asm-generic/mm_hooks.h> #include <asm/cacheflush.h> diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index aae9480706c2..eb64d7a677cb 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -28,6 +28,8 @@ #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/rcupdate.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 8b75e54816c1..d8455e60bce0 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c @@ -28,6 +28,7 @@ #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/regset.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index 2b22bcf02c27..e359ec675869 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -24,7 +24,10 @@ */ #include <linux/extable.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/mm_types.h> #include <asm/cacheflush.h> #include <asm/irq.h> @@ -336,7 +339,7 @@ void __init trap_init(void) set_except_vector(18, handle_dbe); flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; cpu_cache_init(); } diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c index 49bace446a1a..c6d96049a0bb 100644 --- a/arch/sh/drivers/heartbeat.c +++ b/arch/sh/drivers/heartbeat.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/timer.h> #include <linux/io.h> #include <linux/slab.h> diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 0052ad40e86d..d99008af5f73 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,10 +1,10 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern void no_iommu_init(void); -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return dma_ops; } diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 09fc2bc8a790..50921c7cc3f0 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -3,6 +3,8 @@ #ifndef __ASSEMBLY__ +#include <asm/ptrace.h> + struct task_struct; 
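The dma_map_ops hunks in this range (the s390 pci and pci_dma changes above, the removal of archdata.dma_ops, and the sh dma-mapping/dma-nommu changes) follow the tree-wide constification: per-device ops now live in dev->dma_ops and the architecture only supplies a bus-level default via get_arch_dma_ops(). The common lookup in <linux/dma-mapping.h> then reads roughly as follows (simplified sketch, not part of this diff):

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;	/* per-device override, e.g. set by s390's pcibios_add_device() */
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}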
#ifdef CONFIG_SH_FPU diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 134f3980e44a..f0986f9b3844 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -1,13 +1,16 @@ #ifndef __ASM_SH_KPROBES_H #define __ASM_SH_KPROBES_H +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0xc33a + #ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> typedef insn_size_t kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xc33a #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 35ffdd081d26..eb6ac3c10c44 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -11,6 +11,8 @@ #include <cpu/mmu_context.h> #include <asm/tlbflush.h> #include <linux/uaccess.h> +#include <linux/mm_types.h> + #include <asm/io.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c index 4e332244ea75..547c73478459 100644 --- a/arch/sh/kernel/cpu/fpu.c +++ b/arch/sh/kernel/cpu/fpu.c @@ -1,8 +1,11 @@ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <asm/processor.h> #include <asm/fpu.h> #include <asm/traps.h> +#include <asm/ptrace.h> int init_fpu(struct task_struct *tsk) { diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 98bbaa447c93..352f894bece1 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c @@ -9,7 +9,7 @@ * * FIXME! These routines can be optimized in big endian case. */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <asm/processor.h> #include <asm/io.h> diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 69ab4d3c8d41..95fd2dcb83da 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -10,8 +10,7 @@ * * FIXME! These routines have not been tested for big endian case. */ -#include <linux/sched.h> -#include <linux/signal.h> +#include <linux/sched/signal.h> #include <linux/io.h> #include <cpu/fpu.h> #include <asm/processor.h> diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 64d5d8dded7c..015fee58014b 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -12,6 +12,8 @@ #include <linux/string.h> #include <linux/uaccess.h> +#include <asm/ptrace.h> + /* * Format of an instruction in memory. 
*/ diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 47fee3b6e29c..d24c707b2181 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -65,7 +65,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, } #endif -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, .map_page = nommu_map_page, diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 8dfe645bcc4b..b564b1eae4ae 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -11,6 +11,8 @@ #include <linux/kallsyms.h> #include <linux/ftrace.h> #include <linux/debug_locks.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/kdebug.h> #include <linux/export.h> #include <linux/uaccess.h> diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 2197fc584186..afe965712a69 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -11,6 +11,7 @@ */ #include <linux/init.h> #include <linux/perf_event.h> +#include <linux/sched/signal.h> #include <linux/hw_breakpoint.h> #include <linux/percpu.h> #include <linux/kallsyms.h> diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index bc3591125df7..04487e8fc9b1 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -99,7 +99,7 @@ static inline void handle_one_irq(unsigned int irq) "mov %0, r4 \n" "mov r15, r8 \n" "jsr @%1 \n" - /* swith to the irq stack */ + /* switch to the irq stack */ " mov %2, r15 \n" /* restore the stack (ring zero) */ "mov r8, r15 \n" diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index adad46e41a1d..4f04c6638a4d 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -14,6 +14,8 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> + #include <asm/cacheflush.h> #include <asm/traps.h> diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c index ff0abbd1e652..730d928f0d12 100644 --- a/arch/sh/kernel/nmi_debug.c +++ b/arch/sh/kernel/nmi_debug.c @@ -9,6 +9,7 @@ #include <linux/kdebug.h> #include <linux/notifier.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/hardirq.h> enum nmi_action { diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 53bc6c4c84ec..f8a695a223dd 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -1,10 +1,12 @@ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> #include <linux/export.h> #include <linux/stackprotector.h> #include <asm/fpu.h> +#include <asm/ptrace.h> struct kmem_cache *task_xstate_cachep = NULL; unsigned int xstate_size; diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 51741850a715..2c7bdf8cb934 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -15,6 +15,9 @@ */ #include <linux/module.h> #include <linux/mm.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/elfcore.h> #include <linux/kallsyms.h> diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index e0b271bffd6a..ee2abe96f9f3 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -25,6 +25,9 @@ #include <linux/init.h> #include <linux/module.h> 
#include <linux/io.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <asm/syscalls.h> #include <linux/uaccess.h> #include <asm/pgtable.h> diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 1aabfd356b35..5fc3ff606210 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index c49d0d05a215..1e0656d9e7af 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/rwsem.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/bitops.h> diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 5128d3001ee5..08bce11badc6 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -9,6 +9,7 @@ * */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 38e7860845db..c483422ea4d0 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -20,7 +20,8 @@ #include <linux/module.h> #include <linux/cpu.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> #include <linux/atomic.h> #include <linux/clockchips.h> #include <asm/processor.h> @@ -178,8 +179,8 @@ asmlinkage void start_secondary(void) struct mm_struct *mm = &init_mm; enable_mmu(); - atomic_inc(&mm->mm_count); - atomic_inc(&mm->mm_users); + mmgrab(mm); + mmget(mm); current->active_mm = mm; #ifdef CONFIG_MMU enter_lazy_tlb(mm, current); diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index bf989e063a0c..7a73d2763e1b 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -10,6 +10,7 @@ * for more details. 
*/ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index d5287d76809c..a2e1231a90a3 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c @@ -1,5 +1,6 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 9513fa7840aa..b32d1c3a4655 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -4,10 +4,14 @@ #include <linux/kdebug.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include <linux/kernel.h> #include <linux/kexec.h> +#include <linux/sched/signal.h> + #include <linux/extable.h> #include <linux/module.h> /* print_modules */ #include <asm/unwinder.h> diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index ff639342a8be..57cff00cad17 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -25,6 +25,8 @@ #include <linux/sysfs.h> #include <linux/uaccess.h> #include <linux/perf_event.h> +#include <linux/sched/task_stack.h> + #include <asm/alignment.h> #include <asm/fpu.h> #include <asm/kprobes.h> diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 00835edb6e20..014fb08cf133 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -10,6 +10,7 @@ * for more details. */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index 5078cb809750..c86f4360c6ce 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -10,7 +10,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/perf_event.h> diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c index bf95fdaedd0c..e5539e0f8e3b 100644 --- a/arch/sh/mm/asids-debugfs.c +++ b/arch/sh/mm/asids-debugfs.c @@ -20,6 +20,9 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> + #include <asm/processor.h> #include <asm/mmu_context.h> diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 92b6976fde59..d1275adfa0ef 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -22,7 +22,7 @@ #define PREALLOC_DMA_DEBUG_ENTRIES 4096 -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) diff --git a/arch/sh/mm/extable_32.c b/arch/sh/mm/extable_32.c index 24a75d315dcb..940e871bc816 100644 --- a/arch/sh/mm/extable_32.c +++ b/arch/sh/mm/extable_32.c @@ -7,6 +7,8 @@ #include <linux/extable.h> #include <linux/uaccess.h> +#include <asm/ptrace.h> + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 9bf876780cef..6fd1bf7481c7 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -13,6 +13,7 @@ */ #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include 
<linux/perf_event.h> diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6777177807c2..08e7af0be4a7 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -9,6 +9,7 @@ */ #include <linux/io.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/mman.h> #include <linux/module.h> #include <asm/page.h> diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 1180ae254154..69cc627779f2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -18,20 +18,20 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, */ } -extern struct dma_map_ops *dma_ops; -extern struct dma_map_ops *leon_dma_ops; -extern struct dma_map_ops pci32_dma_ops; +extern const struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *leon_dma_ops; +extern const struct dma_map_ops pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) return leon_dma_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) - if (dev->bus == &pci_bus_type) + if (bus == &pci_bus_type) return &pci32_dma_ops; #endif return dma_ops; diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h index a145d798e112..49f8402035d7 100644 --- a/arch/sparc/include/asm/kprobes.h +++ b/arch/sparc/include/asm/kprobes.h @@ -1,13 +1,17 @@ #ifndef _SPARC64_KPROBES_H #define _SPARC64_KPROBES_H +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0x91d02070 /* ta 0x70 */ +#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/percpu.h> typedef u32 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0x91d02070 /* ta 0x70 */ -#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ #define MAX_INSN_SIZE 2 #define kretprobe_blacklist_size 0 @@ -48,4 +52,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, int kprobe_fault_handler(struct pt_regs *regs, int trapnr); asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ #endif /* _SPARC64_KPROBES_H */ diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index d0317993e947..22fede6eba11 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -6,6 +6,8 @@ #ifndef __ASSEMBLY__ #include <linux/spinlock.h> +#include <linux/mm_types.h> + #include <asm/spitfire.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7932a4a37817..56e49c8f770d 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -878,6 +878,9 @@ static inline unsigned long pud_pfn(pud_t pud) #define pte_offset_map pte_index #define pte_unmap(pte) do { } while (0) +/* We cannot include <linux/mm_types.h> at this point yet: */ +extern struct mm_struct init_mm; + /* Actual page table PTE updates. 
*/ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm, diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h index 16f10374feb3..475dd4158ae4 100644 --- a/arch/sparc/include/asm/switch_to_32.h +++ b/arch/sparc/include/asm/switch_to_32.h @@ -9,7 +9,7 @@ extern struct thread_info *current_set[NR_CPUS]; * Flush windows so that the VM switch which follows * would not pull the stack from under us. * - * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) + * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) * XXX WTF is the above comment? Found in late teen 2.4.x. */ #ifdef CONFIG_SMP diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index f76389a32342..3f09e1c83f58 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -11,6 +11,7 @@ */ #include <linux/sched.h> +#include <linux/mm_types.h> // #include <linux/mm.h> #include <linux/kbuild.h> diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index f87a55d77094..b542cc7c8d94 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -9,6 +9,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/kthread.h> diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 9df997995f6b..c63ba99ca551 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -741,7 +741,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4u_dma_ops = { +static const struct dma_map_ops sun4u_dma_ops = { .alloc = dma_4u_alloc_coherent, .free = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -752,7 +752,7 @@ static struct dma_map_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -struct dma_map_ops *dma_ops = &sun4u_dma_ops; +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); int dma_supported(struct device *dev, u64 device_mask) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 6ffaec44931a..cf20033a1458 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } -static struct dma_map_ops sbus_dma_ops = { +static const struct dma_map_ops sbus_dma_ops = { .alloc = sbus_alloc_coherent, .free = sbus_free_coherent, .map_page = sbus_map_page, @@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } } -struct dma_map_ops pci32_dma_ops = { +const struct dma_map_ops pci32_dma_ops = { .alloc = pci32_alloc_coherent, .free = pci32_free_coherent, .map_page = pci32_map_page, @@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = { EXPORT_SYMBOL(pci32_dma_ops); /* leon re-uses pci32_dma_ops */ -struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; +const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; EXPORT_SYMBOL(leon_dma_ops); -struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index 3ae36f36e758..44a3ed93c214 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c @@ -8,6 +8,7 @@ #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/uaccess.h> 
+#include <linux/sched/loadavg.h> #include <asm/auxio.h> diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 71e16f2241c2..db7acf27bea2 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -9,7 +9,7 @@ #include <asm/head.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index f4daccd12bf5..68bec7c97cb8 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, local_irq_restore(flags); } -static struct dma_map_ops sun4v_dma_ops = { +static const struct dma_map_ops sun4v_dma_ops = { .alloc = dma_4v_alloc_coherent, .free = dma_4v_free_coherent, .map_page = dma_4v_map_page, diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 48ffc3e7d1dd..b6dac8e980f0 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -14,6 +14,9 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index d249ca10b203..1badc493e62e 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -14,6 +14,9 @@ #include <linux/errno.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 901063c1cf7e..df9e731a76f5 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/export.h> diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 90a02cb64e20..b3bc0ac757cc 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -5,7 +5,8 @@ #include <linux/export.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/threads.h> @@ -122,7 +123,7 @@ void smp_callin(void) current_thread_info()->new_child = 0; /* Attach to the address space of init_task. 
*/ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c index e78386a0029f..be4c14cccc05 100644 --- a/arch/sparc/kernel/stacktrace.c +++ b/arch/sparc/kernel/stacktrace.c @@ -1,4 +1,5 @@ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/ftrace.h> diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 9d98e5002a09..af93b50e3ce4 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -10,7 +10,7 @@ #include <linux/interrupt.h> #include <linux/profile.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/cpu.h> #include <asm/cacheflush.h> @@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg) show_leds(cpuid); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; local_ops->cache_all(); diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c index da737c712fa8..aa84da0b2d30 100644 --- a/arch/sparc/kernel/sun4m_irq.c +++ b/arch/sparc/kernel/sun4m_irq.c @@ -10,6 +10,7 @@ */ #include <linux/slab.h> +#include <linux/sched/debug.h> #include <asm/timer.h> #include <asm/traps.h> diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 278c40abce82..5547fcb1d72d 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -8,7 +8,7 @@ #include <linux/interrupt.h> #include <linux/profile.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/cpu.h> #include <asm/cacheflush.h> @@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg) : "memory" /* paranoid */); /* Attach to the address space of init_task. 
*/ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index fb7b185ee941..7aecb239626d 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -7,7 +7,9 @@ #include <linux/errno.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/file.h> diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 884c70331345..ef4520efc813 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -7,7 +7,9 @@ #include <linux/errno.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm.h> diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 4808b6d23455..d63fc613e7a9 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -106,7 +106,7 @@ static unsigned long run_on_cpu(unsigned long cpu, cpumask_t old_affinity; unsigned long ret; - cpumask_copy(&old_affinity, tsk_cpus_allowed(current)); + cpumask_copy(&old_affinity, &current->cpus_allowed); /* should return -EINVAL to userspace */ if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) return 0; diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 4f21df7d4f13..466d4aed06c7 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -9,7 +9,9 @@ * I hate traps on the sparc, grrr... */ -#include <linux/sched.h> /* for jiffies */ +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> +#include <linux/mm_types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/smp.h> @@ -448,7 +450,7 @@ void trap_init(void) thread_info_offsets_are_bolixed_pete(); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* NOTE: Other cpus have this done as they are started diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index dfc97a47c9a0..196ee5eb4d48 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -9,7 +9,8 @@ */ #include <linux/extable.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/signal.h> @@ -2837,6 +2838,6 @@ void __init trap_init(void) /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus.
*/ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; } diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index d20d4e3fd129..8367dce5f41b 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -8,7 +8,7 @@ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index 526fcb5d8ce9..b30b30ab3ddd 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/delay.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/slab.h> #include <asm/ldc.h> diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index c4ac58e483a4..8f35eea2103a 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -30,7 +30,7 @@ /* 001001011 - two 32-bit merges */ #define FPMERGE_OPF 0x04b -/* 000110001 - 8-by-16-bit partitoned product */ +/* 000110001 - 8-by-16-bit partitioned product */ #define FMUL8x16_OPF 0x031 /* 000110011 - 8-by-16-bit upper alpha partitioned product */ diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 643c149a3151..b84c4dd14954 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -10,6 +10,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/signal.h> diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index e98a3f2e8f0f..323bc6b6e3ad 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -6,6 +6,7 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/sysctl.h> diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 23479c3d39f0..0a04811f06b7 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -6,6 +6,8 @@ #include <linux/kernel.h> #include <linux/preempt.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h index 6ab8bf146d4c..1cf45422a0df 100644 --- a/arch/tile/include/asm/device.h +++ b/arch/tile/include/asm/device.h @@ -17,9 +17,6 @@ #define _ASM_TILE_DEVICE_H struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - /* Offset of the DMA address from the PA. 
*/ dma_addr_t dma_offset; diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 01ceb4a895b0..bbc71a29b2c6 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -24,17 +24,14 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK #endif -extern struct dma_map_ops *tile_dma_map_ops; -extern struct dma_map_ops *gx_pci_dma_map_ops; -extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; -extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops; +extern const struct dma_map_ops *tile_dma_map_ops; +extern const struct dma_map_ops *gx_pci_dma_map_ops; +extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops; +extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return tile_dma_map_ops; + return tile_dma_map_ops; } static inline dma_addr_t get_dma_offset(struct device *dev) @@ -59,11 +56,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline void dma_mark_clean(void *addr, size_t size) {} -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - dev->archdata.dma_ops = ops; -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/tile/include/asm/kprobes.h b/arch/tile/include/asm/kprobes.h index d8f9a83943b1..4a8b1cadca24 100644 --- a/arch/tile/include/asm/kprobes.h +++ b/arch/tile/include/asm/kprobes.h @@ -17,10 +17,13 @@ #ifndef _ASM_TILE_KPROBES_H #define _ASM_TILE_KPROBES_H +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES + #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> - #include <arch/opcode.h> #define __ARCH_WANT_KPROBES_INSN_SLOT @@ -76,4 +79,5 @@ void arch_remove_kprobe(struct kprobe *); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); +#endif /* CONFIG_KPROBES */ #endif /* _ASM_TILE_KPROBES_H */ diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h index f67753db1f78..45a4b4c424cf 100644 --- a/arch/tile/include/asm/mmu_context.h +++ b/arch/tile/include/asm/mmu_context.h @@ -16,6 +16,8 @@ #define _ASM_TILE_MMU_CONTEXT_H #include <linux/smp.h> +#include <linux/mm_types.h> + #include <asm/setup.h> #include <asm/page.h> #include <asm/pgalloc.h> diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h index c3cb42615a9f..3573325e340b 100644 --- a/arch/tile/include/asm/stack.h +++ b/arch/tile/include/asm/stack.h @@ -17,6 +17,8 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/debug.h> + #include <asm/backtrace.h> #include <asm/page.h> #include <hv/hypervisor.h> diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index c667e104a0c2..0e863f1ee08c 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -13,6 +13,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c index 9247d6b562f4..d4eb5fb2df9d 100644 --- a/arch/tile/kernel/kgdb.c +++ b/arch/tile/kernel/kgdb.c @@ -19,6 +19,8 @@ #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/module.h> +#include <linux/sched/task_stack.h> + #include 
<asm/cacheflush.h> static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP; diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 24e0f8c21f2f..569bb6dd154a 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask) return 1; } -static struct dma_map_ops tile_default_dma_map_ops = { +static const struct dma_map_ops tile_default_dma_map_ops = { .alloc = tile_dma_alloc_coherent, .free = tile_dma_free_coherent, .map_page = tile_dma_map_page, @@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = { .dma_supported = tile_dma_supported }; -struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; +const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; EXPORT_SYMBOL(tile_dma_map_ops); /* Generic PCI DMA mapping functions */ @@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask) return 1; } -static struct dma_map_ops tile_pci_default_dma_map_ops = { +static const struct dma_map_ops tile_pci_default_dma_map_ops = { .alloc = tile_pci_dma_alloc_coherent, .free = tile_pci_dma_free_coherent, .map_page = tile_pci_dma_map_page, @@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = { .dma_supported = tile_pci_dma_supported }; -struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; +const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; EXPORT_SYMBOL(gx_pci_dma_map_ops); /* PCI DMA mapping functions for legacy PCI devices */ @@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -static struct dma_map_ops pci_swiotlb_dma_ops = { +static const struct dma_map_ops pci_swiotlb_dma_ops = { .alloc = tile_swiotlb_alloc_coherent, .free = tile_swiotlb_free_coherent, .map_page = swiotlb_map_page, @@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, }; -static struct dma_map_ops pci_hybrid_dma_ops = { +static const struct dma_map_ops pci_hybrid_dma_ops = { .alloc = tile_swiotlb_alloc_coherent, .free = tile_swiotlb_free_coherent, .map_page = tile_pci_dma_map_page, @@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = { .dma_supported = tile_pci_dma_supported }; -struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; -struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; +const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; +const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; #else -struct dma_map_ops *gx_legacy_pci_dma_map_ops; -struct dma_map_ops *gx_hybrid_pci_dma_map_ops; +const struct dma_map_ops *gx_legacy_pci_dma_map_ops; +const struct dma_map_ops *gx_hybrid_pci_dma_map_ops; #endif EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops); int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); /* * For PCI devices with 64-bit DMA addressing capability, promote @@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask); #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK int dma_set_coherent_mask(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); /* * For PCI devices with 64-bit DMA addressing capability, promote diff --git 
a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index c84c54a1ac55..f0a0e18e4dfb 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -13,6 +13,9 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/preempt.h> #include <linux/module.h> #include <linux/fs.h> diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index e279572824b1..e1a078e6828e 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c @@ -23,6 +23,8 @@ #include <linux/elf.h> #include <linux/tracehook.h> #include <linux/context_tracking.h> +#include <linux/sched/task_stack.h> + #include <asm/traps.h> #include <arch/chip.h> diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 87299a6cfec8..f2bf557bb005 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -14,6 +14,8 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 6c0abaacec33..869c22e57561 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -16,7 +16,8 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/kernel_stat.h> #include <linux/bootmem.h> #include <linux/notifier.h> @@ -160,7 +161,7 @@ static void start_secondary(void) __this_cpu_write(current_asid, min_asid); /* Set up this thread as another owner of the init_mm */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; if (current->mm) BUG(); diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 22bbbd3ff4a3..94ecbc6676e5 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -13,6 +13,8 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index c9357012b1c8..5bd4e88c7c60 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -20,6 +20,7 @@ #include <linux/clockchips.h> #include <linux/hardirq.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/smp.h> #include <linux/delay.h> #include <linux/module.h> diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 39f427bb0de2..54804866f238 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -13,6 +13,7 @@ */ #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/kdebug.h> diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index f229e979584e..8149c38f67b6 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -17,6 +17,8 @@ #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/slab.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/mman.h> diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 709f8e9ba3e9..f58fa06a2214 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -16,6 +16,9 @@ #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include 
<linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 77ceaa343fce..cb10153b5c9f 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/slab.h> diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c index ef61c597898b..8ab28167c44b 100644 --- a/arch/tile/mm/mmap.c +++ b/arch/tile/mm/mmap.c @@ -17,7 +17,8 @@ #include <linux/mm.h> #include <linux/random.h> #include <linux/limits.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/mman.h> #include <linux/compat.h> diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 62087028a9ce..366e57f5e8d6 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -5,8 +5,9 @@ #include <linux/irqreturn.h> #include <linux/kd.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> + #include "chan.h" #include <irq_kern.h> #include <irq_user.h> diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 8a4c72af3bc0..af326fb6510d 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/notifier.h> #include <linux/reboot.h> +#include <linux/sched/debug.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/syscalls.h> diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index 57f03050c850..37c51a6be690 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -6,7 +6,7 @@ * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. 
*/ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/interrupt.h> diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 90c281cd7e1d..e9d42aab76dc 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -25,3 +25,4 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index 1a60e1328e2f..94ac2739918c 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -7,6 +7,8 @@ #define __UM_MMU_CONTEXT_H #include <linux/sched.h> +#include <linux/mm_types.h> + #include <asm/mmu.h> extern void uml_setup_stubs(struct mm_struct *mm); diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 770ec07b6a6a..a43d42bf0a86 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -7,7 +7,9 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/ptrace.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <asm/current.h> #include <asm/processor.h> diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 078630d6448c..a9bd61820042 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -17,6 +17,9 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/seq_file.h> #include <linux/tick.h> #include <linux/threads.h> diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c index b60a9f8cda75..71f3e9217cf2 100644 --- a/arch/um/kernel/reboot.c +++ b/arch/um/kernel/reboot.c @@ -3,7 +3,9 @@ * Licensed under the GPL */ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/oom.h> diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 3943e9d7d13d..7a1f2a936fd1 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -5,8 +5,9 @@ */ #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> + #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sections.h> diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c index 527fa5881915..d4dbf08722d6 100644 --- a/arch/um/kernel/skas/process.c +++ b/arch/um/kernel/skas/process.c @@ -4,7 +4,10 @@ */ #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/task.h> + #include <as-layout.h> #include <kern.h> #include <os.h> diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index aa1b56f5ac68..a76295f7ede9 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -11,6 +11,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> + #include <asm/sysrq.h> #include <asm/stacktrace.h> #include <os.h> diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 3777b82759bd..37508b190106 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -5,7 +5,8 @@ #include <linux/mm.h> #include <linux/module.h> -#include <linux/sched.h> +#include 
<linux/sched/signal.h> + #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <as-layout.h> diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index ad8f206ab5e8..59158871b9fc 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -4,10 +4,11 @@ */ #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/hardirq.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/sched/debug.h> #include <asm/current.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index e8175a8aa22c..4b85acd4020c 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -11,7 +11,9 @@ #include <linux/string.h> #include <linux/utsname.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/kmsg_dump.h> + #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sections.h> diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 5d51ade89f4c..84205fe1cd79 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -63,3 +63,4 @@ generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 4749854afd03..518ba5848dd6 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -21,9 +21,9 @@ #include <asm/memory.h> #include <asm/cacheflush.h> -extern struct dma_map_ops swiotlb_dma_map_ops; +extern const struct dma_map_ops swiotlb_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &swiotlb_dma_map_ops; } diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c index a53343a90ca2..12c8c9527b8e 100644 --- a/arch/unicore32/kernel/fpu-ucf64.c +++ b/arch/unicore32/kernel/fpu-ucf64.c @@ -13,7 +13,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <asm/fpu-ucf64.h> diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index d7c6b676b3a5..d22c1dc7e39e 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -13,6 +13,9 @@ #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> diff --git a/arch/unicore32/kernel/ptrace.c b/arch/unicore32/kernel/ptrace.c index 9f07c08da050..a102c2b4f358 100644 --- a/arch/unicore32/kernel/ptrace.c +++ b/arch/unicore32/kernel/ptrace.c @@ -15,6 +15,7 @@ #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/uaccess.h> +#include <linux/sched/task_stack.h> /* * this routine will get a word off of the processes privileged stack. 
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c index b34030bdabe3..9976e767d51c 100644 --- a/arch/unicore32/kernel/stacktrace.c +++ b/arch/unicore32/kernel/stacktrace.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/stacktrace.h> diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index c54e32410ead..5f25b39f04d4 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -14,6 +14,9 @@ */ #include <linux/module.h> #include <linux/signal.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/spinlock.h> #include <linux/personality.h> #include <linux/kallsyms.h> diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c index 24e836023e6c..3a7f6faa8794 100644 --- a/arch/unicore32/mm/alignment.c +++ b/arch/unicore32/mm/alignment.c @@ -15,6 +15,7 @@ */ #include <linux/compiler.h> #include <linux/kernel.h> +#include <linux/sched/debug.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/init.h> diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index 3e9f6489ba38..525413d6690e 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c @@ -31,7 +31,7 @@ static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -struct dma_map_ops swiotlb_dma_map_ops = { +const struct dma_map_ops swiotlb_dma_map_ops = { .alloc = unicore_swiotlb_alloc_coherent, .free = unicore_swiotlb_free_coherent, .map_sg = swiotlb_map_sg_attrs, diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index b656d216a8a8..bbefcc46a45e 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -17,7 +17,7 @@ #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/io.h> #include <asm/pgtable.h> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 33007aa74111..cc98d5a294ee 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2787,10 +2787,6 @@ config X86_DMA_REMAP bool depends on STA2X11 -config PMC_ATOM - def_bool y - depends on PCI - source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index c4cba00dbdee..63c1d13aaf9f 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -74,14 +74,6 @@ config EFI_PGT_DUMP issues with the mapping of the EFI runtime regions into that table. -config DEBUG_RODATA_TEST - bool "Testcase for the marking rodata read-only" - default y - ---help--- - This option enables a testcase for the setting rodata read-only - as well as for the change_page_attr() infrastructure. 
- If in doubt, say "N" - config DEBUG_WX bool "Warn on W+X mappings at boot" select X86_PTDUMP_CORE diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index b83c61cfd154..370c42c7f046 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 2b3618542544..9ba050fe47f3 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -389,3 +389,4 @@ 380 i386 pkey_mprotect sys_pkey_mprotect 381 i386 pkey_alloc sys_pkey_alloc 382 i386 pkey_free sys_pkey_free +383 i386 statx sys_statx diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index e93ef0b38db8..5aef183e2f85 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -338,6 +338,7 @@ 329 common pkey_mprotect sys_pkey_mprotect 330 common pkey_alloc sys_pkey_alloc 331 common pkey_free sys_pkey_free +332 common statx sys_statx # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 572cee3fccff..226ca70dc6bd 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 636c4b341f36..ce1d7534fa53 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -27,6 +27,8 @@ #include <linux/kernel.h> #include <linux/timer.h> +#include <linux/sched/signal.h> +#include <linux/mm_types.h> #include <linux/syscalls.h> #include <linux/ratelimit.h> diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 496e60391fac..786fd875de92 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/ptrace.h> #include <linux/syscore_ops.h> +#include <linux/sched/clock.h> #include <asm/apic.h> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1635c0c8df23..349d4d17aa7f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -20,7 +20,8 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/kdebug.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/clock.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/cpu.h> diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 7c0a711989d2..8d0879f1d42c 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/jiffies.h> #include <linux/perf_event.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 95c0b4ae09b0..724153797209 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -9,6 +9,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/x86/include/asm/a.out-core.h 
b/arch/x86/include/asm/a.out-core.h index 7a15588e45d4..7d3ece8bfb61 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -17,6 +17,8 @@ #include <linux/user.h> #include <linux/elfcore.h> +#include <linux/mm_types.h> + #include <asm/debugreg.h> /* diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index eff8e36aaf72..730ef65e8393 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -2,7 +2,6 @@ #define _ASM_X86_APIC_H #include <linux/cpumask.h> -#include <linux/pm.h> #include <asm/alternative.h> #include <asm/cpufeature.h> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 872877d930de..e7e1942edff7 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -90,18 +90,8 @@ void clflush_cache_range(void *addr, unsigned int size); #define mmio_flush_range(addr, size) clflush_cache_range(addr, size) -extern const int rodata_test_data; extern int kernel_set_to_readonly; void set_kernel_text_rw(void); void set_kernel_text_ro(void); -#ifdef CONFIG_DEBUG_RODATA_TEST -int rodata_test(void); -#else -static inline int rodata_test(void) -{ - return 0; -} -#endif - #endif /* _ASM_X86_CACHEFLUSH_H */ diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index eb5deb42484d..49265345d4d2 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -15,7 +15,7 @@ * FIXME: Accessing the desc_struct through its fields is more elegant, * and should be the one valid thing to do. However, a lot of open code * still touches the a and b accessors, and doing this allow us to do it - * incrementally. We keep the signature as a struct, rather than an union, + * incrementally. 
We keep the signature as a struct, rather than a union, * so we can get rid of it transparently in the future -- glommer */ /* 8 byte segment descriptor */ diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 684ed6c3aa67..1b3ef26e77df 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -2,9 +2,6 @@ #define _ASM_X86_DEVICE_H struct dev_archdata { -#ifdef CONFIG_X86_DEV_DMA_OPS - struct dma_map_ops *dma_ops; -#endif #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ #endif @@ -13,7 +10,7 @@ struct dev_archdata { #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) struct dma_domain { struct list_head node; - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; int domain_nr; }; void add_dma_domain(struct dma_domain *domain); diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 44461626830e..08a0838b83fb 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -25,18 +25,11 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { -#ifndef CONFIG_X86_DEV_DMA_OPS return dma_ops; -#else - if (unlikely(!dev) || !dev->archdata.dma_ops) - return dma_ops; - else - return dev->archdata.dma_ops; -#endif } bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 8167fdb67ae8..9814db42b790 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -59,6 +59,7 @@ #define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ #define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C +#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ /* Xeon Phi */ diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h index cd0310e186f4..4291b6a5ddf7 100644 --- a/arch/x86/include/asm/intel_pmc_ipc.h +++ b/arch/x86/include/asm/intel_pmc_ipc.h @@ -30,6 +30,7 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr); int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen); +int intel_pmc_s0ix_counter_read(u64 *data); #else @@ -50,6 +51,11 @@ static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, return -EINVAL; } +static inline int intel_pmc_s0ix_counter_read(u64 *data) +{ + return -EINVAL; +} + #endif /*CONFIG_INTEL_PMC_IPC*/ #endif diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h index 95ce5c85b009..0d64397cee58 100644 --- a/arch/x86/include/asm/intel_rdt.h +++ b/arch/x86/include/asm/intel_rdt.h @@ -3,6 +3,7 @@ #ifdef CONFIG_INTEL_RDT_A +#include <linux/sched.h> #include <linux/kernfs.h> #include <linux/jump_label.h> diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 345c99cef152..793869879464 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern struct dma_map_ops nommu_dma_ops; +extern const struct dma_map_ops nommu_dma_ops; extern int 
force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d1d1e5094c28..200581691c6e 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -21,6 +21,12 @@ * * See arch/x86/kernel/kprobes.c for x86 kprobes history. */ + +#include <asm-generic/kprobes.h> + +#define BREAKPOINT_INSTRUCTION 0xcc + +#ifdef CONFIG_KPROBES #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> @@ -32,7 +38,6 @@ struct pt_regs; struct kprobe; typedef u8 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xcc #define RELATIVEJUMP_OPCODE 0xe9 #define RELATIVEJUMP_SIZE 5 #define RELATIVECALL_OPCODE 0xe8 @@ -116,4 +121,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); extern int kprobe_int3_handler(struct pt_regs *regs); extern int kprobe_debug_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ #endif /* _ASM_X86_KPROBES_H */ diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h index 0b416d4cf73b..a0d662be4c5b 100644 --- a/arch/x86/include/asm/mpx.h +++ b/arch/x86/include/asm/mpx.h @@ -2,6 +2,8 @@ #define _ASM_X86_MPX_H #include <linux/types.h> +#include <linux/mm_types.h> + #include <asm/ptrace.h> #include <asm/insn.h> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 00293a94ffaf..d8b5f8ab8ef9 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -46,7 +46,7 @@ #define MSR_FSB_FREQ 0x000000cd #define MSR_PLATFORM_INFO 0x000000ce -#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 +#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 #define NHM_C3_AUTO_DEMOTE (1UL << 25) #define NHM_C1_AUTO_DEMOTE (1UL << 26) #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) @@ -147,6 +147,7 @@ /* C-state Residency Counters */ #define MSR_PKG_C3_RESIDENCY 0x000003f8 #define MSR_PKG_C6_RESIDENCY 0x000003f9 +#define MSR_ATOM_PKG_C6_RESIDENCY 0x000003fa #define MSR_PKG_C7_RESIDENCY 0x000003fa #define MSR_CORE_C3_RESIDENCY 0x000003fc #define MSR_CORE_C6_RESIDENCY 0x000003fd @@ -203,10 +204,17 @@ #define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B #define MSR_CORE_C1_RES 0x00000660 +#define MSR_MODULE_C6_RES_MS 0x00000664 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 +#define MSR_ATOM_CORE_RATIOS 0x0000066a +#define MSR_ATOM_CORE_VIDS 0x0000066b +#define MSR_ATOM_CORE_TURBO_RATIOS 0x0000066c +#define MSR_ATOM_CORE_TURBO_VIDS 0x0000066d + + #define MSR_CORE_PERF_LIMIT_REASONS 0x00000690 #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 @@ -459,6 +467,7 @@ #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 +#define MSR_MISC_FEATURE_CONTROL 0x000001a4 #define MSR_MISC_PWR_MGMT 0x000001aa #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index f37f2d8a2989..bda3c27f0da0 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -2,6 +2,7 @@ #define _ASM_X86_MWAIT_H #include <linux/sched.h> +#include <linux/sched/idle.h> #include <asm/cpufeature.h> diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 8f50fb3f04e1..72277b1028a5 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,7 +121,8 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#ifndef CONFIG_SMP +#if 
!defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ + defined(CONFIG_PARAVIRT)) static inline void native_pud_clear(pud_t *pudp) { } diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index bdcdb3b3a219..84c00592d359 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -100,7 +100,6 @@ obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_APB_TIMER) += apb_timer.o obj-$(CONFIG_AMD_NB) += amd_nb.o -obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 63ff468a7986..df083efe6ee0 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/pci.h> @@ -695,7 +696,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info) return -1; } -static struct dma_map_ops gart_dma_ops = { +static const struct dma_map_ops gart_dma_ops = { .map_sg = gart_map_sg, .unmap_sg = gart_unmap_sg, .map_page = gart_map_page, diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 8567c851172c..4261b3282ad9 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1865,14 +1865,14 @@ static void __smp_spurious_interrupt(u8 vector) "should never happen.\n", vector, smp_processor_id()); } -__visible void smp_spurious_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs) { entering_irq(); __smp_spurious_interrupt(~regs->orig_ax); exiting_irq(); } -__visible void smp_trace_spurious_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs) { u8 vector = ~regs->orig_ax; @@ -1923,14 +1923,14 @@ static void __smp_error_interrupt(struct pt_regs *regs) } -__visible void smp_error_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs) { entering_irq(); __smp_error_interrupt(regs); exiting_irq(); } -__visible void smp_trace_error_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs) { entering_irq(); trace_error_apic_entry(ERROR_APIC_VECTOR); diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 5d30c5e42bb1..f3557a1eb562 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -559,7 +559,7 @@ void send_cleanup_vector(struct irq_cfg *cfg) __send_cleanup_vector(data); } -asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) +asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void) { unsigned vector, me; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 4a7080c84a5a..5a414545e8a3 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -218,7 +218,8 @@ #include <linux/apm_bios.h> #include <linux/init.h> #include <linux/time.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/cputime.h> #include <linux/pm.h> #include <linux/capability.h> #include <linux/device.h> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 4e95b2e0d95f..35a5d5dca2fa 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -5,6 +5,7 @@ #include <linux/io.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include 
<linux/random.h> #include <asm/processor.h> #include <asm/apic.h> diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 2c234a6d94c4..adc0ebd8bed0 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,5 +1,6 @@ #include <linux/sched.h> +#include <linux/sched/clock.h> #include <asm/cpufeature.h> #include <asm/e820.h> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f07005e6f461..b11b38c3b0bd 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -7,7 +7,9 @@ #include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/clock.h> +#include <linux/sched/task.h> #include <linux/init.h> #include <linux/kprobes.h> #include <linux/kgdb.h> @@ -1510,7 +1512,7 @@ void cpu_init(void) for (i = 0; i <= IO_BITMAP_LONGS; i++) t->io_bitmap[i] = ~0UL; - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); me->active_mm = &init_mm; BUG_ON(me->mm); enter_lazy_tlb(&init_mm, me); @@ -1561,7 +1563,7 @@ void cpu_init(void) /* * Set up and load the per-CPU TSS and LDT */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); curr->active_mm = &init_mm; BUG_ON(curr->mm); enter_lazy_tlb(&init_mm, curr); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 47416f959a48..0a3bc19de017 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -10,6 +10,7 @@ #include <asm/tsc.h> #include <asm/cpufeature.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include "cpu.h" diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 017ecd3bb553..fe0a615a051b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -4,6 +4,7 @@ #include <linux/bitops.h> #include <linux/smp.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/thread_info.h> #include <linux/init.h> #include <linux/uaccess.h> diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 0282b0df004a..c55fb2cb2acc 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -11,6 +11,7 @@ #include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/capability.h> #include <linux/sysfs.h> #include <linux/pci.h> diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 8af04afdfcb9..0bbe0f3a039f 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -25,7 +25,8 @@ #include <linux/sysfs.h> #include <linux/kernfs.h> #include <linux/seq_file.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/task_work.h> diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 9e5427df3243..524cc5780a77 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -816,14 +816,14 @@ static inline void __smp_deferred_error_interrupt(void) deferred_error_int_vector(); } -asmlinkage __visible void smp_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) { entering_irq(); __smp_deferred_error_interrupt(); exiting_ack_irq(); } -asmlinkage __visible void smp_trace_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry 
smp_trace_deferred_error_interrupt(void) { entering_irq(); trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 85469f84c921..d7cc190ae457 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -396,14 +396,16 @@ static inline void __smp_thermal_interrupt(void) smp_thermal_vector(); } -asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs) +asmlinkage __visible void __irq_entry +smp_thermal_interrupt(struct pt_regs *regs) { entering_irq(); __smp_thermal_interrupt(); exiting_ack_irq(); } -asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs) +asmlinkage __visible void __irq_entry +smp_trace_thermal_interrupt(struct pt_regs *regs) { entering_irq(); trace_thermal_apic_entry(THERMAL_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index 9beb092d68a5..bb0e75eed10a 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c @@ -23,14 +23,14 @@ static inline void __smp_threshold_interrupt(void) mce_threshold_vector(); } -asmlinkage __visible void smp_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) { entering_irq(); __smp_threshold_interrupt(); exiting_ack_irq(); } -asmlinkage __visible void smp_trace_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void) { entering_irq(); trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index c1ea5b999839..8457b4978668 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,5 +1,6 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/mm.h> #include <asm/cpufeature.h> #include <asm/msr.h> diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index b2f7207ba86c..f9c324e08d85 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -1,5 +1,6 @@ #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/init_task.h> #include <linux/fs.h> diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0cfd01d2754c..09d4ac0d2661 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -10,6 +10,8 @@ #include <linux/kdebug.h> #include <linux/module.h> #include <linux/ptrace.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/ftrace.h> #include <linux/kexec.h> #include <linux/bug.h> diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index bb3b5b9a6899..b0b3a3df7c20 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -2,6 +2,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/uaccess.h> diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index fac189efcc34..a8b117e93b46 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -2,6 +2,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include 
<linux/uaccess.h> diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 19bdd1bf8160..c2f8dde3255c 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -7,6 +7,7 @@ #include <asm/cmdline.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/init.h> /* diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index c114b132d121..b188b16841e3 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -5,6 +5,7 @@ #include <asm/fpu/signal.h> #include <asm/fpu/regset.h> #include <asm/fpu/xstate.h> +#include <linux/sched/task_stack.h> /* * The xstateregs_active() routine is the same as the regset_fpregs_active() routine, diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index b01bc8517450..ca49bab3e467 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -4,6 +4,7 @@ */ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/capability.h> #include <linux/errno.h> diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 7c6e9ffe4424..4d8183b5f113 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -264,7 +264,7 @@ void __smp_x86_platform_ipi(void) x86_platform_ipi_callback(); } -__visible void smp_x86_platform_ipi(struct pt_regs *regs) +__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -315,7 +315,7 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs) } #endif -__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs) +__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 6b0678a541e2..3be74fbdeff2 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -15,6 +15,7 @@ #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/smp.h> +#include <linux/sched/task_stack.h> #include <asm/io_apic.h> #include <asm/apic.h> diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c index 3512ba607361..275487872be2 100644 --- a/arch/x86/kernel/irq_work.c +++ b/arch/x86/kernel/irq_work.c @@ -9,6 +9,7 @@ #include <linux/hardirq.h> #include <asm/apic.h> #include <asm/trace/irq_vectors.h> +#include <linux/interrupt.h> static inline void __smp_irq_work_interrupt(void) { @@ -16,14 +17,14 @@ static inline void __smp_irq_work_interrupt(void) irq_work_run(); } -__visible void smp_irq_work_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); __smp_irq_work_interrupt(); exiting_irq(); } -__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); trace_irq_work_entry(IRQ_WORK_VECTOR); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 520b8dfe1640..6384eb754a58 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -45,6 +45,7 @@ #include <linux/slab.h> #include <linux/hardirq.h> #include <linux/preempt.h> +#include <linux/sched/debug.h> #include <linux/extable.h> #include <linux/kdebug.h> #include <linux/kallsyms.h> diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bae6ea6cfb94..d88967659098 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -25,6 
+25,7 @@ #include <linux/hardirq.h> #include <linux/memblock.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <asm/x86_init.h> #include <asm/reboot.h> diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index bfe4d6c96fbd..f088ea4c66e7 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -13,6 +13,7 @@ #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/kdebug.h> +#include <linux/sched/debug.h> #include <linux/nmi.h> #include <linux/debugfs.h> #include <linux/delay.h> @@ -20,6 +21,7 @@ #include <linux/ratelimit.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/sched/clock.h> #if defined(CONFIG_EDAC) #include <linux/edac.h> diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index d47517941bbc..0c150c06fa5a 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -478,7 +478,7 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static struct dma_map_ops calgary_dma_ops = { +static const struct dma_map_ops calgary_dma_ops = { .alloc = calgary_alloc_coherent, .free = calgary_free_coherent, .map_sg = calgary_map_sg, @@ -1177,7 +1177,7 @@ static int __init calgary_init(void) tbl = find_iommu_table(&dev->dev); if (translation_enabled(tbl)) - dev->dev.archdata.dma_ops = &calgary_dma_ops; + dev->dev.dma_ops = &calgary_dma_ops; } return ret; @@ -1201,7 +1201,7 @@ error: calgary_disable_translation(dev); calgary_free_bus(dev); pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */ - dev->dev.archdata.dma_ops = NULL; + dev->dev.dma_ops = NULL; } while (1); return ret; diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index d5c223c9cf11..3a216ec869cd 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -17,7 +17,7 @@ static int forbid_dac __read_mostly; -struct dma_map_ops *dma_ops = &nommu_dma_ops; +const struct dma_map_ops *dma_ops = &nommu_dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -215,7 +215,7 @@ early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 00e71ce396a8..a88952ef371c 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -88,7 +88,7 @@ static void nommu_sync_sg_for_device(struct device *dev, flush_write_buffers(); } -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, .map_sg = nommu_map_sg, diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 410efb2c7b80..1e23577e17cf 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -45,7 +45,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size, dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc = x86_swiotlb_alloc_coherent, .free = x86_swiotlb_free_coherent, diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index da8cb987b973..587d887f7f17 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -1,6 
+1,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/perf_event.h> #include <linux/bug.h> #include <linux/stddef.h> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7780efa635b9..56b059486c3b 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -7,6 +7,10 @@ #include <linux/prctl.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/idle.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/init.h> #include <linux/export.h> #include <linux/pm.h> diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a0ac3e81518a..4c818f8bc135 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -12,6 +12,8 @@ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a61e141b6891..d6b784a5520d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -17,6 +17,8 @@ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 9cc7d5a330ef..2364b23ea3e5 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 9e93fe5803b4..5c3f6d6a5078 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -21,6 +21,8 @@ #include <linux/sched.h> #include <linux/gfp.h> #include <linux/bootmem.h> +#include <linux/nmi.h> + #include <asm/fixmap.h> #include <asm/pvclock.h> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 69780edf0dde..4bf0c8926a1c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -575,7 +575,9 @@ static void __init reserve_crashkernel(void) /* 0 means: find the address automatically */ if (crash_base <= 0) { /* - * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX + * Set CRASH_ADDR_LOW_MAX upper bound for crash memory, + * as old kexec-tools loads bzImage below that, unless + * "crashkernel=size[KMG],high" is specified. */ crash_base = memblock_find_in_range(CRASH_ALIGN, high ? 
CRASH_ADDR_HIGH_MAX diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 763af1d0de64..396c042e9d0e 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 68f8cc222f25..d3c66a15bbde 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -259,7 +259,7 @@ static inline void __smp_reschedule_interrupt(void) scheduler_ipi(); } -__visible void smp_reschedule_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); __smp_reschedule_interrupt(); @@ -268,7 +268,7 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs) */ } -__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs) { /* * Need to call irq_enter() before calling the trace point. @@ -292,14 +292,15 @@ static inline void __smp_call_function_interrupt(void) inc_irq_stat(irq_call_count); } -__visible void smp_call_function_interrupt(struct pt_regs *regs) +__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); __smp_call_function_interrupt(); exiting_irq(); } -__visible void smp_trace_call_function_interrupt(struct pt_regs *regs) +__visible void __irq_entry +smp_trace_call_function_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); trace_call_function_entry(CALL_FUNCTION_VECTOR); @@ -314,14 +315,16 @@ static inline void __smp_call_function_single_interrupt(void) inc_irq_stat(irq_call_count); } -__visible void smp_call_function_single_interrupt(struct pt_regs *regs) +__visible void __irq_entry +smp_call_function_single_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); __smp_call_function_single_interrupt(); exiting_irq(); } -__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs) +__visible void __irq_entry +smp_trace_call_function_single_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a0d38685f7df..bd1f1ad35284 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -45,6 +45,9 @@ #include <linux/smp.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/topology.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/percpu.h> #include <linux/bootmem.h> #include <linux/err.h> diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 0653788026e2..8e2b79b88e51 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -4,6 +4,8 @@ * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> */ #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/export.h> #include <linux/uaccess.h> diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index a23ce84a3f6c..f07f83b3611b 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -2,6 +2,7 @@ * x86 single-step support code, common to 32-bit and 64-bit. 
*/ #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index a55ed63b9f91..50215a4b9347 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -1,5 +1,6 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/fs.h> diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c deleted file mode 100644 index 222e84e2432e..000000000000 --- a/arch/x86/kernel/test_rodata.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * test_rodata.c: functional test for mark_rodata_ro function - * - * (C) Copyright 2008 Intel Corporation - * Author: Arjan van de Ven <arjan@linux.intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - */ -#include <asm/cacheflush.h> -#include <asm/sections.h> -#include <asm/asm.h> - -int rodata_test(void) -{ - unsigned long result; - unsigned long start, end; - - /* test 1: read the value */ - /* If this test fails, some previous testrun has clobbered the state */ - if (!rodata_test_data) { - printk(KERN_ERR "rodata_test: test 1 fails (start data)\n"); - return -ENODEV; - } - - /* test 2: write to the variable; this should fault */ - /* - * If this test fails, we managed to overwrite the data - * - * This is written in assembly to be able to catch the - * exception that is supposed to happen in the correct - * case - */ - - result = 1; - asm volatile( - "0: mov %[zero],(%[rodata_test])\n" - " mov %[zero], %[rslt]\n" - "1:\n" - ".section .fixup,\"ax\"\n" - "2: jmp 1b\n" - ".previous\n" - _ASM_EXTABLE(0b,2b) - : [rslt] "=r" (result) - : [rodata_test] "r" (&rodata_test_data), [zero] "r" (0UL) - ); - - - if (!result) { - printk(KERN_ERR "rodata_test: test data was not read only\n"); - return -ENODEV; - } - - /* test 3: check the value hasn't changed */ - /* If this test fails, we managed to overwrite the data */ - if (!rodata_test_data) { - printk(KERN_ERR "rodata_test: Test 3 fails (end data)\n"); - return -ENODEV; - } - /* test 4: check if the rodata section is 4Kb aligned */ - start = (unsigned long)__start_rodata; - end = (unsigned long)__end_rodata; - if (start & (PAGE_SIZE - 1)) { - printk(KERN_ERR "rodata_test: .rodata is not 4k aligned\n"); - return -ENODEV; - } - if (end & (PAGE_SIZE - 1)) { - printk(KERN_ERR "rodata_test: .rodata end is not 4k aligned\n"); - return -ENODEV; - } - - return 0; -} diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1dc86ee60a03..948443e115c1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -29,6 +29,7 @@ #include <linux/errno.h> #include <linux/kexec.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/bug.h> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 2724dc82f992..46bcda4cb1c2 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -2,6 +2,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/init.h> #include <linux/export.h> #include <linux/timer.h> diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 23d15565d02a..478d15dbaee4 100644 --- a/arch/x86/kernel/unwind_frame.c +++ 
b/arch/x86/kernel/unwind_frame.c @@ -1,4 +1,6 @@ #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <asm/ptrace.h> #include <asm/bitops.h> #include <asm/stacktrace.h> diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 0442d98367ae..23ee89ce59a9 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -35,6 +35,7 @@ #include <linux/interrupt.h> #include <linux/syscalls.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/string.h> diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index e79f15f108a8..c74ae9ce8dc4 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -345,7 +345,6 @@ SECTIONS DISCARDS /DISCARD/ : { *(.eh_frame) - *(__func_stack_frame_non_standard) } } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 1d155cc56629..efde6cc50875 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -16,6 +16,8 @@ #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> +#include <linux/sched/stat.h> + #include <asm/processor.h> #include <asm/user.h> #include <asm/fpu/xstate.h> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f701d4430727..ebae57ac5902 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -28,6 +28,8 @@ #include <linux/kvm_host.h> #include <linux/highmem.h> +#include <linux/sched/cputime.h> + #include <asm/apicdef.h> #include <trace/events/kvm.h> diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index b96d3893f121..6825cd36d13b 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -23,6 +23,8 @@ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/rculist.h> + #include <trace/events/kvm.h> #include <asm/msidef.h> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2fd7586aad4d..ac7810513d0e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -36,6 +36,7 @@ #include <linux/compiler.h> #include <linux/srcu.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/kern_levels.h> @@ -4102,7 +4103,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, * as a SMAP violation if all of the following * conditions are ture: * - X86_CR4_SMAP is set in CR4 - * - An user page is accessed + * - A user page is accessed * - Page fault in kernel mode * - if CPL = 3 or X86_EFLAGS_AC is clear * diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 4a1c13eaa518..37942e419c32 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -14,6 +14,8 @@ */ #include <linux/kvm_host.h> +#include <linux/rculist.h> + #include <asm/kvm_host.h> #include <asm/kvm_page_track.h> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2a4b11274b0..1faf620a6fdc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -54,6 +54,8 @@ #include <linux/pvclock_gtod.h> #include <linux/kvm_irqfd.h> #include <linux/irqbypass.h> +#include <linux/sched/stat.h> + #include <trace/events/kvm.h> #include <asm/debugreg.h> diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 61a7e9ea9aa1..35ea061010a1 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,5 +1,7 @@ #include <linux/extable.h> #include <linux/uaccess.h> +#include <linux/sched/debug.h> + #include <asm/traps.h> #include <asm/kdebug.h> diff --git 
a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index e3254ca0eec4..428e31763cb9 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -4,6 +4,7 @@ * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar */ #include <linux/sched.h> /* test_thread_flag(), ... */ +#include <linux/sched/task_stack.h> /* task_stack_*(), ... */ #include <linux/kdebug.h> /* oops_begin/end, ... */ #include <linux/extable.h> /* search_exception_tables */ #include <linux/bootmem.h> /* max_low_pfn */ diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 2ae8584b44c7..c5066a260803 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/err.h> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 928d657de829..2b4b53e6793f 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -864,9 +864,6 @@ static noinline int do_test_wp_bit(void) return flag; } -const int rodata_test_data = 0xC3; -EXPORT_SYMBOL_GPL(rodata_test_data); - int kernel_set_to_readonly __read_mostly; void set_kernel_text_rw(void) @@ -939,7 +936,6 @@ void mark_rodata_ro(void) set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10); - rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 97346f987ef2..15173d37f399 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1000,9 +1000,6 @@ void __init mem_init(void) mem_init_print_info(NULL); } -const int rodata_test_data = 0xC3; -EXPORT_SYMBOL_GPL(rodata_test_data); - int kernel_set_to_readonly; void set_kernel_text_rw(void) @@ -1071,8 +1068,6 @@ void mark_rodata_ro(void) all_end = roundup((unsigned long)_brk_end, PMD_SIZE); set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); - rodata_test(); - #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); set_memory_rw(start, (end-start) >> PAGE_SHIFT); diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 0493c17b8a51..8d63d7a104c3 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -4,6 +4,7 @@ #include <linux/kdebug.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/vmalloc.h> #include <asm/tlbflush.h> diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index d2dc0438d654..7940166c799b 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -28,7 +28,8 @@ #include <linux/mm.h> #include <linux/random.h> #include <linux/limits.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <asm/elf.h> struct va_alignment __read_mostly va_align = { diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index c98079684bdb..5126dfd52b18 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -7,6 +7,7 @@ */ #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/mm_types.h> #include <linux/syscalls.h> #include <linux/sched/sysctl.h> diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index a4fdfa7dcc1b..0cb52ae0a8f0 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -667,7 +667,7 @@ static void set_dma_domain_ops(struct pci_dev *pdev) spin_lock(&dma_domain_list_lock); list_for_each_entry(domain, &dma_domain_list, node) { 
if (pci_domain_nr(pdev->bus) == domain->domain_nr) { - pdev->dev.archdata.dma_ops = domain->dma_ops; + pdev->dev.dma_ops = domain->dma_ops; break; } } diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 052c1cb76305..ec008e800b45 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -179,7 +179,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, } /* We have our own dma_ops: the same as swiotlb but from alloc (above) */ -static struct dma_map_ops sta2x11_dma_ops = { +static const struct dma_map_ops sta2x11_dma_ops = { .alloc = sta2x11_swiotlb_alloc_coherent, .free = x86_swiotlb_free_coherent, .map_page = swiotlb_map_page, @@ -203,7 +203,7 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev) return; pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); - pdev->dev.archdata.dma_ops = &sta2x11_dma_ops; + pdev->dev.dma_ops = &sta2x11_dma_ops; /* We must enable all devices as master, for audio DMA to work */ pci_set_master(pdev); @@ -223,7 +223,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { struct sta2x11_mapping *map; - if (dev->archdata.dma_ops != &sta2x11_dma_ops) { + if (dev->dma_ops != &sta2x11_dma_ops) { if (!dev->dma_mask) return false; return addr + size - 1 <= *dev->dma_mask; @@ -247,7 +247,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) */ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - if (dev->archdata.dma_ops != &sta2x11_dma_ops) + if (dev->dma_ops != &sta2x11_dma_ops) return paddr; return p2a(paddr, to_pci_dev(dev)); } @@ -259,7 +259,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) */ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { - if (dev->archdata.dma_ops != &sta2x11_dma_ops) + if (dev->dma_ops != &sta2x11_dma_ops) return daddr; return a2p(daddr, to_pci_dev(dev)); } diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile index 40983f5b0858..57be88fa34bb 100644 --- a/arch/x86/platform/atom/Makefile +++ b/arch/x86/platform/atom/Makefile @@ -1,2 +1 @@ -obj-$(CONFIG_PMC_ATOM) += pmc_atom.o obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 9743d0ccfec6..c34bd8233f7c 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -27,6 +27,7 @@ #include <linux/moduleparam.h> #include <linux/nmi.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/slab.h> #include <linux/clocksource.h> diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index e6552275320b..10d907098c26 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -6,6 +6,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/uaccess.h> #include <asm/prctl.h> /* XXX This should get the constants from libc */ #include <os.h> diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c index 16ee0e450e3e..f2383484840d 100644 --- a/arch/x86/um/sysrq_32.c +++ b/arch/x86/um/sysrq_32.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/smp.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <asm/ptrace.h> #include <asm/sysrq.h> diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c index 38b4e4abd0f8..903ad91b624f 100644 --- a/arch/x86/um/sysrq_64.c +++ b/arch/x86/um/sysrq_64.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/module.h> 
#include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/utsname.h> #include <asm/current.h> #include <asm/ptrace.h> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f6740b5b1738..37cb5aad71de 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -38,7 +38,7 @@ * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/debugfs.h> #include <linux/bug.h> diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index a0b36a9d5df1..42b08f8fc2ca 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -18,7 +18,7 @@ int xen_swiotlb __read_mostly; -static struct dma_map_ops xen_swiotlb_dma_ops = { +static const struct dma_map_ops xen_swiotlb_dma_ops = { .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 0dee6f59ea82..7ff2f1bfb7ec 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -18,6 +18,7 @@ #include <linux/smp.h> #include <linux/irq_work.h> #include <linux/tick.h> +#include <linux/nmi.h> #include <asm/paravirt.h> #include <asm/desc.h> diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index e54189427b31..7ee02fe4a63d 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -93,11 +93,7 @@ endif boot := arch/xtensa/boot -all: zImage - -bzImage : zImage - -zImage: vmlinux +all Image zImage uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ %.dtb: @@ -107,6 +103,8 @@ dtbs: scripts $(Q)$(MAKE) $(build)=$(boot)/dts define archhelp + @echo '* Image - Kernel ELF image with reset vector' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' + @echo '* uImage - U-Boot wrapped image' @echo ' dtbs - Build device tree blobs for enabled boards' endef diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index ca20a892021b..53e4178711e6 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -21,14 +21,17 @@ subdir-y := lib # Subdirs for the boot loader(s) -bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf -bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot -bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot +boot-$(CONFIG_XTENSA_PLATFORM_ISS) += Image +boot-$(CONFIG_XTENSA_PLATFORM_XT2000) += Image zImage uImage +boot-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += Image zImage uImage -zImage Image: $(bootdir-y) +all: $(boot-y) +Image: boot-elf +zImage: boot-redboot +uImage: $(obj)/uImage -$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \ - $(addprefix $(obj)/,$(host-progs)) +boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) \ + $(addprefix $(obj)/,$(host-progs)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary @@ -41,4 +44,10 @@ vmlinux.bin.gz: vmlinux.bin FORCE boot-elf: vmlinux.bin boot-redboot: vmlinux.bin.gz -boot-uboot: vmlinux.bin.gz + +UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS) +UIMAGE_COMPRESSION = gzip + +$(obj)/uImage: vmlinux.bin.gz FORCE + $(call if_changed,uimage) + $(Q)$(kecho) ' Kernel: $@ is ready' diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile index 89db089f5a12..521471981356 100644 --- a/arch/xtensa/boot/boot-elf/Makefile +++ b/arch/xtensa/boot/boot-elf/Makefile @@ -31,4 +31,4 @@ $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds -o $@ 
$(obj)/Image.o $(Q)$(kecho) ' Kernel: $@ is ready' -zImage: $(obj)/../Image.elf +all Image: $(obj)/../Image.elf diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile index 8be8b9436981..8632473ad319 100644 --- a/arch/xtensa/boot/boot-redboot/Makefile +++ b/arch/xtensa/boot/boot-redboot/Makefile @@ -32,4 +32,4 @@ $(obj)/../zImage.redboot: $(obj)/zImage.elf $(Q)$(OBJCOPY) -S -O binary $< $@ $(Q)$(kecho) ' Kernel: $@ is ready' -zImage: $(obj)/../zImage.redboot +all zImage: $(obj)/../zImage.redboot diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile deleted file mode 100644 index 0f4c417b4196..000000000000 --- a/arch/xtensa/boot/boot-uboot/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. -# - -UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS) -UIMAGE_COMPRESSION = gzip - -$(obj)/../uImage: vmlinux.bin.gz FORCE - $(call if_changed,uimage) - $(Q)$(kecho) ' Kernel: $@ is ready' - -zImage: $(obj)/../uImage diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 9e9760b20be5..f41408c53fe1 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -31,3 +31,4 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += word-at-a-time.h generic-y += xor.h +generic-y += kprobes.h diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h index fe1f5c878493..1deeb8ebbb1b 100644 --- a/arch/xtensa/include/asm/device.h +++ b/arch/xtensa/include/asm/device.h @@ -6,11 +6,7 @@ #ifndef _ASM_XTENSA_DEVICE_H #define _ASM_XTENSA_DEVICE_H -struct dma_map_ops; - struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 3fc1170a6488..c6140fa8c0be 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -18,14 +18,11 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops xtensa_dma_map_ops; +extern const struct dma_map_ops xtensa_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return &xtensa_dma_map_ops; + return &xtensa_dma_map_ops; } void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index 04c8ebdc4517..f7e186dfc4e4 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -17,6 +17,7 @@ #include <linux/stringify.h> #include <linux/sched.h> +#include <linux/mm_types.h> #include <asm/vectors.h> diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 77d41cc7a688..65d3da9db19b 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -67,7 +67,11 @@ static inline unsigned long xtensa_get_kio_paddr(void) #endif /* CONFIG_MMU */ #define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) +#ifdef CONFIG_VECTORS_OFFSET #define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET) +#else +#define VECBASE_VADDR _vecbase +#endif #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE diff --git 
a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 34c1f9fa6acc..cec86a1c2acc 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -250,7 +250,7 @@ int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } -struct dma_map_ops xtensa_dma_map_ops = { +const struct dma_map_ops xtensa_dma_map_ops = { .alloc = xtensa_dma_alloc, .free = xtensa_dma_free, .map_page = xtensa_map_page, diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 826d25104846..58f96d1230d4 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -17,6 +17,9 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index 32519b71d914..e0f583fed06a 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -20,6 +20,7 @@ #include <linux/perf_event.h> #include <linux/ptrace.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/smp.h> diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 8fd4be610607..197e75b400b1 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -126,6 +126,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) __tagtable(BP_TAG_INITRD, parse_tag_initrd); +#endif /* CONFIG_BLK_DEV_INITRD */ + #ifdef CONFIG_OF static int __init parse_tag_fdt(const bp_tag_t *tag) @@ -138,8 +140,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt); #endif /* CONFIG_OF */ -#endif /* CONFIG_BLK_DEV_INITRD */ - static int __init parse_tag_cmdline(const bp_tag_t* tag) { strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); @@ -334,6 +334,7 @@ void __init setup_arch(char **cmdline_p) mem_reserve(__pa(&_stext), __pa(&_end)); +#ifdef CONFIG_VECTORS_OFFSET mem_reserve(__pa(&_WindowVectors_text_start), __pa(&_WindowVectors_text_end)); @@ -370,6 +371,8 @@ void __init setup_arch(char **cmdline_p) __pa(&_Level6InterruptVector_text_end)); #endif +#endif /* CONFIG_VECTORS_OFFSET */ + #ifdef CONFIG_SMP mem_reserve(__pa(&_SecondaryResetVector_text_start), __pa(&_SecondaryResetVector_text_end)); diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index c41294745731..70a131945443 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -20,6 +20,7 @@ #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/tracehook.h> +#include <linux/sched/task_stack.h> #include <asm/ucontext.h> #include <linux/uaccess.h> diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index fc4ad21a5ed4..932d64689bac 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -21,6 +21,9 @@ #include <linux/irq.h> #include <linux/kdebug.h> #include <linux/module.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task_stack.h> #include <linux/reboot.h> #include <linux/seq_file.h> #include <linux/smp.h> @@ -135,8 +138,8 @@ void secondary_start_kernel(void) /* All kernel threads share the same mm context. 
*/ - atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmget(mm); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); enter_lazy_tlb(mm, current); diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index d3fd100dffc9..06937928cb72 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -25,6 +25,7 @@ #include <linux/file.h> #include <linux/fs.h> #include <linux/mman.h> +#include <linux/sched/mm.h> #include <linux/shm.h> typedef void (*syscall_t)(void); diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 282bf721a4d6..c82c43bff296 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -24,7 +24,9 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/init.h> #include <linux/module.h> #include <linux/stringify.h> diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 31411fc82662..30d9fc21e076 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -59,6 +59,7 @@ jiffies = jiffies_64; * garbage.) */ +#ifdef CONFIG_VECTORS_OFFSET #define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ @@ -68,6 +69,11 @@ jiffies = jiffies_64; *(section) \ sym ## _end = ABSOLUTE(.); \ } +#else +#define SECTION_VECTOR(section, addr) \ + . = addr; \ + *(section) +#endif /* * Mapping of input sections to output sections when linking. @@ -85,6 +91,37 @@ SECTIONS { /* The HEAD_TEXT section must be the first section! */ HEAD_TEXT + +#ifndef CONFIG_VECTORS_OFFSET + . = ALIGN(PAGE_SIZE); + _vecbase = .; + + SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR) +#if XCHAL_EXCM_LEVEL >= 2 + SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 3 + SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 4 + SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 5 + SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 6 + SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) +#endif + SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) + SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) + SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) + SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) + SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) + SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) + SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) + SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) +#endif + TEXT_TEXT VMLINUX_SYMBOL(__sched_text_start) = .; *(.sched.literal .sched.text) @@ -132,6 +169,7 @@ SECTIONS . = ALIGN(16); __boot_reloc_table_start = ABSOLUTE(.); +#ifdef CONFIG_VECTORS_OFFSET RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); #if XCHAL_EXCM_LEVEL >= 2 @@ -164,6 +202,7 @@ SECTIONS .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); +#endif #if defined(CONFIG_SMP) RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); @@ -186,6 +225,7 @@ SECTIONS . 
= ALIGN(4); .dummy : { LONG(0) } +#ifdef CONFIG_VECTORS_OFFSET /* The vectors are relocated to the real position at startup time */ SECTION_VECTOR (_WindowVectors_text, @@ -277,6 +317,7 @@ SECTIONS . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; +#endif #if defined(CONFIG_SMP) SECTION_VECTOR (_SecondaryResetVector_text, diff --git a/block/Kconfig b/block/Kconfig index a2a92e57a87d..e9f780f815f5 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -189,4 +189,9 @@ config BLK_MQ_PCI depends on BLOCK && PCI default y +config BLK_MQ_VIRTIO + bool + depends on BLOCK && VIRTIO + default y + source block/Kconfig.iosched diff --git a/block/Makefile b/block/Makefile index 2ad7c304e3f5..081bb680789b 100644 --- a/block/Makefile +++ b/block/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o +obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 295e98c2c8cc..bbe7ee00bd3d 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -17,6 +17,7 @@ #include <linux/ioprio.h> #include <linux/kdev_t.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/err.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> diff --git a/block/blk-core.c b/block/blk-core.c index b9e857f4afe8..1086dac8724c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -578,7 +578,6 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = &q->__queue_lock; spin_unlock_irq(lock); - bdi_unregister(q->backing_dev_info); put_disk_devt(q->disk_devt); /* @q is and will stay empty, shutdown and put */ diff --git a/block/blk-ioc.c b/block/blk-ioc.c index b12f9c87b4c3..63898d229cb9 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -7,6 +7,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/slab.h> +#include <linux/sched/task.h> #include "blk.h" @@ -36,8 +37,8 @@ static void icq_free_icq_rcu(struct rcu_head *head) } /* - * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for - * mq. + * Exit an icq. Called with ioc locked for blk-mq, and with both ioc + * and queue locked for legacy. */ static void ioc_exit_icq(struct io_cq *icq) { @@ -54,7 +55,10 @@ static void ioc_exit_icq(struct io_cq *icq) icq->flags |= ICQ_EXITED; } -/* Release an icq. Called with both ioc and q locked. */ +/* + * Release an icq. Called with ioc locked for blk-mq, and with both ioc + * and queue locked for legacy. 
+ */ static void ioc_destroy_icq(struct io_cq *icq) { struct io_context *ioc = icq->ioc; @@ -62,7 +66,6 @@ static void ioc_destroy_icq(struct io_cq *icq) struct elevator_type *et = q->elevator->type; lockdep_assert_held(&ioc->lock); - lockdep_assert_held(q->queue_lock); radix_tree_delete(&ioc->icq_tree, icq->q->id); hlist_del_init(&icq->ioc_node); @@ -222,24 +225,40 @@ void exit_io_context(struct task_struct *task) put_io_context_active(ioc); } +static void __ioc_clear_queue(struct list_head *icq_list) +{ + unsigned long flags; + + while (!list_empty(icq_list)) { + struct io_cq *icq = list_entry(icq_list->next, + struct io_cq, q_node); + struct io_context *ioc = icq->ioc; + + spin_lock_irqsave(&ioc->lock, flags); + ioc_destroy_icq(icq); + spin_unlock_irqrestore(&ioc->lock, flags); + } +} + /** * ioc_clear_queue - break any ioc association with the specified queue * @q: request_queue being cleared * - * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked. + * Walk @q->icq_list and exit all io_cq's. */ void ioc_clear_queue(struct request_queue *q) { - lockdep_assert_held(q->queue_lock); + LIST_HEAD(icq_list); - while (!list_empty(&q->icq_list)) { - struct io_cq *icq = list_entry(q->icq_list.next, - struct io_cq, q_node); - struct io_context *ioc = icq->ioc; + spin_lock_irq(q->queue_lock); + list_splice_init(&q->icq_list, &icq_list); - spin_lock(&ioc->lock); - ioc_destroy_icq(icq); - spin_unlock(&ioc->lock); + if (q->mq_ops) { + spin_unlock_irq(q->queue_lock); + __ioc_clear_queue(&icq_list); + } else { + __ioc_clear_queue(&icq_list); + spin_unlock_irq(q->queue_lock); } } diff --git a/block/blk-map.c b/block/blk-map.c index 2f18c2a0be1b..3b5cb863318f 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -2,6 +2,7 @@ * Functions related to mapping data to requests */ #include <linux/kernel.h> +#include <linux/sched/task_stack.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 98c7b061781e..09af8ff18719 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -110,15 +110,14 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct blk_mq_alloc_data *data) { struct elevator_queue *e = q->elevator; - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; struct request *rq; blk_queue_enter_live(q); - ctx = blk_mq_get_ctx(q); - hctx = blk_mq_map_queue(q, ctx->cpu); - - blk_mq_set_alloc_data(data, q, data->flags, ctx, hctx); + data->q = q; + if (likely(!data->ctx)) + data->ctx = blk_mq_get_ctx(q); + if (likely(!data->hctx)) + data->hctx = blk_mq_map_queue(q, data->ctx->cpu); if (e) { data->flags |= BLK_MQ_REQ_INTERNAL; @@ -135,8 +134,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, rq = __blk_mq_alloc_request(data, op); } else { rq = __blk_mq_alloc_request(data, op); - if (rq) - data->hctx->tags->rqs[rq->tag] = rq; } if (rq) { @@ -454,7 +451,8 @@ int blk_mq_sched_setup(struct request_queue *q) */ ret = 0; queue_for_each_hw_ctx(q, hctx, i) { - hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0); + hctx->sched_tags = blk_mq_alloc_rq_map(set, i, + q->nr_requests, set->reserved_tags); if (!hctx->sched_tags) { ret = -ENOMEM; break; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 54c84363c1b2..e48bc2c72615 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -181,7 +181,7 @@ found_tag: void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag) { - if (tag >= 
tags->nr_reserved_tags) { + if (!blk_mq_tag_is_reserved(tags, tag)) { const int real_tag = tag - tags->nr_reserved_tags; BUG_ON(real_tag >= tags->nr_tags); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 63497423c5cd..5cb51e53cc03 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -85,4 +85,10 @@ static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, hctx->tags->rqs[tag] = rq; } +static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, + unsigned int tag) +{ + return tag < tags->nr_reserved_tags; +} + #endif diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c new file mode 100644 index 000000000000..c3afbca11299 --- /dev/null +++ b/block/blk-mq-virtio.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016 Christoph Hellwig. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include <linux/device.h> +#include <linux/blk-mq.h> +#include <linux/blk-mq-virtio.h> +#include <linux/virtio_config.h> +#include <linux/module.h> +#include "blk-mq.h" + +/** + * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device + * @set: tagset to provide the mapping for + * @vdev: virtio device associated with @set. + * @first_vec: first interrupt vector to use for queues (usually 0) + * + * This function assumes the virtio device @vdev has at least as many available + * interrupt vectors as @set has queues. It will then query the vector + * corresponding to each queue for its affinity mask and build a queue mapping + * that maps a queue to the CPUs that have irq affinity for the corresponding + * vector.
+ */ +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + if (!vdev->config->get_vq_affinity) + goto fallback; + + for (queue = 0; queue < set->nr_hw_queues; queue++) { + mask = vdev->config->get_vq_affinity(vdev, first_vec + queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + set->mq_map[cpu] = queue; + } + + return 0; +fallback: + return blk_mq_map_queues(set); +} +EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index 9e6b064e5339..b2fd175e84d7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -20,6 +20,8 @@ #include <linux/cpu.h> #include <linux/cache.h> #include <linux/sched/sysctl.h> +#include <linux/sched/topology.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/crash_dump.h> #include <linux/prefetch.h> @@ -75,10 +77,20 @@ void blk_mq_freeze_queue_start(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); -static void blk_mq_freeze_queue_wait(struct request_queue *q) +void blk_mq_freeze_queue_wait(struct request_queue *q) { wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); } +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); + +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout) +{ + return wait_event_timeout(q->mq_freeze_wq, + percpu_ref_is_zero(&q->q_usage_counter), + timeout); +} +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); /* * Guarantee no request is in use, so we can change any data structure of @@ -234,6 +246,7 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, } rq->tag = tag; rq->internal_tag = -1; + data->hctx->tags->rqs[rq->tag] = rq; } blk_mq_rq_ctx_init(data->q, data->ctx, rq, op); @@ -273,10 +286,9 @@ EXPORT_SYMBOL(blk_mq_alloc_request); struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, unsigned int flags, unsigned int hctx_idx) { - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; + struct blk_mq_alloc_data alloc_data = { .flags = flags }; struct request *rq; - struct blk_mq_alloc_data alloc_data; + unsigned int cpu; int ret; /* @@ -299,25 +311,23 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, * Check if the hardware context is actually mapped to anything. * If not tell the caller that it should skip this queue. 
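blk_mq_virtio_map_queues() above is meant to be called from a driver's blk_mq_ops->map_queues callback. The sketch below shows how a hypothetical my_vblk driver might wire it up, assuming it stores its virtio_device in the tag set's driver_data; the first_vec value depends on how many non-I/O virtqueues precede the request queues, so the 1 used here is only an illustrative guess.

#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/virtio.h>

/* Hypothetical driver state; my_vblk is not part of the patch. */
struct my_vblk {
	struct virtio_device *vdev;
	struct blk_mq_tag_set tag_set;	/* tag_set.driver_data points back here */
};

static int my_vblk_map_queues(struct blk_mq_tag_set *set)
{
	struct my_vblk *vblk = set->driver_data;

	/*
	 * Assume vector 0 carries only configuration interrupts, so the I/O
	 * queue vectors start at 1. If the transport does not implement
	 * get_vq_affinity, the helper falls back to blk_mq_map_queues().
	 */
	return blk_mq_virtio_map_queues(set, vblk->vdev, 1);
}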
*/ - hctx = q->queue_hw_ctx[hctx_idx]; - if (!blk_mq_hw_queue_mapped(hctx)) { - ret = -EXDEV; - goto out_queue_exit; + alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; + if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { + blk_queue_exit(q); + return ERR_PTR(-EXDEV); } - ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); + cpu = cpumask_first(alloc_data.hctx->cpumask); + alloc_data.ctx = __blk_mq_get_ctx(q, cpu); - blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); - rq = __blk_mq_alloc_request(&alloc_data, rw); - if (!rq) { - ret = -EWOULDBLOCK; - goto out_queue_exit; - } - - return rq; + rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); -out_queue_exit: + blk_mq_put_ctx(alloc_data.ctx); blk_queue_exit(q); - return ERR_PTR(ret); + + if (!rq) + return ERR_PTR(-EWOULDBLOCK); + + return rq; } EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); @@ -852,6 +862,9 @@ done: return true; } + if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) + data.flags |= BLK_MQ_REQ_RESERVED; + rq->tag = blk_mq_get_tag(&data); if (rq->tag >= 0) { if (blk_mq_tag_busy(data.hctx)) { @@ -865,12 +878,9 @@ done: return false; } -static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, - struct request *rq) +static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, + struct request *rq) { - if (rq->tag == -1 || rq->internal_tag == -1) - return; - blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); rq->tag = -1; @@ -880,6 +890,26 @@ static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, } } +static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + if (rq->tag == -1 || rq->internal_tag == -1) + return; + + __blk_mq_put_driver_tag(hctx, rq); +} + +static void blk_mq_put_driver_tag(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx; + + if (rq->tag == -1 || rq->internal_tag == -1) + return; + + hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); + __blk_mq_put_driver_tag(hctx, rq); +} + /* * If we fail getting a driver tag because all the driver tags are already * assigned and on the dispatch list, BUT the first entry does not have a @@ -989,7 +1019,19 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) bd.rq = rq; bd.list = dptr; - bd.last = list_empty(list); + + /* + * Flag last if we have no more requests, or if we have more + * but can't assign a driver tag to it. + */ + if (list_empty(list)) + bd.last = true; + else { + struct request *nxt; + + nxt = list_first_entry(list, struct request, queuelist); + bd.last = !blk_mq_get_driver_tag(nxt, NULL, false); + } ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { @@ -997,7 +1039,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) queued++; break; case BLK_MQ_RQ_QUEUE_BUSY: - blk_mq_put_driver_tag(hctx, rq); + blk_mq_put_driver_tag_hctx(hctx, rq); list_add(&rq->queuelist, list); __blk_mq_requeue_request(rq); break; @@ -1027,6 +1069,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) * that is where we will continue on next queue run. */ if (!list_empty(list)) { + /* + * If we got a driver tag for the next request already, + * free it again. 
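The freeze helpers exported in the blk-mq hunk above (blk_mq_freeze_queue_wait() and blk_mq_freeze_queue_wait_timeout()) let a driver drain a queue without blocking forever. A minimal sketch, with my_drv_quiesce() as a hypothetical caller:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical: drain @q for at most five seconds, back out on timeout. */
static int my_drv_quiesce(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);

	/* Returns 0 on timeout, otherwise the remaining jiffies. */
	if (!blk_mq_freeze_queue_wait_timeout(q, 5 * HZ)) {
		blk_mq_unfreeze_queue(q);
		return -ETIMEDOUT;
	}
	return 0;
}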
+ */ + rq = list_first_entry(list, struct request, queuelist); + blk_mq_put_driver_tag(rq); + spin_lock(&hctx->lock); list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -1713,16 +1762,20 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int reserved_tags) { struct blk_mq_tags *tags; + int node; - tags = blk_mq_init_tags(nr_tags, reserved_tags, - set->numa_node, + node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); + if (node == NUMA_NO_NODE) + node = set->numa_node; + + tags = blk_mq_init_tags(nr_tags, reserved_tags, node, BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, - set->numa_node); + node); if (!tags->rqs) { blk_mq_free_tags(tags); return NULL; @@ -1730,7 +1783,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, - set->numa_node); + node); if (!tags->static_rqs) { kfree(tags->rqs); blk_mq_free_tags(tags); @@ -1750,6 +1803,11 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, { unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; + int node; + + node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); + if (node == NUMA_NO_NODE) + node = set->numa_node; INIT_LIST_HEAD(&tags->page_list); @@ -1771,7 +1829,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, this_order--; do { - page = alloc_pages_node(set->numa_node, + page = alloc_pages_node(node, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, this_order); if (page) @@ -1804,7 +1862,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, if (set->ops->init_request) { if (set->ops->init_request(set->driver_data, rq, hctx_idx, i, - set->numa_node)) { + node)) { tags->static_rqs[i] = NULL; goto fail; } diff --git a/block/blk-mq.h b/block/blk-mq.h index 24b2256186f3..088ced003c13 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -146,16 +146,6 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; -static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, - struct request_queue *q, unsigned int flags, - struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx) -{ - data->q = q; - data->flags = flags; - data->ctx = ctx; - data->hctx = hctx; -} - static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { if (data->flags & BLK_MQ_REQ_INTERNAL) diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 06cf9807f49a..87b7df4851bf 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/sched/topology.h> #include "blk.h" diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 002af836aa87..c44b321335f3 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -815,9 +815,7 @@ static void blk_release_queue(struct kobject *kobj) blkcg_exit_queue(q); if (q->elevator) { - spin_lock_irq(q->queue_lock); ioc_clear_queue(q); - spin_unlock_irq(q->queue_lock); elevator_exit(q->elevator); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 82fd0cc394eb..8fab716e4059 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -185,7 +185,7 @@ static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) * sq_to_td - return throtl_data the specified service queue belongs to * 
@sq: the throtl_service_queue of interest * - * A service_queue can be embeded in either a throtl_grp or throtl_data. + * A service_queue can be embedded in either a throtl_grp or throtl_data. * Determine the associated throtl_data accordingly and return it. */ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) diff --git a/block/bsg.c b/block/bsg.c index a9a8b8e0446f..74835dbf0c47 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -573,7 +573,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) int ret; ssize_t bytes_read; - dprintk("%s: read %Zd bytes\n", bd->name, count); + dprintk("%s: read %zd bytes\n", bd->name, count); bsg_set_block(bd, file); @@ -648,7 +648,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) ssize_t bytes_written; int ret; - dprintk("%s: write %Zd bytes\n", bd->name, count); + dprintk("%s: write %zd bytes\n", bd->name, count); if (unlikely(segment_eq(get_fs(), KERNEL_DS))) return -EINVAL; @@ -667,7 +667,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) if (!bytes_written || err_block_err(ret)) bytes_written = ret; - dprintk("%s: returning %Zd\n", bd->name, bytes_written); + dprintk("%s: returning %zd\n", bd->name, bytes_written); return bytes_written; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 137944777859..440b95ee593c 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -8,6 +8,7 @@ */ #include <linux/module.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/ktime.h> diff --git a/block/elevator.c b/block/elevator.c index ac1c9f481a98..01139f549b5b 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -983,9 +983,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (old_registered) elv_unregister_queue(q); - spin_lock_irq(q->queue_lock); ioc_clear_queue(q); - spin_unlock_irq(q->queue_lock); } /* allocate, init and register new elevator */ diff --git a/block/genhd.c b/block/genhd.c index 2f444b87a5f2..b26a5ea115d0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -681,6 +681,11 @@ void del_gendisk(struct gendisk *disk) disk->flags &= ~GENHD_FL_UP; sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); + /* + * Unregister bdi before releasing device numbers (as they can get + * reused and we'd get clashes in sysfs). 
+ */ + bdi_unregister(disk->queue->backing_dev_info); blk_unregister_queue(disk); blk_unregister_region(disk_devt(disk), disk->minors); diff --git a/block/ioprio.c b/block/ioprio.c index 3790669232ff..0c47a00f92a8 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -23,8 +23,11 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/ioprio.h> +#include <linux/cred.h> #include <linux/blkdev.h> #include <linux/capability.h> +#include <linux/sched/user.h> +#include <linux/sched/task.h> #include <linux/syscalls.h> #include <linux/security.h> #include <linux/pid_namespace.h> diff --git a/crypto/algboss.c b/crypto/algboss.c index ccb85e1798f2..960d8548171b 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -19,7 +19,7 @@ #include <linux/module.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 533265f110e0..5a8053758657 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index a9e79d8eff87..43839b00fe6c 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> diff --git a/crypto/api.c b/crypto/api.c index b16ce1653284..941cd4c6c7ec 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -21,7 +21,7 @@ #include <linux/kmod.h> #include <linux/module.h> #include <linux/param.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> #include "internal.h" diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index f1bf3418d968..727bd5c3569e 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <crypto/engine.h> #include <crypto/internal/hash.h> +#include <uapi/linux/sched/types.h> #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index c207458d6299..4e6472658852 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/sched.h> +#include <linux/sched/stat.h> #include <linux/slab.h> #include <linux/hardirq.h> diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index f77956c3fd45..747c2ba98534 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -56,7 +56,7 @@ struct acpi_ipmi_device { struct ipmi_driver_data { struct list_head ipmi_devices; struct ipmi_smi_watcher bmc_events; - struct ipmi_user_hndl ipmi_hndlrs; + const struct ipmi_user_hndl ipmi_hndlrs; struct mutex ipmi_lock; /* diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 8ea836c046f8..5edfd9c49044 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -18,8 +18,10 @@ #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/platform_data/clk-lpss.h> +#include <linux/platform_data/x86/pmc_atom.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/pwm.h> #include <linux/delay.h> #include "internal.h" @@ -31,7 +33,6 @@ 
ACPI_MODULE_NAME("acpi_lpss"); #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/iosf_mbi.h> -#include <asm/pmc_atom.h> #define LPSS_ADDR(desc) ((unsigned long)&desc) @@ -154,6 +155,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } +/* BSW PWM used for backlight control by the i915 driver */ +static struct pwm_lookup bsw_pwm_lookup[] = { + PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", + "pwm_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), +}; + +static void bsw_pwm_setup(struct lpss_private_data *pdata) +{ + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); +} + static const struct lpss_device_desc lpt_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, @@ -191,6 +204,7 @@ static const struct lpss_device_desc byt_pwm_dev_desc = { static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .setup = bsw_pwm_setup, }; static const struct lpss_device_desc byt_uart_dev_desc = { diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index eb76a4c10dbf..754431031282 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/freezer.h> #include <linux/cpu.h> #include <linux/tick.h> diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 251f9477a984..857dbc43a9b1 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -242,7 +242,7 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object) * * RETURN: Status * - * DESCRIPTION: Convert a typed and tokenized string to an union acpi_object. Typing: + * DESCRIPTION: Convert a typed and tokenized string to a union acpi_object. Typing: * 1) String objects were surrounded by quotes. * 2) Buffer objects were surrounded by parentheses. * 3) Package objects were surrounded by brackets "[]". diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 3dbbecf22087..9d14b509529e 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -323,7 +323,7 @@ acpi_ns_check_reference(struct acpi_evaluate_info *info, /* * Check the reference object for the correct reference type (opcode). - * The only type of reference that can be converted to an union acpi_object is + * The only type of reference that can be converted to a union acpi_object is * a reference to a named object (reference class: NAME) */ if (return_object->reference.class == ACPI_REFCLASS_NAME) { diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 8e365c0e766b..c944ff5c9c3d 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -495,9 +495,9 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info) /* * Two types of references are supported - those created by Index and * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted - * to an union acpi_object, so it is not dereferenced here. A ddb_handle + * to a union acpi_object, so it is not dereferenced here. A ddb_handle * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to - * an union acpi_object. + * a union acpi_object. 
*/ switch (info->return_object->reference.class) { case ACPI_REFCLASS_INDEX: diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index a05b5c0cf181..12771fcf0417 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -97,6 +97,7 @@ static int __init bert_check_table(struct acpi_table_bert *bert_tab) static int __init bert_init(void) { + struct apei_resources bert_resources; struct acpi_bert_region *boot_error_region; struct acpi_table_bert *bert_tab; unsigned int region_len; @@ -127,13 +128,14 @@ static int __init bert_init(void) } region_len = bert_tab->region_length; - if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) { - pr_err("Can't request iomem region <%016llx-%016llx>.\n", - (unsigned long long)bert_tab->address, - (unsigned long long)bert_tab->address + region_len - 1); - return -EIO; - } - + apei_resources_init(&bert_resources); + rc = apei_resources_add(&bert_resources, bert_tab->address, + region_len, true); + if (rc) + return rc; + rc = apei_resources_request(&bert_resources, "APEI BERT"); + if (rc) + goto out_fini; boot_error_region = ioremap_cache(bert_tab->address, region_len); if (boot_error_region) { bert_print_all(boot_error_region, region_len); @@ -142,7 +144,9 @@ static int __init bert_init(void) rc = -ENOMEM; } - release_mem_region(bert_tab->address, region_len); + apei_resources_release(&bert_resources); +out_fini: + apei_resources_fini(&bert_resources); return rc; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e53bef6cf53c..b192b42a8351 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -44,6 +44,7 @@ #include <linux/pci.h> #include <linux/aer.h> #include <linux/nmi.h> +#include <linux/sched/clock.h> #include <acpi/ghes.h> #include <acpi/apei.h> diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 8b11d6d385dc..cd4c4271dc4c 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -406,7 +406,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, } /* - * In IO-APIC mode, use overrided attribute. Two reasons: + * In IO-APIC mode, use overridden attribute. Two reasons: * 1. BIOS bug in DSDT * 2. BIOS uses IO-APIC mode Interrupt Source Override * diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 2b5d0fac81f0..01c94669a2b0 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -46,7 +46,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) * console is registered and if @earlycon is true, earlycon is set up. * * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called - * from arch inintialization code as soon as the DT/ACPI decision is made. + * from arch initialization code as soon as the DT/ACPI decision is made. 
* */ int __init parse_spcr(bool earlycon) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 2bbcdc6fdfee..aae4d8d4be36 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -31,7 +31,8 @@ #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 4e5bf36c5f46..ef68232b5222 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2034,7 +2034,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) * This is to expedite speed down decisions right after device is * initially configured. * - * The followings are speed down rules. #1 and #2 deal with + * The following are speed down rules. #1 and #2 deal with * DUBIOUS errors. * * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index f1a9198dfe5a..4a610795b585 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2394,12 +2394,7 @@ static int __init amb_module_init (void) { PRINTD (DBG_FLOW|DBG_INIT, "init_module"); - // sanity check - cast needed as printk does not support %Zu - if (sizeof(amb_mem) != 4*16 + 4*12) { - PRINTK (KERN_ERR, "Fix amb_mem (is %lu words).", - (unsigned long) sizeof(amb_mem)); - return -ENOMEM; - } + BUILD_BUG_ON(sizeof(amb_mem) != 4*16 + 4*12); show_version(); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 623359e407aa..b042ec458544 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2326,11 +2326,7 @@ static int __init eni_init(void) { struct sk_buff *skb; /* dummy for sizeof */ - if (sizeof(skb->cb) < sizeof(struct eni_skb_prv)) { - printk(KERN_ERR "eni_detect: skb->cb is too small (%Zd < %Zd)\n", - sizeof(skb->cb),sizeof(struct eni_skb_prv)); - return -EIO; - } + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct eni_skb_prv)); return pci_register_driver(&eni_driver); } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 80c2ddcfa92c..22dcab952a24 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -895,7 +895,7 @@ static int fs_open(struct atm_vcc *atm_vcc) /* XXX handle qos parameters (rate limiting) ? 
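The ambassador and eni hunks above drop runtime sizeof() sanity checks (kept around only because old printk lacked %zu) in favour of compile-time asserts. The same pattern on a made-up structure:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>

/* my_regmap is illustrative only. */
struct my_regmap {
	u32 ctrl[16];
	u32 status[12];
};

static int __init my_module_init(void)
{
	/* A layout mismatch now fails the build rather than module load. */
	BUILD_BUG_ON(sizeof(struct my_regmap) != 4*16 + 4*12);
	return 0;
}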
*/ vcc = kmalloc(sizeof(struct fs_vcc), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%Zd)\n", vcc, sizeof(struct fs_vcc)); + fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%zd)\n", vcc, sizeof(struct fs_vcc)); if (!vcc) { clear_bit(ATM_VF_ADDR, &atm_vcc->flags); return -ENOMEM; @@ -946,7 +946,7 @@ static int fs_open(struct atm_vcc *atm_vcc) if (DO_DIRECTION (txtp)) { tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%Zd)\n", + fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%zd)\n", tc, sizeof (struct fs_transmit_config)); if (!tc) { fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); @@ -1185,7 +1185,7 @@ static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb) vcc->last_skb = skb; td = kmalloc (sizeof (struct FS_BPENTRY), GFP_ATOMIC); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%Zd)\n", td, sizeof (struct FS_BPENTRY)); + fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%zd)\n", td, sizeof (struct FS_BPENTRY)); if (!td) { /* Oops out of mem */ return -ENOMEM; @@ -1492,7 +1492,7 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp, fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize); if (!skb) break; ne = kmalloc (sizeof (struct FS_BPENTRY), gfp_flags); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%Zd)\n", ne, sizeof (struct FS_BPENTRY)); + fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%zd)\n", ne, sizeof (struct FS_BPENTRY)); if (!ne) { fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb); dev_kfree_skb_any (skb); @@ -1803,7 +1803,7 @@ static int fs_init(struct fs_dev *dev) } dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%Zd)\n", + fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%zd)\n", dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *)); if (!dev->atm_vccs) { @@ -1911,7 +1911,7 @@ static int firestream_init_one(struct pci_dev *pci_dev, goto err_out; fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%Zd)\n", + fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%zd)\n", fs_dev, sizeof (struct fs_dev)); if (!fs_dev) goto err_out; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 584aa881882b..0f18480b33b5 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> @@ -2884,12 +2885,7 @@ static struct pci_driver hrz_driver = { /********** module entry **********/ static int __init hrz_module_init (void) { - // sanity check - cast is needed since printk does not support %Zu - if (sizeof(struct MEMMAP) != 128*1024/4) { - PRINTK (KERN_ERR, "Fix struct MEMMAP (is %lu fakewords).", - (unsigned long) sizeof(struct MEMMAP)); - return -ENOMEM; - } + BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4); show_version(); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 8640bafeb471..a4fa6c82261e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -21,7 +21,7 @@ supports a variety of varients of Interphase ATM PCI (i)Chip adapter card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) in terms of PHY type, the size of control memory and the size of - packet memory. The followings are the change log and history: + packet memory. The following are the change log and history: Bugfix the Mona's UBR driver. 
Modify the basic memory allocation and dma logic. diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 53ecac5a2161..2beacf2fc1ec 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -21,7 +21,7 @@ supports a variety of varients of Interphase ATM PCI (i)Chip adapter card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) in terms of PHY type, the size of control memory and the size of - packet memory. The followings are the change log and history: + packet memory. The following are the change log and history: Bugfix the Mona's UBR driver. Modify the basic memory allocation and dma logic. diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 445505d9ea07..1a9bc51284b0 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -1389,7 +1389,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) if (n < 0) n += lanai_buf_size(&lvcc->rx.buf); APRINTK(n >= 0 && n < lanai_buf_size(&lvcc->rx.buf) && !(n & 15), - "vcc_rx_aal5: n out of range (%d/%Zu)\n", + "vcc_rx_aal5: n out of range (%d/%zu)\n", n, lanai_buf_size(&lvcc->rx.buf)); /* Recover the second-to-last word to get true pdu length */ if ((x = &end[-2]) < lvcc->rx.buf.start) @@ -1493,9 +1493,9 @@ static int lanai_get_sized_buffer(struct lanai_dev *lanai, return -ENOMEM; if (unlikely(lanai_buf_size(buf) < size)) printk(KERN_WARNING DEV_LABEL "(itf %d): wanted %d bytes " - "for %s buffer, got only %Zu\n", lanai->number, size, + "for %s buffer, got only %zu\n", lanai->number, size, name, lanai_buf_size(buf)); - DPRINTK("Allocated %Zu byte %s buffer\n", lanai_buf_size(buf), name); + DPRINTK("Allocated %zu byte %s buffer\n", lanai_buf_size(buf), name); return 0; } @@ -1586,7 +1586,7 @@ static int service_buffer_allocate(struct lanai_dev *lanai) lanai->pci); if (unlikely(lanai->service.start == NULL)) return -ENOMEM; - DPRINTK("allocated service buffer at 0x%08lX, size %Zu(%d)\n", + DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n", (unsigned long) lanai->service.start, lanai_buf_size(&lanai->service), lanai_buf_size_cardorder(&lanai->service)); @@ -2467,8 +2467,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page) (lanai->status & STATUS_LED) ? 1 : 0, (lanai->status & STATUS_GPIN) ? 1 : 0); if (left-- == 0) - return sprintf(page, "global buffer sizes: service=%Zu, " - "aal0_rx=%Zu\n", lanai_buf_size(&lanai->service), + return sprintf(page, "global buffer sizes: service=%zu, " + "aal0_rx=%zu\n", lanai_buf_size(&lanai->service), lanai->naal0 ? lanai_buf_size(&lanai->aal0buf) : 0); if (left-- == 0) { get_statistics(lanai); @@ -2513,7 +2513,7 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page) left += sprintf(&page[left], ",\n rx_AAL=%d", lvcc->rx.atmvcc->qos.aal == ATM_AAL5 ? 5 : 0); if (lvcc->rx.atmvcc->qos.aal == ATM_AAL5) - left += sprintf(&page[left], ", rx_buf_size=%Zu, " + left += sprintf(&page[left], ", rx_buf_size=%zu, " "rx_bad_len=%u,\n rx_service_trash=%u, " "rx_service_stream=%u, rx_bad_crc=%u", lanai_buf_size(&lvcc->rx.buf), @@ -2524,7 +2524,7 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page) } if (lvcc->tx.atmvcc != NULL) left += sprintf(&page[left], ",\n tx_AAL=%d, " - "tx_buf_size=%Zu, tx_qos=%cBR, tx_backlogged=%c", + "tx_buf_size=%zu, tx_qos=%cBR, tx_backlogged=%c", lvcc->tx.atmvcc->qos.aal == ATM_AAL5 ? 5 : 0, lanai_buf_size(&lvcc->tx.buf), lvcc->tx.atmvcc == lanai->cbrvcc ? 
'C' : 'U', diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index cb28579e8a94..d879f3bca107 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) card->lbfqc = ns_stat_lfbqc_get(stat); id = le32_to_cpu(rsqe->buffer_handle); - skb = idr_find(&card->idr, id); + skb = idr_remove(&card->idr, id); if (!skb) { RXPRINTK(KERN_ERR - "nicstar%d: idr_find() failed!\n", card->index); + "nicstar%d: skb not found!\n", card->index); return; } - idr_remove(&card->idr, id); dma_sync_single_for_cpu(&card->pcidev->dev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM diff --git a/drivers/base/core.c b/drivers/base/core.c index 3050e6f99403..684bda4d14a1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -26,6 +26,7 @@ #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> +#include <linux/sched/signal.h> #include <linux/sysfs.h> #include "base.h" diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 44a74cf1372c..d2fb9c8ed205 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -309,7 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev) if (d_really_is_positive(dentry)) { struct kstat stat; struct path p = {.mnt = parent.mnt, .dentry = dentry}; - err = vfs_getattr(&p, &stat); + err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, + AT_STATX_SYNC_AS_STAT); if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { struct iattr newattrs; /* diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3a75fb1b4126..e697dec9d25b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -274,6 +274,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) } /** + * genpd_power_off - Remove power from a given PM domain. + * @genpd: PM domain to power down. + * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the + * RPM status of the releated device is in an intermediate state, not yet turned + * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not + * be RPM_SUSPENDED, while it tries to power off the PM domain. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, remove power from @genpd. + */ +static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, + unsigned int depth) +{ + struct pm_domain_data *pdd; + struct gpd_link *link; + unsigned int not_suspended = 0; + + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) System suspend is in progress. + */ + if (genpd->status == GPD_STATE_POWER_OFF + || genpd->prepared_count > 0) + return 0; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + enum pm_qos_flags_status stat; + + stat = dev_pm_qos_flags(pdd->dev, + PM_QOS_FLAG_NO_POWER_OFF + | PM_QOS_FLAG_REMOTE_WAKEUP); + if (stat > PM_QOS_FLAGS_NONE) + return -EBUSY; + + /* + * Do not allow PM domain to be powered off, when an IRQ safe + * device is part of a non-IRQ safe domain. 
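Returning to the devtmpfs hunk earlier in this range: vfs_getattr() now takes a statx request mask and query flags, so callers state which fields they need and how much synchronisation they want. A minimal sketch of the new calling convention (my_get_inode_number() is hypothetical, not from the patch):

#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/types.h>

static int my_get_inode_number(const struct path *path, u64 *ino)
{
	struct kstat stat;
	int err;

	/* Only STATX_INO is needed; default sync behaviour is fine. */
	err = vfs_getattr(path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (err)
		return err;

	*ino = stat.ino;
	return 0;
}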
+ */ + if (!pm_runtime_suspended(pdd->dev) || + irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) + not_suspended++; + } + + if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) + return -EBUSY; + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + if (genpd->power_off) { + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + /* + * If sd_count > 0 at this point, one of the subdomains hasn't + * managed to call genpd_power_on() for the master yet after + * incrementing it. In that case genpd_power_on() will wait + * for us to drop the lock, so we can call .power_off() and let + * the genpd_power_on() restore power for us (this shouldn't + * happen very often). + */ + ret = _genpd_power_off(genpd, true); + if (ret) + return ret; + } + + genpd->status = GPD_STATE_POWER_OFF; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_dec(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); + } + + return 0; +} + +/** * genpd_power_on - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * @depth: nesting count for lockdep. @@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); } return ret; @@ -368,87 +457,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * genpd_power_off - Remove power from a given PM domain. - * @genpd: PM domain to power down. - * @is_async: PM domain is powered down from a scheduled work - * - * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, remove power from @genpd. - */ -static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async) -{ - struct pm_domain_data *pdd; - struct gpd_link *link; - unsigned int not_suspended = 0; - - /* - * Do not try to power off the domain in the following situations: - * (1) The domain is already in the "power off" state. - * (2) System suspend is in progress. - */ - if (genpd->status == GPD_STATE_POWER_OFF - || genpd->prepared_count > 0) - return 0; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - enum pm_qos_flags_status stat; - - stat = dev_pm_qos_flags(pdd->dev, - PM_QOS_FLAG_NO_POWER_OFF - | PM_QOS_FLAG_REMOTE_WAKEUP); - if (stat > PM_QOS_FLAGS_NONE) - return -EBUSY; - - /* - * Do not allow PM domain to be powered off, when an IRQ safe - * device is part of a non-IRQ safe domain. - */ - if (!pm_runtime_suspended(pdd->dev) || - irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) - not_suspended++; - } - - if (not_suspended > 1 || (not_suspended == 1 && is_async)) - return -EBUSY; - - if (genpd->gov && genpd->gov->power_down_ok) { - if (!genpd->gov->power_down_ok(&genpd->domain)) - return -EAGAIN; - } - - if (genpd->power_off) { - int ret; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - /* - * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call genpd_power_on() for the master yet after - * incrementing it. 
In that case genpd_power_on() will wait - * for us to drop the lock, so we can call .power_off() and let - * the genpd_power_on() restore power for us (this shouldn't - * happen very often). - */ - ret = _genpd_power_off(genpd, true); - if (ret) - return ret; - } - - genpd->status = GPD_STATE_POWER_OFF; - - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); - } - - return 0; -} - -/** * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. * @work: Work structure used for scheduling the execution of this function. */ @@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); genpd_lock(genpd); - genpd_power_off(genpd, true); + genpd_power_off(genpd, false, 0); genpd_unlock(genpd); } @@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev) return 0; genpd_lock(genpd); - genpd_power_off(genpd, false); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); return 0; @@ -658,7 +666,7 @@ err_poweroff: if (!pm_runtime_is_irq_safe(dev) || (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { genpd_lock(genpd); - genpd_power_off(genpd, 0); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 249e0304597f..9faee1c893e5 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -27,6 +27,7 @@ #include <linux/pm_wakeirq.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/async.h> #include <linux/suspend.h> #include <trace/events/power.h> diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 91ec3232d630..dae61720b314 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. */ - for (i = 0; reg = regulators[i], i < count; i++) { + for (i = 0; i < count; i++) { + reg = regulators[i]; ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); if (ret > 0) latency_ns += ret * 1000; diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index d888d9869b6a..f850daeffba4 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -17,12 +17,9 @@ * * This QoS design is best effort based. Dependents register their QoS needs. * Watchers register to keep track of the current QoS needs of the system. - * Watchers can register different types of notification callbacks: - * . a per-device notification callback using the dev_pm_qos_*_notifier API. - * The notification chain data is stored in the per-device constraint - * data struct. - * . a system-wide notification callback using the dev_pm_qos_*_global_notifier - * API. The notification chain data is stored in a static variable. + * Watchers can register a per-device notification callback using the + * dev_pm_qos_*_notifier API. The notification chain data is stored in the + * per-device constraint data struct. * * Note about the per-device constraint data struct allocation: * . 
The per-device constraints data struct ptr is tored into the device @@ -49,8 +46,6 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); -static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); - /** * __dev_pm_qos_flags - Check PM QoS flags for a given device. * @dev: Device to check the PM QoS flags for. @@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev) { lockdep_assert_held(&dev->power.lock); - return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->resume_latency); + return dev_pm_qos_raw_read_value(dev); } /** @@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev) * @value: Value to assign to the QoS request. * * Internal function to update the constraints list using the PM QoS core - * code and if needed call the per-device and the global notification - * callbacks + * code and if needed call the per-device callbacks. */ static int apply_constraint(struct dev_pm_qos_request *req, enum pm_qos_req_action action, s32 value) @@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req, case DEV_PM_QOS_RESUME_LATENCY: ret = pm_qos_update_target(&qos->resume_latency, &req->data.pnode, action, value); - if (ret) { - value = pm_qos_read_value(&qos->resume_latency); - blocking_notifier_call_chain(&dev_pm_notifiers, - (unsigned long)value, - req); - } break; case DEV_PM_QOS_LATENCY_TOLERANCE: ret = pm_qos_update_target(&qos->latency_tolerance, @@ -536,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); /** - * dev_pm_qos_add_global_notifier - sets notification entry for changes to - * target value of the PM QoS constraints for any device - * - * @notifier: notifier block managed by caller. - * - * Will register the notifier into a notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); - -/** - * dev_pm_qos_remove_global_notifier - deletes notification for changes to - * target value of PM QoS constraints for any device - * - * @notifier: notifier block to be removed. - * - * Will remove the notifier from the notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); - -/** * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index a14fac6a01d3..7bcf80fa9ada 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -7,7 +7,7 @@ * This file is released under the GPLv2. 
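With dev_pm_qos_{add,remove}_global_notifier() removed in the qos.c hunk above, watchers now register per-device notifiers only. A sketch under that assumption, with my_qos_notify() and my_probe() as hypothetical names:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int my_qos_notify(struct notifier_block *nb,
			 unsigned long value, void *unused)
{
	/* @value is the new effective resume-latency constraint for the device. */
	return NOTIFY_OK;
}

static struct notifier_block my_qos_nb = {
	.notifier_call = my_qos_notify,
};

static int my_probe(struct device *dev)
{
	/* Per-device registration replaces the old global chain. */
	return dev_pm_qos_add_notifier(dev, &my_qos_nb);
}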
*/ -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f546f8f107b0..136854970489 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -8,7 +8,7 @@ #include <linux/device.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/export.h> #include <linux/suspend.h> diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4cb8f21ff4ef..724d1c50fc52 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -30,7 +30,7 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/ratelimit.h> diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 615e5b5178a0..92c60cbd04ee 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -52,6 +52,7 @@ #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> #include <linux/vmalloc.h> +#include <linux/sched/signal.h> #include <linux/drbd_limits.h> #include "drbd_int.h" @@ -1846,7 +1847,7 @@ int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_requ int drbd_send(struct drbd_connection *connection, struct socket *sock, void *buf, size_t size, unsigned msg_flags) { - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; struct msghdr msg; int rv, sent = 0; @@ -1855,15 +1856,14 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, /* THINK if (signal_pending) return ... ? 
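The drbd_send() rework above and the nbd sock_xmit() rework further on share the same pattern: describe the buffer once with a kvec, attach it to the msghdr via iov_iter_kvec(), and let the iterator track partial sends instead of adjusting iov_base/iov_len by hand. A stripped-down sketch (my_send_all() is illustrative, not taken from either driver):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int my_send_all(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int rv;

	/* The iterator lives inside the msghdr and advances on each send. */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len);

	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == 0)
			return -EPIPE;
		if (rv < 0)
			return rv;
	} while (msg_data_left(&msg));

	return 0;
}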
*/ - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + if (sock == connection->data.socket) { rcu_read_lock(); connection->ko_count = rcu_dereference(connection->net_conf)->ko_count; @@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, drbd_update_congested(connection); } do { - rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + rv = sock_sendmsg(sock, &msg); if (rv == -EAGAIN) { if (we_should_drop_the_connection(connection, sock)) break; @@ -1885,8 +1885,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, if (rv < 0) break; sent += rv; - iov.iov_base += rv; - iov.iov_len -= rv; } while (sent < size); if (sock == connection->data.socket) @@ -2915,11 +2913,9 @@ out_idr_remove_vol: idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: for_each_connection(connection, resource) { - peer_device = idr_find(&connection->peer_devices, vnr); - if (peer_device) { - idr_remove(&connection->peer_devices, vnr); + peer_device = idr_remove(&connection->peer_devices, vnr); + if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); - } } for_each_peer_device_safe(peer_device, tmp_peer_device, device) { list_del(&peer_device->peer_devices); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c7728dd77230..aa6bf9692eff 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -36,6 +36,8 @@ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/slab.h> +#include <uapi/linux/sched/types.h> +#include <linux/sched/signal.h> #include <linux/pkt_sched.h> #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index c6755c9a0aea..3bff33f21435 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <linux/drbd.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/memcontrol.h> diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 304377182c1a..0ecb6461ed81 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -186,7 +186,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) * * TODO: the above condition may be loosed in the future, and * direct I/O may be switched runtime at that time because most - * of requests in sane appplications should be PAGE_SIZE algined + * of requests in sane applications should be PAGE_SIZE aligned */ if (dio) { if (queue_logical_block_size(lo->lo_queue) >= sb_bsize && @@ -501,9 +501,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_flags = IOCB_DIRECT; if (rw == WRITE) - ret = file->f_op->write_iter(&cmd->iocb, &iter); + ret = call_write_iter(file, &cmd->iocb, &iter); else - ret = file->f_op->read_iter(&cmd->iocb, &iter); + ret = call_read_iter(file, &cmd->iocb, &iter); if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0); @@ -1142,13 +1142,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) (info->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; - if ((info->lo_flags & LO_FLAGS_PARTSCAN) && - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { - lo->lo_flags |= 
LO_FLAGS_PARTSCAN; - lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; - loop_reread_partitions(lo, lo->lo_device); - } - lo->lo_encrypt_key_size = info->lo_encrypt_key_size; lo->lo_init[0] = info->lo_init[0]; lo->lo_init[1] = info->lo_init[1]; @@ -1163,6 +1156,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) exit: blk_mq_unfreeze_queue(lo->lo_queue); + + if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && + !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { + lo->lo_flags |= LO_FLAGS_PARTSCAN; + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; + loop_reread_partitions(lo, lo->lo_device); + } + return err; } @@ -1175,7 +1176,8 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) if (lo->lo_state != Lo_bound) return -ENXIO; - error = vfs_getattr(&file->f_path, &stat); + error = vfs_getattr(&file->f_path, &stat, + STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; memset(info, 0, sizeof(*info)); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0bf2b21a62cb..7e4287bc19e5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -201,13 +201,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, /* * Send or receive packet. */ -static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, - int size, int msg_flags) +static int sock_xmit(struct nbd_device *nbd, int index, int send, + struct iov_iter *iter, int msg_flags) { struct socket *sock = nbd->socks[index]->sock; int result; struct msghdr msg; - struct kvec iov; unsigned long pflags = current->flags; if (unlikely(!sock)) { @@ -217,11 +216,11 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, return -EINVAL; } + msg.msg_iter = *iter; + current->flags |= PF_MEMALLOC; do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; - iov.iov_base = buf; - iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; @@ -229,47 +228,37 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) - result = kernel_sendmsg(sock, &msg, &iov, 1, size); + result = sock_sendmsg(sock, &msg); else - result = kernel_recvmsg(sock, &msg, &iov, 1, size, - msg.msg_flags); + result = sock_recvmsg(sock, &msg, msg.msg_flags); if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } - size -= result; - buf += result; - } while (size > 0); + } while (msg_data_left(&msg)); tsk_restore_flags(current, pflags, PF_MEMALLOC); return result; } -static inline int sock_send_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec, int flags) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset, - bvec->bv_len, flags); - kunmap(bvec->bv_page); - return result; -} - /* always call with the tx_lock held */ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); int result; - struct nbd_request request; + struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; unsigned long size = blk_rq_bytes(req); struct bio *bio; u32 type; u32 tag = blk_mq_unique_tag(req); + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + switch (req_op(req)) { case REQ_OP_DISCARD: type = NBD_CMD_TRIM; @@ -294,8 +283,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return -EIO; } - 
memset(&request, 0, sizeof(request)); - request.magic = htonl(NBD_REQUEST_MAGIC); request.type = htonl(type); if (type != NBD_CMD_FLUSH) { request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); @@ -306,7 +293,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", cmd, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); - result = sock_xmit(nbd, index, 1, &request, sizeof(request), + result = sock_xmit(nbd, index, 1, &from, (type == NBD_CMD_WRITE) ? MSG_MORE : 0); if (result <= 0) { dev_err_ratelimited(disk_to_dev(nbd->disk), @@ -329,7 +316,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", cmd, bvec.bv_len); - result = sock_send_bvec(nbd, index, &bvec, flags); + iov_iter_bvec(&from, ITER_BVEC | WRITE, + &bvec, 1, bvec.bv_len); + result = sock_xmit(nbd, index, 1, &from, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", @@ -350,17 +339,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return 0; } -static inline int sock_recv_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset, - bvec->bv_len, MSG_WAITALL); - kunmap(bvec->bv_page); - return result; -} - /* NULL returned = something went wrong, inform userspace */ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) { @@ -370,9 +348,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct request *req = NULL; u16 hwq; u32 tag; + struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; + struct iov_iter to; reply.magic = 0; - result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL); + iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); if (result <= 0) { if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) @@ -412,7 +393,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - result = sock_recv_bvec(nbd, index, &bvec); + iov_iter_bvec(&to, ITER_BVEC | READ, + &bvec, 1, bvec.bv_len); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); @@ -641,14 +624,17 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev) static void send_disconnects(struct nbd_device *nbd) { - struct nbd_request request = {}; + struct nbd_request request = { + .magic = htonl(NBD_REQUEST_MAGIC), + .type = htonl(NBD_CMD_DISC), + }; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; int i, ret; - request.magic = htonl(NBD_REQUEST_MAGIC); - request.type = htonl(NBD_CMD_DISC); - for (i = 0; i < nbd->num_connections; i++) { - ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0); + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + ret = sock_xmit(nbd, i, 1, &from, 0); if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); @@ -689,8 +675,10 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev) nbd->num_connections) { int i; - for (i = 0; 
i < nbd->num_connections; i++) + for (i = 0; i < nbd->num_connections; i++) { + sockfd_put(nbd->socks[i]->sock); kfree(nbd->socks[i]); + } kfree(nbd->socks); nbd->socks = NULL; nbd->num_connections = 0; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 362cecc77130..4d6807723798 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -123,9 +123,11 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_LAYERING (1<<0) #define RBD_FEATURE_STRIPINGV2 (1<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) +#define RBD_FEATURE_DATA_POOL (1<<7) #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ - RBD_FEATURE_EXCLUSIVE_LOCK) + RBD_FEATURE_EXCLUSIVE_LOCK | \ + RBD_FEATURE_DATA_POOL) /* Features supported by this (client software) implementation. */ @@ -144,10 +146,9 @@ struct rbd_image_header { /* These six fields never change for a given rbd image */ char *object_prefix; __u8 obj_order; - __u8 crypt_type; - __u8 comp_type; u64 stripe_unit; u64 stripe_count; + s64 data_pool_id; u64 features; /* Might be changeable someday? */ /* The remaining fields need to be updated occasionally */ @@ -230,7 +231,7 @@ enum obj_req_flags { }; struct rbd_obj_request { - const char *object_name; + u64 object_no; u64 offset; /* object start byte */ u64 length; /* bytes from offset */ unsigned long flags; @@ -438,7 +439,6 @@ static DEFINE_SPINLOCK(rbd_client_list_lock); static struct kmem_cache *rbd_img_request_cache; static struct kmem_cache *rbd_obj_request_cache; -static struct kmem_cache *rbd_segment_name_cache; static int rbd_major; static DEFINE_IDA(rbd_dev_id_ida); @@ -973,6 +973,30 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) } /* + * returns the size of an object in the image + */ +static u32 rbd_obj_bytes(struct rbd_image_header *header) +{ + return 1U << header->obj_order; +} + +static void rbd_init_layout(struct rbd_device *rbd_dev) +{ + if (rbd_dev->header.stripe_unit == 0 || + rbd_dev->header.stripe_count == 0) { + rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header); + rbd_dev->header.stripe_count = 1; + } + + rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit; + rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count; + rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header); + rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ? + rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id; + RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); +} + +/* * Fill an rbd image header with information from the given format 1 * on-disk header. 
*/ @@ -992,15 +1016,11 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, /* Allocate this now to avoid having to handle failure below */ if (first_time) { - size_t len; - - len = strnlen(ondisk->object_prefix, - sizeof (ondisk->object_prefix)); - object_prefix = kmalloc(len + 1, GFP_KERNEL); + object_prefix = kstrndup(ondisk->object_prefix, + sizeof(ondisk->object_prefix), + GFP_KERNEL); if (!object_prefix) return -ENOMEM; - memcpy(object_prefix, ondisk->object_prefix, len); - object_prefix[len] = '\0'; } /* Allocate the snapshot context and fill it in */ @@ -1051,12 +1071,7 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, if (first_time) { header->object_prefix = object_prefix; header->obj_order = ondisk->options.order; - header->crypt_type = ondisk->options.crypt_type; - header->comp_type = ondisk->options.comp_type; - /* The rest aren't used for format 1 images */ - header->stripe_unit = 0; - header->stripe_count = 0; - header->features = 0; + rbd_init_layout(rbd_dev); } else { ceph_put_snap_context(header->snapc); kfree(header->snap_names); @@ -1232,42 +1247,9 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) rbd_dev->mapping.features = 0; } -static void rbd_segment_name_free(const char *name) -{ - /* The explicit cast here is needed to drop the const qualifier */ - - kmem_cache_free(rbd_segment_name_cache, (void *)name); -} - -static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) -{ - char *name; - u64 segment; - int ret; - char *name_format; - - name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); - if (!name) - return NULL; - segment = offset >> rbd_dev->header.obj_order; - name_format = "%s.%012llx"; - if (rbd_dev->image_format == 2) - name_format = "%s.%016llx"; - ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format, - rbd_dev->header.object_prefix, segment); - if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) { - pr_err("error formatting segment name for #%llu (%d)\n", - segment, ret); - rbd_segment_name_free(name); - name = NULL; - } - - return name; -} - static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) { - u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; + u64 segment_size = rbd_obj_bytes(&rbd_dev->header); return offset & (segment_size - 1); } @@ -1275,7 +1257,7 @@ static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) static u64 rbd_segment_length(struct rbd_device *rbd_dev, u64 offset, u64 length) { - u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; + u64 segment_size = rbd_obj_bytes(&rbd_dev->header); offset &= segment_size - 1; @@ -1287,14 +1269,6 @@ static u64 rbd_segment_length(struct rbd_device *rbd_dev, } /* - * returns the size of an object in the image - */ -static u64 rbd_obj_bytes(struct rbd_image_header *header) -{ - return 1 << header->obj_order; -} - -/* * bio helpers */ @@ -1623,7 +1597,9 @@ static void rbd_obj_request_submit(struct rbd_obj_request *obj_request) { struct ceph_osd_request *osd_req = obj_request->osd_req; - dout("%s %p osd_req %p\n", __func__, obj_request, osd_req); + dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__, + obj_request, obj_request->object_no, obj_request->offset, + obj_request->length, osd_req); if (obj_request_img_data_test(obj_request)) { WARN_ON(obj_request->callback != rbd_img_obj_callback); rbd_img_request_get(obj_request->img_request); @@ -1631,44 +1607,6 @@ static void rbd_obj_request_submit(struct rbd_obj_request *obj_request) ceph_osdc_start_request(osd_req->r_osdc, osd_req, false); } 
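[Editor's sketch, not part of any patch above] The drbd_send() and nbd sock_xmit() hunks earlier in this series share one pattern: the buffer is described once with an iov_iter and then handed to sock_sendmsg()/sock_recvmsg(), which advance the iterator themselves, so the old hand-rolled iov_base/iov_len adjustment after a partial transfer disappears. A minimal sketch of that pattern follows, assuming a blocking kernel socket; example_send_all() is an invented name, not a function in these patches.
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int example_send_all(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int ret;

	/* Describe the buffer once; msg.msg_iter tracks progress from here on. */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len);

	do {
		ret = sock_sendmsg(sock, &msg);
		if (ret == 0)		/* treat a zero return as a gone peer */
			return -EPIPE;
		if (ret < 0)
			return ret;
	} while (msg_data_left(&msg));	/* loop only on a partial send */

	return 0;
}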
-static void rbd_obj_request_end(struct rbd_obj_request *obj_request) -{ - dout("%s %p\n", __func__, obj_request); - ceph_osdc_cancel_request(obj_request->osd_req); -} - -/* - * Wait for an object request to complete. If interrupted, cancel the - * underlying osd request. - * - * @timeout: in jiffies, 0 means "wait forever" - */ -static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request, - unsigned long timeout) -{ - long ret; - - dout("%s %p\n", __func__, obj_request); - ret = wait_for_completion_interruptible_timeout( - &obj_request->completion, - ceph_timeout_jiffies(timeout)); - if (ret <= 0) { - if (ret == 0) - ret = -ETIMEDOUT; - rbd_obj_request_end(obj_request); - } else { - ret = 0; - } - - dout("%s %p ret %d\n", __func__, obj_request, (int)ret); - return ret; -} - -static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) -{ - return __rbd_obj_request_wait(obj_request, 0); -} - static void rbd_img_request_complete(struct rbd_img_request *img_request) { @@ -1955,8 +1893,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) rbd_osd_call_callback(obj_request); break; default: - rbd_warn(NULL, "%s: unsupported op %hu", - obj_request->object_name, (unsigned short) opcode); + rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d", + obj_request->object_no, opcode); break; } @@ -1980,6 +1918,40 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) osd_req->r_data_offset = obj_request->offset; } +static struct ceph_osd_request * +__rbd_osd_req_create(struct rbd_device *rbd_dev, + struct ceph_snap_context *snapc, + int num_ops, unsigned int flags, + struct rbd_obj_request *obj_request) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_osd_request *req; + const char *name_format = rbd_dev->image_format == 1 ? + RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT; + + req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); + if (!req) + return NULL; + + req->r_flags = flags; + req->r_callback = rbd_osd_req_callback; + req->r_priv = obj_request; + + req->r_base_oloc.pool = rbd_dev->layout.pool_id; + if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format, + rbd_dev->header.object_prefix, obj_request->object_no)) + goto err_req; + + if (ceph_osdc_alloc_messages(req, GFP_NOIO)) + goto err_req; + + return req; + +err_req: + ceph_osdc_put_request(req); + return NULL; +} + /* * Create an osd request. A read request has one osd op (read). * A write request has either one (watch) or two (hint+write) osd ops. 
@@ -1993,8 +1965,6 @@ static struct ceph_osd_request *rbd_osd_req_create( struct rbd_obj_request *obj_request) { struct ceph_snap_context *snapc = NULL; - struct ceph_osd_client *osdc; - struct ceph_osd_request *osd_req; if (obj_request_img_data_test(obj_request) && (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) { @@ -2009,35 +1979,9 @@ static struct ceph_osd_request *rbd_osd_req_create( rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2)); - /* Allocate and initialize the request, for the num_ops ops */ - - osdc = &rbd_dev->rbd_client->client->osdc; - osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, - GFP_NOIO); - if (!osd_req) - goto fail; - - if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) - osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; - else - osd_req->r_flags = CEPH_OSD_FLAG_READ; - - osd_req->r_callback = rbd_osd_req_callback; - osd_req->r_priv = obj_request; - - osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; - if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", - obj_request->object_name)) - goto fail; - - if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) - goto fail; - - return osd_req; - -fail: - ceph_osdc_put_request(osd_req); - return NULL; + return __rbd_osd_req_create(rbd_dev, snapc, num_ops, + (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ? + CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request); } /* @@ -2050,10 +1994,6 @@ static struct ceph_osd_request * rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; - struct ceph_snap_context *snapc; - struct rbd_device *rbd_dev; - struct ceph_osd_client *osdc; - struct ceph_osd_request *osd_req; int num_osd_ops = 3; rbd_assert(obj_request_img_data_test(obj_request)); @@ -2065,77 +2005,34 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) if (img_request_discard_test(img_request)) num_osd_ops = 2; - /* Allocate and initialize the request, for all the ops */ - - snapc = img_request->snapc; - rbd_dev = img_request->rbd_dev; - osdc = &rbd_dev->rbd_client->client->osdc; - osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, - false, GFP_NOIO); - if (!osd_req) - goto fail; - - osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; - osd_req->r_callback = rbd_osd_req_callback; - osd_req->r_priv = obj_request; - - osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; - if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", - obj_request->object_name)) - goto fail; - - if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) - goto fail; - - return osd_req; - -fail: - ceph_osdc_put_request(osd_req); - return NULL; + return __rbd_osd_req_create(img_request->rbd_dev, + img_request->snapc, num_osd_ops, + CEPH_OSD_FLAG_WRITE, obj_request); } - static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) { ceph_osdc_put_request(osd_req); } -/* object_name is assumed to be a non-null pointer and NUL-terminated */ - -static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, - u64 offset, u64 length, - enum obj_request_type type) +static struct rbd_obj_request * +rbd_obj_request_create(enum obj_request_type type) { struct rbd_obj_request *obj_request; - size_t size; - char *name; rbd_assert(obj_request_type_valid(type)); - size = strlen(object_name) + 1; - name = kmalloc(size, GFP_NOIO); - if (!name) - return NULL; - obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); - if (!obj_request) { - kfree(name); + if (!obj_request) return NULL; - } - 
obj_request->object_name = memcpy(name, object_name, size); - obj_request->offset = offset; - obj_request->length = length; - obj_request->flags = 0; obj_request->which = BAD_WHICH; obj_request->type = type; INIT_LIST_HEAD(&obj_request->links); init_completion(&obj_request->completion); kref_init(&obj_request->kref); - dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name, - offset, length, (int)type, obj_request); - + dout("%s %p\n", __func__, obj_request); return obj_request; } @@ -2170,8 +2067,6 @@ static void rbd_obj_request_destroy(struct kref *kref) break; } - kfree(obj_request->object_name); - obj_request->object_name = NULL; kmem_cache_free(rbd_obj_request_cache, obj_request); } @@ -2546,22 +2441,18 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, while (resid) { struct ceph_osd_request *osd_req; - const char *object_name; - u64 offset; - u64 length; + u64 object_no = img_offset >> rbd_dev->header.obj_order; + u64 offset = rbd_segment_offset(rbd_dev, img_offset); + u64 length = rbd_segment_length(rbd_dev, img_offset, resid); - object_name = rbd_segment_name(rbd_dev, img_offset); - if (!object_name) - goto out_unwind; - offset = rbd_segment_offset(rbd_dev, img_offset); - length = rbd_segment_length(rbd_dev, img_offset, resid); - obj_request = rbd_obj_request_create(object_name, - offset, length, type); - /* object request has its own copy of the object name */ - rbd_segment_name_free(object_name); + obj_request = rbd_obj_request_create(type); if (!obj_request) goto out_unwind; + obj_request->object_no = object_no; + obj_request->offset = offset; + obj_request->length = length; + /* * set obj_request->img_request before creating the * osd_request so that it gets the right snapc @@ -2771,7 +2662,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) * child image to which the original request was to be sent. */ img_offset = obj_request->img_offset - obj_request->offset; - length = (u64)1 << rbd_dev->header.obj_order; + length = rbd_obj_bytes(&rbd_dev->header); /* * There is no defined parent data beyond the parent @@ -2900,11 +2791,12 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) size_t size; int ret; - stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0, - OBJ_REQUEST_PAGES); + stat_request = rbd_obj_request_create(OBJ_REQUEST_PAGES); if (!stat_request) return -ENOMEM; + stat_request->object_no = obj_request->object_no; + stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, stat_request); if (!stat_request->osd_req) { @@ -3983,17 +3875,17 @@ out: * returned in the outbound buffer, or a negative error code. */ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, - const char *object_name, - const char *class_name, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, const char *method_name, const void *outbound, size_t outbound_size, void *inbound, size_t inbound_size) { - struct rbd_obj_request *obj_request; - struct page **pages; - u32 page_count; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct page *req_page = NULL; + struct page *reply_page; int ret; /* @@ -4003,61 +3895,35 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, * method. Currently if this is present it will be a * snapshot id. 
*/ - page_count = (u32)calc_pages_for(0, inbound_size); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - ret = -ENOMEM; - obj_request = rbd_obj_request_create(object_name, 0, inbound_size, - OBJ_REQUEST_PAGES); - if (!obj_request) - goto out; + if (outbound) { + if (outbound_size > PAGE_SIZE) + return -E2BIG; - obj_request->pages = pages; - obj_request->page_count = page_count; - - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, - obj_request); - if (!obj_request->osd_req) - goto out; - - osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, - class_name, method_name); - if (outbound_size) { - struct ceph_pagelist *pagelist; - - pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); - if (!pagelist) - goto out; + req_page = alloc_page(GFP_KERNEL); + if (!req_page) + return -ENOMEM; - ceph_pagelist_init(pagelist); - ceph_pagelist_append(pagelist, outbound, outbound_size); - osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0, - pagelist); + memcpy(page_address(req_page), outbound, outbound_size); } - osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, - obj_request->pages, inbound_size, - 0, false, false); - - rbd_obj_request_submit(obj_request); - ret = rbd_obj_request_wait(obj_request); - if (ret) - goto out; - ret = obj_request->result; - if (ret < 0) - goto out; + reply_page = alloc_page(GFP_KERNEL); + if (!reply_page) { + if (req_page) + __free_page(req_page); + return -ENOMEM; + } - rbd_assert(obj_request->xferred < (u64)INT_MAX); - ret = (int)obj_request->xferred; - ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred); -out: - if (obj_request) - rbd_obj_request_put(obj_request); - else - ceph_release_page_vector(pages, page_count); + ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name, + CEPH_OSD_FLAG_READ, req_page, outbound_size, + reply_page, &inbound_size); + if (!ret) { + memcpy(inbound, page_address(reply_page), inbound_size); + ret = inbound_size; + } + if (req_page) + __free_page(req_page); + __free_page(reply_page); return ret; } @@ -4256,63 +4122,46 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) } static int rbd_obj_read_sync(struct rbd_device *rbd_dev, - const char *object_name, - u64 offset, u64 length, void *buf) + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *buf, int buf_len) { - struct rbd_obj_request *obj_request; - struct page **pages = NULL; - u32 page_count; - size_t size; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_osd_request *req; + struct page **pages; + int num_pages = calc_pages_for(0, buf_len); int ret; - page_count = (u32) calc_pages_for(offset, length); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - ret = -ENOMEM; - obj_request = rbd_obj_request_create(object_name, offset, length, - OBJ_REQUEST_PAGES); - if (!obj_request) - goto out; - - obj_request->pages = pages; - obj_request->page_count = page_count; - - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, - obj_request); - if (!obj_request->osd_req) - goto out; + req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); + if (!req) + return -ENOMEM; - osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, - offset, length, 0, 0); - osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, - obj_request->pages, - obj_request->length, - obj_request->offset & ~PAGE_MASK, - false, false); + 
ceph_oid_copy(&req->r_base_oid, oid); + ceph_oloc_copy(&req->r_base_oloc, oloc); + req->r_flags = CEPH_OSD_FLAG_READ; - rbd_obj_request_submit(obj_request); - ret = rbd_obj_request_wait(obj_request); + ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); if (ret) - goto out; + goto out_req; - ret = obj_request->result; - if (ret < 0) - goto out; + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out_req; + } - rbd_assert(obj_request->xferred <= (u64) SIZE_MAX); - size = (size_t) obj_request->xferred; - ceph_copy_from_page_vector(pages, buf, 0, size); - rbd_assert(size <= (size_t)INT_MAX); - ret = (int)size; -out: - if (obj_request) - rbd_obj_request_put(obj_request); - else - ceph_release_page_vector(pages, page_count); + osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); + osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, + true); + + ceph_osdc_start_request(osdc, req, false); + ret = ceph_osdc_wait_request(osdc, req); + if (ret >= 0) + ceph_copy_from_page_vector(pages, buf, 0, ret); +out_req: + ceph_osdc_put_request(req); return ret; } @@ -4348,8 +4197,8 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) if (!ondisk) return -ENOMEM; - ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name, - 0, size, ondisk); + ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, ondisk, size); if (ret < 0) goto out; if ((size_t)ret < size) { @@ -4781,7 +4630,7 @@ static const struct attribute_group *rbd_attr_groups[] = { static void rbd_dev_release(struct device *dev); -static struct device_type rbd_device_type = { +static const struct device_type rbd_device_type = { .name = "rbd", .groups = rbd_attr_groups, .release = rbd_dev_release, @@ -4876,8 +4725,9 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); + rbd_dev->header.data_pool_id = CEPH_NOPOOL; ceph_oid_init(&rbd_dev->header_oid); - ceph_oloc_init(&rbd_dev->header_oloc); + rbd_dev->header_oloc.pool = spec->pool_id; mutex_init(&rbd_dev->watch_mutex); rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; @@ -4899,12 +4749,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, rbd_dev->rbd_client = rbdc; rbd_dev->spec = spec; - rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER; - rbd_dev->layout.stripe_count = 1; - rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER; - rbd_dev->layout.pool_id = spec->pool_id; - RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); - return rbd_dev; } @@ -4970,10 +4814,10 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, __le64 size; } __attribute__ ((packed)) size_buf = { 0 }; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_size", - &snapid, sizeof (snapid), - &size_buf, sizeof (size_buf)); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_size", + &snapid, sizeof(snapid), + &size_buf, sizeof(size_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5010,9 +4854,9 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_object_prefix", NULL, 0, - reply_buf, RBD_OBJ_PREFIX_LEN_MAX); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_object_prefix", + NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX); 
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; @@ -5045,10 +4889,10 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, u64 unsup; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_features", - &snapid, sizeof (snapid), - &features_buf, sizeof (features_buf)); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_features", + &snapid, sizeof(snapid), + &features_buf, sizeof(features_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5107,10 +4951,9 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) } snapid = cpu_to_le64(rbd_dev->spec->snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_parent", - &snapid, sizeof (snapid), - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_parent", + &snapid, sizeof(snapid), reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out_err; @@ -5210,9 +5053,9 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) u64 stripe_count; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_stripe_unit_count", NULL, 0, - (char *)&striping_info_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_stripe_unit_count", + NULL, 0, &striping_info_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -5226,7 +5069,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) * out, and only fail if the image has non-default values. */ ret = -EINVAL; - obj_size = (u64)1 << rbd_dev->header.obj_order; + obj_size = rbd_obj_bytes(&rbd_dev->header); p = &striping_info_buf; stripe_unit = ceph_decode_64(&p); if (stripe_unit != obj_size) { @@ -5247,8 +5090,27 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) return 0; } +static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) +{ + __le64 data_pool_id; + int ret; + + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_data_pool", + NULL, 0, &data_pool_id, sizeof(data_pool_id)); + if (ret < 0) + return ret; + if (ret < sizeof(data_pool_id)) + return -EBADMSG; + + rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); + WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); + return 0; +} + static char *rbd_dev_image_name(struct rbd_device *rbd_dev) { + CEPH_DEFINE_OID_ONSTACK(oid); size_t image_id_size; char *image_id; void *p; @@ -5276,10 +5138,10 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) if (!reply_buf) goto out; - ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY, - "rbd", "dir_get_name", - image_id, image_id_size, - reply_buf, size); + ceph_oid_printf(&oid, "%s", RBD_DIRECTORY); + ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, + "dir_get_name", image_id, image_id_size, + reply_buf, size); if (ret < 0) goto out; p = reply_buf; @@ -5458,9 +5320,9 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_snapcontext", NULL, 0, - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_snapcontext", + NULL, 0, reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) 
goto out; @@ -5523,10 +5385,9 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, return ERR_PTR(-ENOMEM); snapid = cpu_to_le64(snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, - "rbd", "get_snapshot_name", - &snapid, sizeof (snapid), - reply_buf, size); + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_snapshot_name", + &snapid, sizeof(snapid), reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) { snap_name = ERR_PTR(ret); @@ -5833,7 +5694,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) { int ret; size_t size; - char *object_name; + CEPH_DEFINE_OID_ONSTACK(oid); void *response; char *image_id; @@ -5853,12 +5714,12 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) * First, see if the format 2 image id file exists, and if * so, get the image's persistent id from it. */ - size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name); - object_name = kmalloc(size, GFP_NOIO); - if (!object_name) - return -ENOMEM; - sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name); - dout("rbd id object name is %s\n", object_name); + ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX, + rbd_dev->spec->image_name); + if (ret) + return ret; + + dout("rbd id object name is %s\n", oid.name); /* Response will be an encoded string, which includes a length */ @@ -5871,9 +5732,9 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) /* If it doesn't exist we'll assume it's a format 1 image */ - ret = rbd_obj_method_sync(rbd_dev, object_name, - "rbd", "get_id", NULL, 0, - response, RBD_IMAGE_ID_LEN_MAX); + ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, + "get_id", NULL, 0, + response, RBD_IMAGE_ID_LEN_MAX); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret == -ENOENT) { image_id = kstrdup("", GFP_KERNEL); @@ -5896,8 +5757,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) } out: kfree(response); - kfree(object_name); - + ceph_oid_destroy(&oid); return ret; } @@ -5944,14 +5804,20 @@ static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; } - /* No support for crypto and compression type format 2 images */ + if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { + ret = rbd_dev_v2_data_pool(rbd_dev); + if (ret) + goto out_err; + } + + rbd_init_layout(rbd_dev); return 0; + out_err: rbd_dev->header.features = 0; kfree(rbd_dev->header.object_prefix); rbd_dev->header.object_prefix = NULL; - return ret; } @@ -6077,8 +5943,6 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) /* Record the header object name for this rbd image. 
*/ rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - - rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id; if (rbd_dev->image_format == 1) ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", spec->image_name, RBD_SUFFIX); @@ -6471,27 +6335,16 @@ static int rbd_slab_init(void) if (!rbd_obj_request_cache) goto out_err; - rbd_assert(!rbd_segment_name_cache); - rbd_segment_name_cache = kmem_cache_create("rbd_segment_name", - CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL); - if (rbd_segment_name_cache) - return 0; -out_err: - kmem_cache_destroy(rbd_obj_request_cache); - rbd_obj_request_cache = NULL; + return 0; +out_err: kmem_cache_destroy(rbd_img_request_cache); rbd_img_request_cache = NULL; - return -ENOMEM; } static void rbd_slab_exit(void) { - rbd_assert(rbd_segment_name_cache); - kmem_cache_destroy(rbd_segment_name_cache); - rbd_segment_name_cache = NULL; - rbd_assert(rbd_obj_request_cache); kmem_cache_destroy(rbd_obj_request_cache); rbd_obj_request_cache = NULL; diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h index 94f367db27b0..62ff50d3e7a6 100644 --- a/drivers/block/rbd_types.h +++ b/drivers/block/rbd_types.h @@ -25,8 +25,8 @@ */ #define RBD_HEADER_PREFIX "rbd_header." -#define RBD_DATA_PREFIX "rbd_data." #define RBD_ID_PREFIX "rbd_id." +#define RBD_V2_DATA_FORMAT "%s.%016llx" #define RBD_LOCK_NAME "rbd_lock" #define RBD_LOCK_TAG "internal" @@ -42,13 +42,14 @@ enum rbd_notify_op { /* * For format version 1, rbd image 'foo' consists of objects * foo.rbd - image metadata - * rb.<idhi>.<idlo>.00000000 - * rb.<idhi>.<idlo>.00000001 + * rb.<idhi>.<idlo>.<extra>.000000000000 + * rb.<idhi>.<idlo>.<extra>.000000000001 * ... - data * There is no notion of a persistent image id in rbd format 1. */ #define RBD_SUFFIX ".rbd" +#define RBD_V1_DATA_FORMAT "%s.%012llx" #define RBD_DIRECTORY "rbd_directory" #define RBD_INFO "rbd_info" @@ -57,9 +58,6 @@ enum rbd_notify_op { #define RBD_MIN_OBJ_ORDER 16 #define RBD_MAX_OBJ_ORDER 30 -#define RBD_COMP_NONE 0 -#define RBD_CRYPT_NONE 0 - #define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n" #define RBD_HEADER_SIGNATURE "RBD" #define RBD_HEADER_VERSION "001.005" diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index aabd8e9d3035..61b3ffa4f458 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -20,7 +20,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/fd.h> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 024b473524c0..1d4c9f8bc1e1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -5,6 +5,7 @@ #include <linux/hdreg.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> @@ -12,6 +13,7 @@ #include <scsi/scsi_cmnd.h> #include <linux/idr.h> #include <linux/blk-mq.h> +#include <linux/blk-mq-virtio.h> #include <linux/numa.h> #define PART_BITS 4 @@ -426,6 +428,7 @@ static int init_vq(struct virtio_blk *vblk) struct virtqueue **vqs; unsigned short num_vqs; struct virtio_device *vdev = vblk->vdev; + struct irq_affinity desc = { 0, }; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, struct virtio_blk_config, num_queues, @@ -452,7 +455,8 @@ static int init_vq(struct virtio_blk *vblk) } /* Discover virtqueues and write information to configuration. 
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -586,10 +590,18 @@ static int virtblk_init_request(void *data, struct request *rq, return 0; } +static int virtblk_map_queues(struct blk_mq_tag_set *set) +{ + struct virtio_blk *vblk = set->driver_data; + + return blk_mq_virtio_map_queues(set, vblk->vdev, 0); +} + static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .complete = virtblk_request_done, .init_request = virtblk_init_request, + .map_queues = virtblk_map_queues, }; static unsigned int virtblk_queue_depth; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index e5c62dcf2c11..e770ad977472 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/mutex.h> diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 20b32bb8c2af..8bdc38d81adf 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -25,6 +25,7 @@ #include <linux/spinlock.h> #include <linux/sysctl.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/bcd.h> #include <linux/seq_file.h> #include <linux/bitops.h> diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 5c654b5d4adf..503a41dfa193 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -17,6 +17,7 @@ #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/random.h> diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 7f816655cbbf..90f3edffb067 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -78,7 +78,8 @@ config IPMI_POWEROFF endif # IPMI_HANDLER config ASPEED_BT_IPMI_BMC - depends on ARCH_ASPEED + depends on ARCH_ASPEED || COMPILE_TEST + depends on REGMAP && REGMAP_MMIO && MFD_SYSCON tristate "BT IPMI bmc driver" help Provides a driver for the BT (Block Transfer) IPMI interface diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index fc9e8891eae3..d6f5d9eb102d 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -12,10 +12,13 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/mfd/syscon.h> #include <linux/miscdevice.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/poll.h> +#include <linux/regmap.h> #include <linux/sched.h> #include <linux/timer.h> @@ -60,7 +63,8 @@ struct bt_bmc { struct device dev; struct miscdevice miscdev; - void __iomem *base; + struct regmap *map; + int offset; int irq; wait_queue_head_t queue; struct timer_list poll_timer; @@ -69,14 +73,29 @@ struct bt_bmc { static atomic_t open_count = ATOMIC_INIT(0); +static const struct regmap_config bt_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static u8 bt_inb(struct bt_bmc *bt_bmc, int reg) { - return ioread8(bt_bmc->base + reg); + uint32_t val = 0; + int rc; + + rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val); + WARN(rc != 0, "regmap_read() failed: %d\n", rc); + + return rc == 0 ? 
(u8) val : 0; } static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg) { - iowrite8(data, bt_bmc->base + reg); + int rc; + + rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data); + WARN(rc != 0, "regmap_write() failed: %d\n", rc); } static void clr_rd_ptr(struct bt_bmc *bt_bmc) @@ -367,14 +386,18 @@ static irqreturn_t bt_bmc_irq(int irq, void *arg) { struct bt_bmc *bt_bmc = arg; u32 reg; + int rc; + + rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg); + if (rc) + return IRQ_NONE; - reg = ioread32(bt_bmc->base + BT_CR2); reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY; if (!reg) return IRQ_NONE; /* ack pending IRQs */ - iowrite32(reg, bt_bmc->base + BT_CR2); + regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg); wake_up(&bt_bmc->queue); return IRQ_HANDLED; @@ -384,7 +407,6 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, struct platform_device *pdev) { struct device *dev = &pdev->dev; - u32 reg; int rc; bt_bmc->irq = platform_get_irq(pdev, 0); @@ -405,18 +427,17 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, * will be cleared (along with B2H) when we can write the next * message to the BT buffer */ - reg = ioread32(bt_bmc->base + BT_CR1); - reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY; - iowrite32(reg, bt_bmc->base + BT_CR1); + rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1, + (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY), + (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY)); - return 0; + return rc; } static int bt_bmc_probe(struct platform_device *pdev) { struct bt_bmc *bt_bmc; struct device *dev; - struct resource *res; int rc; if (!pdev || !pdev->dev.of_node) @@ -431,10 +452,27 @@ static int bt_bmc_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, bt_bmc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - bt_bmc->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(bt_bmc->base)) - return PTR_ERR(bt_bmc->base); + bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(bt_bmc->map)) { + struct resource *res; + void __iomem *base; + + /* + * Assume it's not the MFD-based devicetree description, in + * which case generate a regmap ourselves + */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg); + bt_bmc->offset = 0; + } else { + rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset); + if (rc) + return rc; + } mutex_init(&bt_bmc->mutex); init_waitqueue_head(&bt_bmc->queue); @@ -461,12 +499,12 @@ static int bt_bmc_probe(struct platform_device *pdev) add_timer(&bt_bmc->poll_timer); } - iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) | - (BT_IRQ << BT_CR0_IRQ) | - BT_CR0_EN_CLR_SLV_RDP | - BT_CR0_EN_CLR_SLV_WRP | - BT_CR0_ENABLE_IBT, - bt_bmc->base + BT_CR0); + regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0, + (BT_IO_BASE << BT_CR0_IO_BASE) | + (BT_IRQ << BT_CR0_IRQ) | + BT_CR0_EN_CLR_SLV_RDP | + BT_CR0_EN_CLR_SLV_WRP | + BT_CR0_ENABLE_IBT); clr_b_busy(bt_bmc); diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index a21407de46ae..f45119c5337d 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -108,7 +108,7 @@ static int ipmi_fasync(int fd, struct file *file, int on) return (result); } -static struct ipmi_user_hndl ipmi_hndlrs = +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = file_receive_handler, }; diff --git a/drivers/char/ipmi/ipmi_msghandler.c 
b/drivers/char/ipmi/ipmi_msghandler.c index 92e53acf2cd2..9f699951b75a 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -102,7 +102,7 @@ struct ipmi_user { struct kref refcount; /* The upper layer that handles receive messages. */ - struct ipmi_user_hndl *handler; + const struct ipmi_user_hndl *handler; void *handler_data; /* The interface this user is bound to. */ @@ -919,7 +919,7 @@ static int intf_err_seq(ipmi_smi_t intf, int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user) { diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 6e658aa114f1..b338a4becbf8 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -196,7 +196,7 @@ static void ipmi_powernv_poll(void *send_info) ipmi_powernv_recv(smi); } -static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { +static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { .owner = THIS_MODULE, .start_processing = ipmi_powernv_start_processing, .sender = ipmi_powernv_send, diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 4035495f3a86..5ca24d9b101b 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -53,6 +53,7 @@ #include <linux/ctype.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <linux/sched/signal.h> #ifdef CONFIG_X86 /* @@ -985,7 +986,7 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) pretimeout_since_last_heartbeat = 1; } -static struct ipmi_user_hndl ipmi_hndlrs = { +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = ipmi_wdog_msg_handler, .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler }; diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 5b6742770656..565e4cf04a02 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -117,7 +117,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/delay.h> diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index e051fc8aa7d7..cd53771b9ae7 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -655,7 +655,7 @@ static void terminate_monitor(struct cm4000_dev *dev) * monitor the card every 50msec. as a side-effect, retrieve the * atr once a card is inserted. another side-effect of retrieving the * atr is that the card will be powered on, so there is no need to - * power on the card explictely from the application: the driver + * power on the card explicitly from the application: the driver * is already doing that for you. */ @@ -1037,7 +1037,7 @@ release_io: clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); - DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n", + DEBUGP(2, dev, "<- cmm_read returns: rc = %zi\n", (rc < 0 ? rc : count)); return rc < 0 ? 
rc : count; } diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index d7123259143e..d4dbd8d8e524 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -331,7 +331,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, } if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) { - DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count); + DEBUGP(2, dev, "<- cm4040_write buffersize=%zd < 5\n", count); return -EIO; } diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 87885d146dbb..2a558c706581 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -58,7 +58,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 35259961cc38..974d48927b07 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -74,7 +74,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index ec07f0e99732..6aa32679fd58 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -16,7 +16,7 @@ */ #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/poll.h> #include <linux/init.h> diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 59bcefd6ec7c..e452673dff66 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c @@ -16,7 +16,7 @@ */ #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/sn/sn_sal.h> diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 4fa7fcd8af36..f4f866ee54bc 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -603,7 +603,7 @@ static void sonypi_type3_srs(void) u16 v16; u8 v8; - /* This model type uses the same initialiazation of + /* This model type uses the same initialization of * the embedded controller as the type2 models. */ sonypi_type2_srs(); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 17857beb4892..e9b7e0b3cabe 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1136,6 +1136,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; struct scatterlist sg[1]; + void *data; + int ret; if (unlikely(early_put_chars)) return early_put_chars(vtermno, buf, count); @@ -1144,8 +1146,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) if (!port) return -EPIPE; - sg_init_one(sg, buf, count); - return __send_to_port(port, sg, 1, count, (void *)buf, false); + data = kmemdup(buf, count, GFP_ATOMIC); + if (!data) + return -ENOMEM; + + sg_init_one(sg, data, count); + ret = __send_to_port(port, sg, 1, count, data, false); + kfree(data); + return ret; } /* @@ -1939,7 +1947,7 @@ static int init_vqs(struct ports_device *portdev) /* Find the queues. 
*/ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, io_callbacks, - (const char **)io_names); + (const char **)io_names, NULL); if (err) goto free; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 56c1998ced3e..9356ab4b7d76 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -95,16 +95,17 @@ config COMMON_CLK_CDCE706 This driver supports TI CDCE706 programmable 3-PLL clock synthesizer. config COMMON_CLK_CDCE925 - tristate "Clock driver for TI CDCE925 devices" + tristate "Clock driver for TI CDCE913/925/937/949 devices" depends on I2C depends on OF select REGMAP_I2C help ---help--- - This driver supports the TI CDCE925 programmable clock synthesizer. - The chip contains two PLLs with spread-spectrum clocking support and - five output dividers. The driver only supports the following setup, - and uses a fixed setting for the output muxes. + This driver supports the TI CDCE913/925/937/949 programmable clock + synthesizer. Each chip has different number of PLLs and outputs. + For example, the CDCE925 contains two PLLs with spread-spectrum + clocking support and five output dividers. The driver only supports + the following setup, and uses a fixed setting for the output muxes. Y1 is derived from the input clock Y2 and Y3 derive from PLL1 Y4 and Y5 derive from PLL2 @@ -198,6 +199,16 @@ config COMMON_CLK_OXNAS ---help--- Support for the OXNAS SoC Family clocks. +config COMMON_CLK_VC5 + tristate "Clock driver for IDT VersaClock5 devices" + depends on I2C + depends on OF + select REGMAP_I2C + help + ---help--- + This driver supports the IDT VersaClock5 programmable clock + generator. + source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" source "drivers/clk/mediatek/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 925081ec14c0..92c12b86c2e8 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o obj-$(CONFIG_ARCH_U300) += clk-u300.o obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o +obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o @@ -87,6 +88,8 @@ obj-y += ti/ obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ obj-$(CONFIG_ARCH_U8500) += ux500/ obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/ +ifeq ($(CONFIG_COMMON_CLK), y) obj-$(CONFIG_X86) += x86/ +endif obj-$(CONFIG_ARCH_ZX) += zte/ obj-$(CONFIG_ARCH_ZYNQ) += zynq/ diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c index 411310d29581..02d3bcd6216c 100644 --- a/drivers/clk/axs10x/i2s_pll_clock.c +++ b/drivers/clk/axs10x/i2s_pll_clock.c @@ -182,6 +182,7 @@ static int i2s_pll_clk_probe(struct platform_device *pdev) if (IS_ERR(pll_clk->base)) return PTR_ERR(pll_clk->base); + memset(&init, 0, sizeof(init)); clk_name = node->name; init.name = clk_name; init.ops = &i2s_pll_ops; diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 0d14409097e7..025853870619 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -39,6 +39,7 @@ #include <linux/clk.h> #include <linux/clk/bcm2835.h> #include <linux/debugfs.h> +#include <linux/delay.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -98,7 +99,8 @@ #define CM_SMIDIV 0x0b4 /* no definition for 0x0b8 and 0x0bc */ #define CM_TCNTCTL 0x0c0 -#define CM_TCNTDIV 0x0c4 +# define CM_TCNT_SRC1_SHIFT 12 +#define CM_TCNTCNT 0x0c4 
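[Editor's sketch, not part of any patch above] The virtio hunks earlier in this section (virtio_blk and virtio_console) show find_vqs() gaining a trailing struct irq_affinity argument: virtio_blk passes a descriptor so the core spreads vector affinity, and then reuses that spreading for blk-mq through blk_mq_virtio_map_queues() in its .map_queues hook, while virtio_console passes NULL to keep the old behaviour. A rough sketch of the opt-in side under those assumptions; my_find_vqs() is an invented helper name, not code from these patches.
#include <linux/interrupt.h>
#include <linux/virtio_config.h>

static int my_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[])
{
	struct irq_affinity desc = { 0, };	/* default spreading, as in the virtio_blk hunk */

	/* The trailing argument is the new one; passing NULL opts out (virtio_console). */
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, &desc);
}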
#define CM_TECCTL 0x0c8 #define CM_TECDIV 0x0cc #define CM_TD0CTL 0x0d0 @@ -297,11 +299,32 @@ #define LOCK_TIMEOUT_NS 100000000 #define BCM2835_MAX_FB_RATE 1750000000u +/* + * Names of clocks used within the driver that need to be replaced + * with an external parent's name. This array is in the order that + * the clocks node in the DT references external clocks. + */ +static const char *const cprman_parent_names[] = { + "xosc", + "dsi0_byte", + "dsi0_ddr2", + "dsi0_ddr", + "dsi1_byte", + "dsi1_ddr2", + "dsi1_ddr", +}; + struct bcm2835_cprman { struct device *dev; void __iomem *regs; spinlock_t regs_lock; /* spinlock for all clocks */ - const char *osc_name; + + /* + * Real names of cprman clock parents looked up through + * of_clk_get_parent_name(), which will be used in the + * parent_names[] arrays for clock registration. + */ + const char *real_parent_names[ARRAY_SIZE(cprman_parent_names)]; /* Must be last */ struct clk_hw_onecell_data onecell; @@ -317,6 +340,61 @@ static inline u32 cprman_read(struct bcm2835_cprman *cprman, u32 reg) return readl(cprman->regs + reg); } +/* Does a cycle of measuring a clock through the TCNT clock, which may + * source from many other clocks in the system. + */ +static unsigned long bcm2835_measure_tcnt_mux(struct bcm2835_cprman *cprman, + u32 tcnt_mux) +{ + u32 osccount = 19200; /* 1ms */ + u32 count; + ktime_t timeout; + + spin_lock(&cprman->regs_lock); + + cprman_write(cprman, CM_TCNTCTL, CM_KILL); + + cprman_write(cprman, CM_TCNTCTL, + (tcnt_mux & CM_SRC_MASK) | + (tcnt_mux >> CM_SRC_BITS) << CM_TCNT_SRC1_SHIFT); + + cprman_write(cprman, CM_OSCCOUNT, osccount); + + /* do a kind delay at the start */ + mdelay(1); + + /* Finish off whatever is left of OSCCOUNT */ + timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); + while (cprman_read(cprman, CM_OSCCOUNT)) { + if (ktime_after(ktime_get(), timeout)) { + dev_err(cprman->dev, "timeout waiting for OSCCOUNT\n"); + count = 0; + goto out; + } + cpu_relax(); + } + + /* Wait for BUSY to clear. */ + timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); + while (cprman_read(cprman, CM_TCNTCTL) & CM_BUSY) { + if (ktime_after(ktime_get(), timeout)) { + dev_err(cprman->dev, "timeout waiting for !BUSY\n"); + count = 0; + goto out; + } + cpu_relax(); + } + + count = cprman_read(cprman, CM_TCNTCNT); + + cprman_write(cprman, CM_TCNTCTL, 0); + +out: + spin_unlock(&cprman->regs_lock); + + return count * 1000; +} + static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base, struct debugfs_reg32 *regs, size_t nregs, struct dentry *dentry) @@ -428,6 +506,7 @@ struct bcm2835_pll_divider_data { u32 load_mask; u32 hold_mask; u32 fixed_divider; + u32 flags; }; struct bcm2835_clock_data { @@ -451,6 +530,8 @@ struct bcm2835_clock_data { bool is_vpu_clock; bool is_mash_clock; + + u32 tcnt_mux; }; struct bcm2835_gate_data { @@ -906,6 +987,9 @@ static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, const struct bcm2835_clock_data *data = clock->data; u64 temp; + if (data->int_bits == 0 && data->frac_bits == 0) + return parent_rate; + /* * The divisor is a 12.12 fixed point field, but only some of * the bits are populated in any given clock. 
@@ -929,7 +1013,12 @@ static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw, struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); struct bcm2835_cprman *cprman = clock->cprman; const struct bcm2835_clock_data *data = clock->data; - u32 div = cprman_read(cprman, data->div_reg); + u32 div; + + if (data->int_bits == 0 && data->frac_bits == 0) + return parent_rate; + + div = cprman_read(cprman, data->div_reg); return bcm2835_clock_rate_from_divisor(clock, parent_rate, div); } @@ -978,6 +1067,17 @@ static int bcm2835_clock_on(struct clk_hw *hw) CM_GATE); spin_unlock(&cprman->regs_lock); + /* Debug code to measure the clock once it's turned on to see + * if it's ticking at the rate we expect. + */ + if (data->tcnt_mux && false) { + dev_info(cprman->dev, + "clk %s: rate %ld, measure %ld\n", + data->name, + clk_hw_get_rate(hw), + bcm2835_measure_tcnt_mux(cprman, data->tcnt_mux)); + } + return 0; } @@ -1208,7 +1308,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, memset(&init, 0, sizeof(init)); /* All of the PLLs derive from the external oscillator. */ - init.parent_names = &cprman->osc_name; + init.parent_names = &cprman->real_parent_names[0]; init.num_parents = 1; init.name = data->name; init.ops = &bcm2835_pll_clk_ops; @@ -1252,7 +1352,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, init.num_parents = 1; init.name = divider_name; init.ops = &bcm2835_pll_divider_clk_ops; - init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED; + init.flags = data->flags | CLK_IGNORE_UNUSED; divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL); if (!divider) @@ -1294,18 +1394,22 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, struct bcm2835_clock *clock; struct clk_init_data init; const char *parents[1 << CM_SRC_BITS]; - size_t i; + size_t i, j; int ret; /* - * Replace our "xosc" references with the oscillator's - * actual name. + * Replace our strings referencing parent clocks with the + * actual clock-output-name of the parent. */ for (i = 0; i < data->num_mux_parents; i++) { - if (strcmp(data->parents[i], "xosc") == 0) - parents[i] = cprman->osc_name; - else - parents[i] = data->parents[i]; + parents[i] = data->parents[i]; + + for (j = 0; j < ARRAY_SIZE(cprman_parent_names); j++) { + if (strcmp(parents[i], cprman_parent_names[j]) == 0) { + parents[i] = cprman->real_parent_names[j]; + break; + } + } } memset(&init, 0, sizeof(init)); @@ -1432,6 +1536,47 @@ static const char *const bcm2835_clock_vpu_parents[] = { __VA_ARGS__) /* + * DSI parent clocks. The DSI byte/DDR/DDR2 clocks come from the DSI + * analog PHY. The _inv variants are generated internally to cprman, + * but we don't use them so they aren't hooked up. + */ +static const char *const bcm2835_clock_dsi0_parents[] = { + "gnd", + "xosc", + "testdebug0", + "testdebug1", + "dsi0_ddr", + "dsi0_ddr_inv", + "dsi0_ddr2", + "dsi0_ddr2_inv", + "dsi0_byte", + "dsi0_byte_inv", +}; + +static const char *const bcm2835_clock_dsi1_parents[] = { + "gnd", + "xosc", + "testdebug0", + "testdebug1", + "dsi1_ddr", + "dsi1_ddr_inv", + "dsi1_ddr2", + "dsi1_ddr2_inv", + "dsi1_byte", + "dsi1_byte_inv", +}; + +#define REGISTER_DSI0_CLK(...) REGISTER_CLK( \ + .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi0_parents), \ + .parents = bcm2835_clock_dsi0_parents, \ + __VA_ARGS__) + +#define REGISTER_DSI1_CLK(...) 
REGISTER_CLK( \ + .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi1_parents), \ + .parents = bcm2835_clock_dsi1_parents, \ + __VA_ARGS__) + +/* * the real definition of all the pll, pll_dividers and clocks * these make use of the above REGISTER_* macros */ @@ -1466,7 +1611,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLA_CORE, .load_mask = CM_PLLA_LOADCORE, .hold_mask = CM_PLLA_HOLDCORE, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLA_PER] = REGISTER_PLL_DIV( .name = "plla_per", .source_pll = "plla", @@ -1474,7 +1620,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLA_PER, .load_mask = CM_PLLA_LOADPER, .hold_mask = CM_PLLA_HOLDPER, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLA_DSI0] = REGISTER_PLL_DIV( .name = "plla_dsi0", .source_pll = "plla", @@ -1490,7 +1637,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLA_CCP2, .load_mask = CM_PLLA_LOADCCP2, .hold_mask = CM_PLLA_HOLDCCP2, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), /* PLLB is used for the ARM's clock. */ [BCM2835_PLLB] = REGISTER_PLL( @@ -1514,7 +1662,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLB_ARM, .load_mask = CM_PLLB_LOADARM, .hold_mask = CM_PLLB_HOLDARM, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), /* * PLLC is the core PLL, used to drive the core VPU clock. @@ -1543,7 +1692,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLC_CORE0, .load_mask = CM_PLLC_LOADCORE0, .hold_mask = CM_PLLC_HOLDCORE0, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_CORE1] = REGISTER_PLL_DIV( .name = "pllc_core1", .source_pll = "pllc", @@ -1551,7 +1701,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLC_CORE1, .load_mask = CM_PLLC_LOADCORE1, .hold_mask = CM_PLLC_HOLDCORE1, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_CORE2] = REGISTER_PLL_DIV( .name = "pllc_core2", .source_pll = "pllc", @@ -1559,7 +1710,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLC_CORE2, .load_mask = CM_PLLC_LOADCORE2, .hold_mask = CM_PLLC_HOLDCORE2, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLC_PER] = REGISTER_PLL_DIV( .name = "pllc_per", .source_pll = "pllc", @@ -1567,7 +1719,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLC_PER, .load_mask = CM_PLLC_LOADPER, .hold_mask = CM_PLLC_HOLDPER, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), /* * PLLD is the display PLL, used to drive DSI display panels. 
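The CLK_SET_RATE_PARENT flag being added to these fixed per-PLL dividers lets a rate request on a downstream clock propagate up and retune the PLL itself. A consumer-side sketch, shown only as an illustration; the connection name and target rate below are hypothetical and not taken from the patch:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_set_rate(struct device *dev, unsigned long hz)
	{
		/* "pixel" is a made-up clock-names entry for this example */
		struct clk *clk = devm_clk_get(dev, "pixel");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* With CLK_SET_RATE_PARENT in the path, this request may retune
		 * the PLL rather than only changing the divider. */
		return clk_set_rate(clk, hz);
	}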
@@ -1596,7 +1749,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLD_CORE, .load_mask = CM_PLLD_LOADCORE, .hold_mask = CM_PLLD_HOLDCORE, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLD_PER] = REGISTER_PLL_DIV( .name = "plld_per", .source_pll = "plld", @@ -1604,7 +1758,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLD_PER, .load_mask = CM_PLLD_LOADPER, .hold_mask = CM_PLLD_HOLDPER, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLD_DSI0] = REGISTER_PLL_DIV( .name = "plld_dsi0", .source_pll = "plld", @@ -1649,7 +1804,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLH_RCAL, .load_mask = CM_PLLH_LOADRCAL, .hold_mask = 0, - .fixed_divider = 10), + .fixed_divider = 10, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLH_AUX] = REGISTER_PLL_DIV( .name = "pllh_aux", .source_pll = "pllh", @@ -1657,7 +1813,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLH_AUX, .load_mask = CM_PLLH_LOADAUX, .hold_mask = 0, - .fixed_divider = 1), + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT), [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV( .name = "pllh_pix", .source_pll = "pllh", @@ -1665,7 +1822,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .a2w_reg = A2W_PLLH_PIX, .load_mask = CM_PLLH_LOADPIX, .hold_mask = 0, - .fixed_divider = 10), + .fixed_divider = 10, + .flags = CLK_SET_RATE_PARENT), /* the clocks */ @@ -1677,7 +1835,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_OTPCTL, .div_reg = CM_OTPDIV, .int_bits = 4, - .frac_bits = 0), + .frac_bits = 0, + .tcnt_mux = 6), /* * Used for a 1 MHz clock for the system clocksource, and also used * by the watchdog timer and the camera pulse generator. @@ -1711,13 +1870,15 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_H264CTL, .div_reg = CM_H264DIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 1), [BCM2835_CLOCK_ISP] = REGISTER_VPU_CLK( .name = "isp", .ctl_reg = CM_ISPCTL, .div_reg = CM_ISPDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 2), /* * Secondary SDRAM clock. Used for low-voltage modes when the PLL @@ -1728,13 +1889,15 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_SDCCTL, .div_reg = CM_SDCDIV, .int_bits = 6, - .frac_bits = 0), + .frac_bits = 0, + .tcnt_mux = 3), [BCM2835_CLOCK_V3D] = REGISTER_VPU_CLK( .name = "v3d", .ctl_reg = CM_V3DCTL, .div_reg = CM_V3DDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 4), /* * VPU clock.
This doesn't have an enable bit, since it drives * the bus for everything else, and is special so it doesn't need @@ -1748,7 +1911,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .int_bits = 12, .frac_bits = 8, .flags = CLK_IS_CRITICAL, - .is_vpu_clock = true), + .is_vpu_clock = true, + .tcnt_mux = 5), /* clocks with per parent mux */ [BCM2835_CLOCK_AVEO] = REGISTER_PER_CLK( @@ -1756,19 +1920,22 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_AVEOCTL, .div_reg = CM_AVEODIV, .int_bits = 4, - .frac_bits = 0), + .frac_bits = 0, + .tcnt_mux = 38), [BCM2835_CLOCK_CAM0] = REGISTER_PER_CLK( .name = "cam0", .ctl_reg = CM_CAM0CTL, .div_reg = CM_CAM0DIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 14), [BCM2835_CLOCK_CAM1] = REGISTER_PER_CLK( .name = "cam1", .ctl_reg = CM_CAM1CTL, .div_reg = CM_CAM1DIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 15), [BCM2835_CLOCK_DFT] = REGISTER_PER_CLK( .name = "dft", .ctl_reg = CM_DFTCTL, @@ -1780,7 +1947,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_DPICTL, .div_reg = CM_DPIDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 17), /* Arasan EMMC clock */ [BCM2835_CLOCK_EMMC] = REGISTER_PER_CLK( @@ -1788,7 +1956,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_EMMCCTL, .div_reg = CM_EMMCDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 39), /* General purpose (GPIO) clocks */ [BCM2835_CLOCK_GP0] = REGISTER_PER_CLK( @@ -1797,7 +1966,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .div_reg = CM_GP0DIV, .int_bits = 12, .frac_bits = 12, - .is_mash_clock = true), + .is_mash_clock = true, + .tcnt_mux = 20), [BCM2835_CLOCK_GP1] = REGISTER_PER_CLK( .name = "gp1", .ctl_reg = CM_GP1CTL, @@ -1805,7 +1975,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .int_bits = 12, .frac_bits = 12, .flags = CLK_IS_CRITICAL, - .is_mash_clock = true), + .is_mash_clock = true, + .tcnt_mux = 21), [BCM2835_CLOCK_GP2] = REGISTER_PER_CLK( .name = "gp2", .ctl_reg = CM_GP2CTL, @@ -1820,40 +1991,46 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .ctl_reg = CM_HSMCTL, .div_reg = CM_HSMDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 22), [BCM2835_CLOCK_PCM] = REGISTER_PER_CLK( .name = "pcm", .ctl_reg = CM_PCMCTL, .div_reg = CM_PCMDIV, .int_bits = 12, .frac_bits = 12, - .is_mash_clock = true), + .is_mash_clock = true, + .tcnt_mux = 23), [BCM2835_CLOCK_PWM] = REGISTER_PER_CLK( .name = "pwm", .ctl_reg = CM_PWMCTL, .div_reg = CM_PWMDIV, .int_bits = 12, .frac_bits = 12, - .is_mash_clock = true), + .is_mash_clock = true, + .tcnt_mux = 24), [BCM2835_CLOCK_SLIM] = REGISTER_PER_CLK( .name = "slim", .ctl_reg = CM_SLIMCTL, .div_reg = CM_SLIMDIV, .int_bits = 12, .frac_bits = 12, - .is_mash_clock = true), + .is_mash_clock = true, + .tcnt_mux = 25), [BCM2835_CLOCK_SMI] = REGISTER_PER_CLK( .name = "smi", .ctl_reg = CM_SMICTL, .div_reg = CM_SMIDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 27), [BCM2835_CLOCK_UART] = REGISTER_PER_CLK( .name = "uart", .ctl_reg = CM_UARTCTL, .div_reg = CM_UARTDIV, .int_bits = 10, - .frac_bits = 12), + .frac_bits = 12, + .tcnt_mux = 28), /* TV encoder clock. Only operating frequency is 108Mhz. */ [BCM2835_CLOCK_VEC] = REGISTER_PER_CLK( @@ -1866,7 +2043,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { * Allow rate change propagation only on PLLH_AUX which is * assigned index 7 in the parent array. 
*/ - .set_rate_parent = BIT(7)), + .set_rate_parent = BIT(7), + .tcnt_mux = 29), /* dsi clocks */ [BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK( @@ -1874,13 +2052,29 @@ .ctl_reg = CM_DSI0ECTL, .div_reg = CM_DSI0EDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 18), [BCM2835_CLOCK_DSI1E] = REGISTER_PER_CLK( .name = "dsi1e", .ctl_reg = CM_DSI1ECTL, .div_reg = CM_DSI1EDIV, .int_bits = 4, - .frac_bits = 8), + .frac_bits = 8, + .tcnt_mux = 19), + [BCM2835_CLOCK_DSI0P] = REGISTER_DSI0_CLK( + .name = "dsi0p", + .ctl_reg = CM_DSI0PCTL, + .div_reg = CM_DSI0PDIV, + .int_bits = 0, + .frac_bits = 0, + .tcnt_mux = 12), + [BCM2835_CLOCK_DSI1P] = REGISTER_DSI1_CLK( + .name = "dsi1p", + .ctl_reg = CM_DSI1PCTL, + .div_reg = CM_DSI1PDIV, + .int_bits = 0, + .frac_bits = 0, + .tcnt_mux = 13), /* the gates */ @@ -1939,8 +2133,19 @@ static int bcm2835_clk_probe(struct platform_device *pdev) if (IS_ERR(cprman->regs)) return PTR_ERR(cprman->regs); - cprman->osc_name = of_clk_get_parent_name(dev->of_node, 0); - if (!cprman->osc_name) + memcpy(cprman->real_parent_names, cprman_parent_names, + sizeof(cprman_parent_names)); + of_clk_parent_fill(dev->of_node, cprman->real_parent_names, + ARRAY_SIZE(cprman_parent_names)); + + /* + * Make sure the external oscillator has been registered. + * + * The other (DSI) clocks are not present on older device + * trees, which we still need to support for backwards + * compatibility. + */ + if (!cprman->real_parent_names[0]) return -ENODEV; platform_set_drvdata(pdev, cprman); diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c index f793b2d9238c..c933be01c7db 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -1,8 +1,8 @@ /* - * Driver for TI Dual PLL CDCE925 clock synthesizer + * Driver for TI Multi PLL CDCE913/925/937/949 clock synthesizer * - * This driver always connects the Y1 to the input clock, Y2/Y3 to PLL1 - * and Y4/Y5 to PLL2. PLL frequency is set on a first-come-first-serve + * This driver always connects the Y1 to the input clock, Y2/Y3 to PLL1, + * Y4/Y5 to PLL2, and so on. PLL frequency is set on a first-come-first-serve * basis. Clients can directly request any frequency that the chip can * deliver using the standard clk framework. In addition, the device can * be configured and activated via the devicetree. @@ -19,11 +19,32 @@ #include <linux/slab.h> #include <linux/gcd.h> -/* The chip has 2 PLLs which can be routed through dividers to 5 outputs. +/* Each chip has a different number of PLLs and outputs, for example: + * The CDCE925 has 2 PLLs which can be routed through dividers to 5 outputs. * Model this as 2 PLL clocks which are parents to the outputs.
*/ -#define NUMBER_OF_PLLS 2 -#define NUMBER_OF_OUTPUTS 5 + +enum { + CDCE913, + CDCE925, + CDCE937, + CDCE949, +}; + +struct clk_cdce925_chip_info { + int num_plls; + int num_outputs; +}; + +static const struct clk_cdce925_chip_info clk_cdce925_chip_info_tbl[] = { + [CDCE913] = { .num_plls = 1, .num_outputs = 3 }, + [CDCE925] = { .num_plls = 2, .num_outputs = 5 }, + [CDCE937] = { .num_plls = 3, .num_outputs = 7 }, + [CDCE949] = { .num_plls = 4, .num_outputs = 9 }, +}; + +#define MAX_NUMBER_OF_PLLS 4 +#define MAX_NUMBER_OF_OUTPUTS 9 #define CDCE925_REG_GLOBAL1 0x01 #define CDCE925_REG_Y1SPIPDIVH 0x02 @@ -43,7 +64,7 @@ struct clk_cdce925_output { struct clk_hw hw; struct clk_cdce925_chip *chip; u8 index; - u16 pdiv; /* 1..127 for Y2-Y5; 1..1023 for Y1 */ + u16 pdiv; /* 1..127 for Y2-Y9; 1..1023 for Y1 */ }; #define to_clk_cdce925_output(_hw) \ container_of(_hw, struct clk_cdce925_output, hw) @@ -60,8 +81,9 @@ struct clk_cdce925_pll { struct clk_cdce925_chip { struct regmap *regmap; struct i2c_client *i2c_client; - struct clk_cdce925_pll pll[NUMBER_OF_PLLS]; - struct clk_cdce925_output clk[NUMBER_OF_OUTPUTS]; + const struct clk_cdce925_chip_info *chip_info; + struct clk_cdce925_pll pll[MAX_NUMBER_OF_PLLS]; + struct clk_cdce925_output clk[MAX_NUMBER_OF_OUTPUTS]; }; /* ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** */ @@ -284,6 +306,18 @@ static void cdce925_clk_set_pdiv(struct clk_cdce925_output *data, u16 pdiv) case 4: regmap_update_bits(data->chip->regmap, 0x27, 0x7F, pdiv); break; + case 5: + regmap_update_bits(data->chip->regmap, 0x36, 0x7F, pdiv); + break; + case 6: + regmap_update_bits(data->chip->regmap, 0x37, 0x7F, pdiv); + break; + case 7: + regmap_update_bits(data->chip->regmap, 0x46, 0x7F, pdiv); + break; + case 8: + regmap_update_bits(data->chip->regmap, 0x47, 0x7F, pdiv); + break; } } @@ -302,6 +336,14 @@ static void cdce925_clk_activate(struct clk_cdce925_output *data) case 4: regmap_update_bits(data->chip->regmap, 0x24, 0x03, 0x03); break; + case 5: + case 6: + regmap_update_bits(data->chip->regmap, 0x34, 0x03, 0x03); + break; + case 7: + case 8: + regmap_update_bits(data->chip->regmap, 0x44, 0x03, 0x03); + break; } } @@ -474,15 +516,6 @@ static const struct clk_ops cdce925_clk_y1_ops = { .set_rate = cdce925_clk_y1_set_rate, }; - -static struct regmap_config cdce925_regmap_config = { - .name = "configuration0", - .reg_bits = 8, - .val_bits = 8, - .cache_type = REGCACHE_RBTREE, - .max_register = 0x2F, -}; - #define CDCE925_I2C_COMMAND_BLOCK_TRANSFER 0x00 #define CDCE925_I2C_COMMAND_BYTE_TRANSFER 0x80 @@ -582,13 +615,19 @@ static int cdce925_probe(struct i2c_client *client, struct clk_cdce925_chip *data; struct device_node *node = client->dev.of_node; const char *parent_name; - const char *pll_clk_name[NUMBER_OF_PLLS] = {NULL,}; + const char *pll_clk_name[MAX_NUMBER_OF_PLLS] = {NULL,}; struct clk_init_data init; u32 value; int i; int err; struct device_node *np_output; char child_name[6]; + struct regmap_config config = { + .name = "configuration0", + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + }; dev_dbg(&client->dev, "%s\n", __func__); data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); @@ -596,8 +635,11 @@ static int cdce925_probe(struct i2c_client *client, return -ENOMEM; data->i2c_client = client; + data->chip_info = &clk_cdce925_chip_info_tbl[id->driver_data]; + config.max_register = CDCE925_OFFSET_PLL + + data->chip_info->num_plls * 0x10 - 1; data->regmap = devm_regmap_init(&client->dev, ®map_cdce925_bus, - &client->dev, 
&cdce925_regmap_config); + &client->dev, &config); if (IS_ERR(data->regmap)) { dev_err(&client->dev, "failed to allocate register map\n"); return PTR_ERR(data->regmap); @@ -626,7 +668,7 @@ static int cdce925_probe(struct i2c_client *client, init.num_parents = parent_name ? 1 : 0; /* Register PLL clocks */ - for (i = 0; i < NUMBER_OF_PLLS; ++i) { + for (i = 0; i < data->chip_info->num_plls; ++i) { pll_clk_name[i] = kasprintf(GFP_KERNEL, "%s.pll%d", client->dev.of_node->name, i); init.name = pll_clk_name[i]; @@ -684,7 +726,7 @@ static int cdce925_probe(struct i2c_client *client, init.ops = &cdce925_clk_ops; init.flags = CLK_SET_RATE_PARENT; init.num_parents = 1; - for (i = 1; i < NUMBER_OF_OUTPUTS; ++i) { + for (i = 1; i < data->chip_info->num_outputs; ++i) { init.name = kasprintf(GFP_KERNEL, "%s.Y%d", client->dev.of_node->name, i+1); data->clk[i].chip = data; @@ -702,6 +744,16 @@ static int cdce925_probe(struct i2c_client *client, /* Mux Y4/5 to PLL2 */ init.parent_names = &pll_clk_name[1]; break; + case 5: + case 6: + /* Mux Y6/7 to PLL3 */ + init.parent_names = &pll_clk_name[2]; + break; + case 7: + case 8: + /* Mux Y8/9 to PLL4 */ + init.parent_names = &pll_clk_name[3]; + break; } err = devm_clk_hw_register(&client->dev, &data->clk[i].hw); kfree(init.name); /* clock framework made a copy of the name */ @@ -720,7 +772,7 @@ static int cdce925_probe(struct i2c_client *client, err = 0; error: - for (i = 0; i < NUMBER_OF_PLLS; ++i) + for (i = 0; i < data->chip_info->num_plls; ++i) /* clock framework made a copy of the name */ kfree(pll_clk_name[i]); @@ -728,13 +780,19 @@ error: } static const struct i2c_device_id cdce925_id[] = { - { "cdce925", 0 }, + { "cdce913", CDCE913 }, + { "cdce925", CDCE925 }, + { "cdce937", CDCE937 }, + { "cdce949", CDCE949 }, { } }; MODULE_DEVICE_TABLE(i2c, cdce925_id); static const struct of_device_id clk_cdce925_of_match[] = { + { .compatible = "ti,cdce913" }, { .compatible = "ti,cdce925" }, + { .compatible = "ti,cdce937" }, + { .compatible = "ti,cdce949" }, { }, }; MODULE_DEVICE_TABLE(of, clk_cdce925_of_match); @@ -750,5 +808,5 @@ static struct i2c_driver cdce925_driver = { module_i2c_driver(cdce925_driver); MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>"); -MODULE_DESCRIPTION("cdce925 driver"); +MODULE_DESCRIPTION("TI CDCE913/925/937/949 driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 674785d968a3..e0e02a6e5900 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -40,8 +40,9 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) return 0; pclk = of_clk_get_from_provider(&clkspec); if (IS_ERR(pclk)) { - pr_warn("clk: couldn't get parent clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(pclk) != -EPROBE_DEFER) + pr_warn("clk: couldn't get parent clock %d for %s\n", + index, node->full_name); return PTR_ERR(pclk); } @@ -55,8 +56,9 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) } clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { - pr_warn("clk: couldn't get assigned clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(clk) != -EPROBE_DEFER) + pr_warn("clk: couldn't get assigned clock %d for %s\n", + index, node->full_name); rc = PTR_ERR(clk); goto err; } @@ -99,8 +101,9 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { - pr_warn("clk: couldn't get clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(clk) != 
-EPROBE_DEFER) + pr_warn("clk: couldn't get clock %d for %s\n", + index, node->full_name); return PTR_ERR(clk); } diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index 021f3daf34e1..3fca0526d940 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -59,6 +59,10 @@ struct cs2000_priv { struct i2c_client *client; struct clk *clk_in; struct clk *ref_clk; + + /* suspend/resume */ + unsigned long saved_rate; + unsigned long saved_parent_rate; }; static const struct of_device_id cs2000_of_match[] = { @@ -286,6 +290,9 @@ static int __cs2000_set_rate(struct cs2000_priv *priv, int ch, if (ret < 0) return ret; + priv->saved_rate = rate; + priv->saved_parent_rate = parent_rate; + return 0; } @@ -489,9 +496,24 @@ probe_err: return ret; } +static int cs2000_resume(struct device *dev) +{ + struct cs2000_priv *priv = dev_get_drvdata(dev); + int ch = 0; /* it uses ch0 only at this point */ + + return __cs2000_set_rate(priv, ch, + priv->saved_rate, + priv->saved_parent_rate); +} + +static const struct dev_pm_ops cs2000_pm_ops = { + .resume_early = cs2000_resume, +}; + static struct i2c_driver cs2000_driver = { .driver = { .name = "cs2000-cp", + .pm = &cs2000_pm_ops, .of_match_table = cs2000_of_match, }, .probe = cs2000_probe, diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 2a3e9d8e88b0..96d37175d0ad 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev) of_node_put(child); return ret; } - } - /* Add the virtual cpufreq device */ - cpufreq_dev = platform_device_register_simple("scpi-cpufreq", - -1, NULL, 0); - if (IS_ERR(cpufreq_dev)) - pr_warn("unable to register cpufreq device"); + if (match->data != &scpi_dvfs_ops) + continue; + /* Add the virtual cpufreq device if it's a DVFS clock provider */ + cpufreq_dev = platform_device_register_simple("scpi-cpufreq", + -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + pr_warn("unable to register cpufreq device"); + } return 0; } diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index fc585f370549..ab609a76706f 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -28,6 +28,14 @@ #include <linux/regmap.h> #include <linux/mfd/syscon.h> +/* + * Include the list of clocks which are not derived from the system clock (SYSCLOCK) + * The index of these clocks is the secondary index of the DT bindings + * + */ +#include <dt-bindings/clock/stm32fx-clock.h> + +#define STM32F4_RCC_CR 0x00 #define STM32F4_RCC_PLLCFGR 0x04 #define STM32F4_RCC_CFGR 0x08 #define STM32F4_RCC_AHB1ENR 0x30 @@ -37,6 +45,15 @@ #define STM32F4_RCC_APB2ENR 0x44 #define STM32F4_RCC_BDCR 0x70 #define STM32F4_RCC_CSR 0x74 +#define STM32F4_RCC_PLLI2SCFGR 0x84 +#define STM32F4_RCC_PLLSAICFGR 0x88 +#define STM32F4_RCC_DCKCFGR 0x8c +#define STM32F7_RCC_DCKCFGR2 0x90 + +#define NONE -1 +#define NO_IDX NONE +#define NO_MUX NONE +#define NO_GATE NONE struct stm32f4_gate_data { u8 offset; @@ -195,7 +212,7 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" }, { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" }, + { STM32F4_RCC_APB2ENR, 11, "sdio", "sdmux" }, { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" }, { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" }, @@ -208,7 +225,79 @@ static const struct stm32f4_gate_data stm32f469_gates[]
__initconst = { { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; -enum { SYSTICK, FCLK, CLK_LSI, CLK_LSE, CLK_HSE_RTC, CLK_RTC, END_PRIMARY_CLK }; +static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { + { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 20, "dtcmram", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" }, + + { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" }, + { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" }, + + { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div", + CLK_IGNORE_UNUSED }, + { STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div", + CLK_IGNORE_UNUSED }, + + { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 16, "spdifrx", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 27, "cec", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" }, + + { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 11, "sdmmc", "sdmux" }, + { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 26, "ltdc", 
"apb2_div" }, +}; /* * This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx @@ -224,6 +313,10 @@ static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull, 0x0000000000000003ull, 0x0c777f33f6fec9ffull }; +static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull, + 0x0000000000000003ull, + 0x04f77f033e01c9ffull }; + static const u64 *stm32f4_gate_map; static struct clk_hw **clks; @@ -233,6 +326,8 @@ static void __iomem *base; static struct regmap *pdrm; +static int stm32fx_end_primary_clk; + /* * "Multiplier" device for APBx clocks. * @@ -324,23 +419,342 @@ static struct clk *clk_register_apb_mul(struct device *dev, const char *name, return clk; } -/* - * Decode current PLL state and (statically) model the state we inherit from - * the bootloader. - */ -static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk) +enum { + PLL, + PLL_I2S, + PLL_SAI, +}; + +static const struct clk_div_table pll_divp_table[] = { + { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 } +}; + +static const struct clk_div_table pll_divr_table[] = { + { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 } +}; + +struct stm32f4_pll { + spinlock_t *lock; + struct clk_gate gate; + u8 offset; + u8 bit_rdy_idx; + u8 status; + u8 n_start; +}; + +#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate) + +struct stm32f4_pll_post_div_data { + int idx; + u8 pll_num; + const char *name; + const char *parent; + u8 flag; + u8 offset; + u8 shift; + u8 width; + u8 flag_div; + const struct clk_div_table *div_table; +}; + +struct stm32f4_vco_data { + const char *vco_name; + u8 offset; + u8 bit_idx; + u8 bit_rdy_idx; +}; + +static const struct stm32f4_vco_data vco_data[] = { + { "vco", STM32F4_RCC_PLLCFGR, 24, 25 }, + { "vco-i2s", STM32F4_RCC_PLLI2SCFGR, 26, 27 }, + { "vco-sai", STM32F4_RCC_PLLSAICFGR, 28, 29 }, +}; + + +static const struct clk_div_table post_divr_table[] = { + { 0, 2 }, { 1, 4 }, { 2, 8 }, { 3, 16 }, { 0 } +}; + +#define MAX_POST_DIV 3 +static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = { + { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q", + CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL}, + + { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q", + CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL }, + + { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, + STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table }, +}; + +struct stm32f4_div_data { + u8 shift; + u8 width; + u8 flag_div; + const struct clk_div_table *div_table; +}; + +#define MAX_PLL_DIV 3 +static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = { + { 16, 2, 0, pll_divp_table }, + { 24, 4, CLK_DIVIDER_ONE_BASED, NULL }, + { 28, 3, 0, pll_divr_table }, +}; + +struct stm32f4_pll_data { + u8 pll_num; + u8 n_start; + const char *div_name[MAX_PLL_DIV]; +}; + +static const struct stm32f4_pll_data stm32f429_pll[MAX_PLL_DIV] = { + { PLL, 192, { "pll", "pll48", NULL } }, + { PLL_I2S, 192, { NULL, "plli2s-q", "plli2s-r" } }, + { PLL_SAI, 49, { NULL, "pllsai-q", "pllsai-r" } }, +}; + +static const struct stm32f4_pll_data stm32f469_pll[MAX_PLL_DIV] = { + { PLL, 50, { "pll", "pll-q", NULL } }, + { PLL_I2S, 50, { "plli2s-p", "plli2s-q", "plli2s-r" } }, + { PLL_SAI, 50, { "pllsai-p", "pllsai-q", "pllsai-r" } }, +}; + +static int stm32f4_pll_is_enabled(struct clk_hw *hw) +{ + return clk_gate_ops.is_enabled(hw); +} + +static int stm32f4_pll_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct 
stm32f4_pll *pll = to_stm32f4_pll(gate); + int ret = 0; + unsigned long reg; + + ret = clk_gate_ops.enable(hw); + + ret = readl_relaxed_poll_timeout_atomic(base + STM32F4_RCC_CR, reg, + reg & (1 << pll->bit_rdy_idx), 0, 10000); + + return ret; +} + +static void stm32f4_pll_disable(struct clk_hw *hw) +{ + clk_gate_ops.disable(hw); +} + +static unsigned long stm32f4_pll_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + unsigned long n; + + n = (readl(base + pll->offset) >> 6) & 0x1ff; + + return parent_rate * n; +} + +static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + unsigned long n; + + n = rate / *prate; + + if (n < pll->n_start) + n = pll->n_start; + else if (n > 432) + n = 432; + + return *prate * n; +} + +static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + + unsigned long n; + unsigned long val; + int pll_state; + + pll_state = stm32f4_pll_is_enabled(hw); + + if (pll_state) + stm32f4_pll_disable(hw); + + n = rate / parent_rate; + + val = readl(base + pll->offset) & ~(0x1ff << 6); + + writel(val | ((n & 0x1ff) << 6), base + pll->offset); + + if (pll_state) + stm32f4_pll_enable(hw); + + return 0; +} + +static const struct clk_ops stm32f4_pll_gate_ops = { + .enable = stm32f4_pll_enable, + .disable = stm32f4_pll_disable, + .is_enabled = stm32f4_pll_is_enabled, + .recalc_rate = stm32f4_pll_recalc, + .round_rate = stm32f4_pll_round_rate, + .set_rate = stm32f4_pll_set_rate, +}; + +struct stm32f4_pll_div { + struct clk_divider div; + struct clk_hw *hw_pll; +}; + +#define to_pll_div_clk(_div) container_of(_div, struct stm32f4_pll_div, div) + +static unsigned long stm32f4_pll_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return clk_divider_ops.recalc_rate(hw, parent_rate); +} + +static long stm32f4_pll_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clk_divider_ops.round_rate(hw, rate, prate); +} + +static int stm32f4_pll_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { - unsigned long pllcfgr = readl(base + STM32F4_RCC_PLLCFGR); + int pll_state, ret; + + struct clk_divider *div = to_clk_divider(hw); + struct stm32f4_pll_div *pll_div = to_pll_div_clk(div); + + pll_state = stm32f4_pll_is_enabled(pll_div->hw_pll); + + if (pll_state) + stm32f4_pll_disable(pll_div->hw_pll); + + ret = clk_divider_ops.set_rate(hw, rate, parent_rate); - unsigned long pllm = pllcfgr & 0x3f; - unsigned long plln = (pllcfgr >> 6) & 0x1ff; - unsigned long pllp = BIT(((pllcfgr >> 16) & 3) + 1); - const char *pllsrc = pllcfgr & BIT(22) ? 
hse_clk : hsi_clk; - unsigned long pllq = (pllcfgr >> 24) & 0xf; + if (pll_state) + stm32f4_pll_enable(pll_div->hw_pll); - clk_register_fixed_factor(NULL, "vco", pllsrc, 0, plln, pllm); - clk_register_fixed_factor(NULL, "pll", "vco", 0, 1, pllp); - clk_register_fixed_factor(NULL, "pll48", "vco", 0, 1, pllq); + return ret; +} + +static const struct clk_ops stm32f4_pll_div_ops = { + .recalc_rate = stm32f4_pll_div_recalc_rate, + .round_rate = stm32f4_pll_div_round_rate, + .set_rate = stm32f4_pll_div_set_rate, +}; + +static struct clk_hw *clk_register_pll_div(const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + struct clk_hw *pll_hw, spinlock_t *lock) +{ + struct stm32f4_pll_div *pll_div; + struct clk_hw *hw; + struct clk_init_data init; + int ret; + + /* allocate the divider */ + pll_div = kzalloc(sizeof(*pll_div), GFP_KERNEL); + if (!pll_div) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &stm32f4_pll_div_ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 1 : 0); + + /* struct clk_divider assignments */ + pll_div->div.reg = reg; + pll_div->div.shift = shift; + pll_div->div.width = width; + pll_div->div.flags = clk_divider_flags; + pll_div->div.lock = lock; + pll_div->div.table = table; + pll_div->div.hw.init = &init; + + pll_div->hw_pll = pll_hw; + + /* register the clock */ + hw = &pll_div->div.hw; + ret = clk_hw_register(NULL, hw); + if (ret) { + kfree(pll_div); + hw = ERR_PTR(ret); + } + + return hw; +} + +static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc, + const struct stm32f4_pll_data *data, spinlock_t *lock) +{ + struct stm32f4_pll *pll; + struct clk_init_data init = { NULL }; + void __iomem *reg; + struct clk_hw *pll_hw; + int ret; + int i; + const struct stm32f4_vco_data *vco; + + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + vco = &vco_data[data->pll_num]; + + init.name = vco->vco_name; + init.ops = &stm32f4_pll_gate_ops; + init.flags = CLK_SET_RATE_GATE; + init.parent_names = &pllsrc; + init.num_parents = 1; + + pll->gate.lock = lock; + pll->gate.reg = base + STM32F4_RCC_CR; + pll->gate.bit_idx = vco->bit_idx; + pll->gate.hw.init = &init; + + pll->offset = vco->offset; + pll->n_start = data->n_start; + pll->bit_rdy_idx = vco->bit_rdy_idx; + pll->status = (readl(base + STM32F4_RCC_CR) >> vco->bit_idx) & 0x1; + + reg = base + pll->offset; + + pll_hw = &pll->gate.hw; + ret = clk_hw_register(NULL, pll_hw); + if (ret) { + kfree(pll); + return ERR_PTR(ret); + } + + for (i = 0; i < MAX_PLL_DIV; i++) + if (data->div_name[i]) + clk_register_pll_div(data->div_name[i], + vco->vco_name, + 0, + reg, + div_data[i].shift, + div_data[i].width, + div_data[i].flag_div, + div_data[i].div_table, + pll_hw, + lock); + return pll_hw; } /* @@ -352,7 +766,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary) u64 table[MAX_GATE_MAP]; if (primary == 1) { - if (WARN_ON(secondary >= END_PRIMARY_CLK)) + if (WARN_ON(secondary >= stm32fx_end_primary_clk)) return -EINVAL; return secondary; } @@ -369,7 +783,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary) table[BIT_ULL_WORD(secondary)] &= GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0); - return END_PRIMARY_CLK - 1 + hweight64(table[0]) + + return stm32fx_end_primary_clk - 1 + hweight64(table[0]) + (BIT_ULL_WORD(secondary) >= 1 ? 
hweight64(table[1]) : 0) + (BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0); } @@ -611,22 +1025,291 @@ static const char *rtc_parents[4] = { "no-clock", "lse", "lsi", "hse-rtc" }; +static const char *lcd_parent[1] = { "pllsai-r-div" }; + +static const char *i2s_parents[2] = { "plli2s-r", NULL }; + +static const char *sai_parents[4] = { "pllsai-q-div", "plli2s-q-div", NULL, + "no-clock" }; + +static const char *pll48_parents[2] = { "pll-q", "pllsai-p" }; + +static const char *sdmux_parents[2] = { "pll48", "sys" }; + +static const char *hdmi_parents[2] = { "lse", "hsi_div488" }; + +static const char *spdif_parent[1] = { "plli2s-p" }; + +static const char *lptim_parent[4] = { "apb1_mul", "lsi", "hsi", "lse" }; + +static const char *uart_parents1[4] = { "apb2_div", "sys", "hsi", "lse" }; +static const char *uart_parents2[4] = { "apb1_div", "sys", "hsi", "lse" }; + +static const char *i2c_parents[4] = { "apb1_div", "sys", "hsi", "no-clock" }; + +struct stm32_aux_clk { + int idx; + const char *name; + const char * const *parent_names; + int num_parents; + int offset_mux; + u8 shift; + u8 mask; + int offset_gate; + u8 bit_idx; + unsigned long flags; +}; + struct stm32f4_clk_data { const struct stm32f4_gate_data *gates_data; const u64 *gates_map; int gates_num; + const struct stm32f4_pll_data *pll_data; + const struct stm32_aux_clk *aux_clk; + int aux_clk_num; + int end_primary; +}; + +static const struct stm32_aux_clk stm32f429_aux_clk[] = { + { + CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent), + NO_MUX, 0, 0, + STM32F4_RCC_APB2ENR, 26, + CLK_SET_RATE_PARENT + }, + { + CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents), + STM32F4_RCC_CFGR, 23, 1, + NO_GATE, 0, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 20, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 22, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, +}; + +static const struct stm32_aux_clk stm32f469_aux_clk[] = { + { + CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent), + NO_MUX, 0, 0, + STM32F4_RCC_APB2ENR, 26, + CLK_SET_RATE_PARENT + }, + { + CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents), + STM32F4_RCC_CFGR, 23, 1, + NO_GATE, 0, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 20, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 22, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents), + STM32F4_RCC_DCKCFGR, 27, 1, + NO_GATE, 0, + 0 + }, + { + NO_IDX, "sdmux", sdmux_parents, ARRAY_SIZE(sdmux_parents), + STM32F4_RCC_DCKCFGR, 28, 1, + NO_GATE, 0, + 0 + }, +}; + +static const struct stm32_aux_clk stm32f746_aux_clk[] = { + { + CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent), + NO_MUX, 0, 0, + STM32F4_RCC_APB2ENR, 26, + CLK_SET_RATE_PARENT + }, + { + CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents), + STM32F4_RCC_CFGR, 23, 1, + NO_GATE, 0, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI1, "sai1_clk", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 20, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI2, "sai2_clk", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 22, 3, + STM32F4_RCC_APB2ENR, 23, + CLK_SET_RATE_PARENT + }, + { + NO_IDX, "pll48", pll48_parents, 
ARRAY_SIZE(pll48_parents), + STM32F7_RCC_DCKCFGR2, 27, 1, + NO_GATE, 0, + 0 + }, + { + NO_IDX, "sdmux", sdmux_parents, ARRAY_SIZE(sdmux_parents), + STM32F7_RCC_DCKCFGR2, 28, 1, + NO_GATE, 0, + 0 + }, + { + CLK_HDMI_CEC, "hdmi-cec", + hdmi_parents, ARRAY_SIZE(hdmi_parents), + STM32F7_RCC_DCKCFGR2, 26, 1, + NO_GATE, 0, + 0 + }, + { + CLK_SPDIF, "spdif-rx", + spdif_parent, ARRAY_SIZE(spdif_parent), + STM32F7_RCC_DCKCFGR2, 22, 3, + STM32F4_RCC_APB2ENR, 23, + CLK_SET_RATE_PARENT + }, + { + CLK_USART1, "usart1", + uart_parents1, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 0, 3, + STM32F4_RCC_APB2ENR, 4, + CLK_SET_RATE_PARENT, + }, + { + CLK_USART2, "usart2", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 2, 3, + STM32F4_RCC_APB1ENR, 17, + CLK_SET_RATE_PARENT, + }, + { + CLK_USART3, "usart3", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 4, 3, + STM32F4_RCC_APB1ENR, 18, + CLK_SET_RATE_PARENT, + }, + { + CLK_UART4, "uart4", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 6, 3, + STM32F4_RCC_APB1ENR, 19, + CLK_SET_RATE_PARENT, + }, + { + CLK_UART5, "uart5", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 8, 3, + STM32F4_RCC_APB1ENR, 20, + CLK_SET_RATE_PARENT, + }, + { + CLK_USART6, "usart6", + uart_parents1, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 10, 3, + STM32F4_RCC_APB2ENR, 5, + CLK_SET_RATE_PARENT, + }, + + { + CLK_UART7, "uart7", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 12, 3, + STM32F4_RCC_APB1ENR, 30, + CLK_SET_RATE_PARENT, + }, + { + CLK_UART8, "uart8", + uart_parents2, ARRAY_SIZE(uart_parents1), + STM32F7_RCC_DCKCFGR2, 14, 3, + STM32F4_RCC_APB1ENR, 31, + CLK_SET_RATE_PARENT, + }, + { + CLK_I2C1, "i2c1", + i2c_parents, ARRAY_SIZE(i2c_parents), + STM32F7_RCC_DCKCFGR2, 16, 3, + STM32F4_RCC_APB1ENR, 21, + CLK_SET_RATE_PARENT, + }, + { + CLK_I2C2, "i2c2", + i2c_parents, ARRAY_SIZE(i2c_parents), + STM32F7_RCC_DCKCFGR2, 18, 3, + STM32F4_RCC_APB1ENR, 22, + CLK_SET_RATE_PARENT, + }, + { + CLK_I2C3, "i2c3", + i2c_parents, ARRAY_SIZE(i2c_parents), + STM32F7_RCC_DCKCFGR2, 20, 3, + STM32F4_RCC_APB1ENR, 23, + CLK_SET_RATE_PARENT, + }, + { + CLK_I2C4, "i2c4", + i2c_parents, ARRAY_SIZE(i2c_parents), + STM32F7_RCC_DCKCFGR2, 22, 3, + STM32F4_RCC_APB1ENR, 24, + CLK_SET_RATE_PARENT, + }, + + { + CLK_LPTIMER, "lptim1", + lptim_parent, ARRAY_SIZE(lptim_parent), + STM32F7_RCC_DCKCFGR2, 24, 3, + STM32F4_RCC_APB1ENR, 9, + CLK_SET_RATE_PARENT + }, }; static const struct stm32f4_clk_data stm32f429_clk_data = { + .end_primary = END_PRIMARY_CLK, .gates_data = stm32f429_gates, .gates_map = stm32f42xx_gate_map, .gates_num = ARRAY_SIZE(stm32f429_gates), + .pll_data = stm32f429_pll, + .aux_clk = stm32f429_aux_clk, + .aux_clk_num = ARRAY_SIZE(stm32f429_aux_clk), }; static const struct stm32f4_clk_data stm32f469_clk_data = { + .end_primary = END_PRIMARY_CLK, .gates_data = stm32f469_gates, .gates_map = stm32f46xx_gate_map, .gates_num = ARRAY_SIZE(stm32f469_gates), + .pll_data = stm32f469_pll, + .aux_clk = stm32f469_aux_clk, + .aux_clk_num = ARRAY_SIZE(stm32f469_aux_clk), +}; + +static const struct stm32f4_clk_data stm32f746_clk_data = { + .end_primary = END_PRIMARY_CLK_F7, + .gates_data = stm32f746_gates, + .gates_map = stm32f746_gate_map, + .gates_num = ARRAY_SIZE(stm32f746_gates), + .pll_data = stm32f469_pll, + .aux_clk = stm32f746_aux_clk, + .aux_clk_num = ARRAY_SIZE(stm32f746_aux_clk), }; static const struct of_device_id stm32f4_of_match[] = { @@ -638,15 +1321,84 @@ static const struct 
of_device_id stm32f4_of_match[] = { .compatible = "st,stm32f469-rcc", .data = &stm32f469_clk_data }, + { + .compatible = "st,stm32f746-rcc", + .data = &stm32f746_clk_data + }, {} }; +static struct clk_hw *stm32_register_aux_clk(const char *name, + const char * const *parent_names, int num_parents, + int offset_mux, u8 shift, u8 mask, + int offset_gate, u8 bit_idx, + unsigned long flags, spinlock_t *lock) +{ + struct clk_hw *hw; + struct clk_gate *gate = NULL; + struct clk_mux *mux = NULL; + struct clk_hw *mux_hw = NULL, *gate_hw = NULL; + const struct clk_ops *mux_ops = NULL, *gate_ops = NULL; + + if (offset_gate != NO_GATE) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + hw = ERR_PTR(-EINVAL); + goto fail; + } + + gate->reg = base + offset_gate; + gate->bit_idx = bit_idx; + gate->flags = 0; + gate->lock = lock; + gate_hw = &gate->hw; + gate_ops = &clk_gate_ops; + } + + if (offset_mux != NO_MUX) { + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) { + hw = ERR_PTR(-EINVAL); + goto fail; + } + + mux->reg = base + offset_mux; + mux->shift = shift; + mux->mask = mask; + mux->flags = 0; + mux_hw = &mux->hw; + mux_ops = &clk_mux_ops; + } + + if (mux_hw == NULL && gate_hw == NULL) { + hw = ERR_PTR(-EINVAL); + goto fail; + } + + hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, + mux_hw, mux_ops, + NULL, NULL, + gate_hw, gate_ops, + flags); + +fail: + if (IS_ERR(hw)) { + kfree(gate); + kfree(mux); + } + + return hw; +} + static void __init stm32f4_rcc_init(struct device_node *np) { - const char *hse_clk; + const char *hse_clk, *i2s_in_clk; int n; const struct of_device_id *match; const struct stm32f4_clk_data *data; + unsigned long pllcfgr; + const char *pllsrc; + unsigned long pllm; base = of_iomap(np, 0); if (!base) { @@ -666,7 +1418,9 @@ static void __init stm32f4_rcc_init(struct device_node *np) data = match->data; - clks = kmalloc_array(data->gates_num + END_PRIMARY_CLK, + stm32fx_end_primary_clk = data->end_primary; + + clks = kmalloc_array(data->gates_num + stm32fx_end_primary_clk, sizeof(*clks), GFP_KERNEL); if (!clks) goto fail; @@ -675,12 +1429,54 @@ static void __init stm32f4_rcc_init(struct device_node *np) hse_clk = of_clk_get_parent_name(np, 0); - clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0, - 16000000, 160000); - stm32f4_rcc_register_pll(hse_clk, "hsi"); + i2s_in_clk = of_clk_get_parent_name(np, 1); + + i2s_parents[1] = i2s_in_clk; + sai_parents[2] = i2s_in_clk; + + clks[CLK_HSI] = clk_hw_register_fixed_rate_with_accuracy(NULL, "hsi", + NULL, 0, 16000000, 160000); + + pllcfgr = readl(base + STM32F4_RCC_PLLCFGR); + pllsrc = pllcfgr & BIT(22) ? 
hse_clk : "hsi"; + pllm = pllcfgr & 0x3f; + + clk_hw_register_fixed_factor(NULL, "vco_in", pllsrc, + 0, 1, pllm); + + stm32f4_rcc_register_pll("vco_in", &data->pll_data[0], + &stm32f4_clk_lock); + + clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in", + &data->pll_data[1], &stm32f4_clk_lock); + + clks[PLL_VCO_SAI] = stm32f4_rcc_register_pll("vco_in", + &data->pll_data[2], &stm32f4_clk_lock); + + for (n = 0; n < MAX_POST_DIV; n++) { + const struct stm32f4_pll_post_div_data *post_div; + struct clk_hw *hw; + + post_div = &post_div_data[n]; + + hw = clk_register_pll_div(post_div->name, + post_div->parent, + post_div->flag, + base + post_div->offset, + post_div->shift, + post_div->width, + post_div->flag_div, + post_div->div_table, + clks[post_div->pll_num], + &stm32f4_clk_lock); + + if (post_div->idx != NO_IDX) + clks[post_div->idx] = hw; + } sys_parents[1] = hse_clk; - clk_register_mux_table( + + clks[CLK_SYSCLK] = clk_hw_register_mux_table( NULL, "sys", sys_parents, ARRAY_SIZE(sys_parents), 0, base + STM32F4_RCC_CFGR, 0, 3, 0, NULL, &stm32f4_clk_lock); @@ -762,6 +1558,33 @@ static void __init stm32f4_rcc_init(struct device_node *np) goto fail; } + for (n = 0; n < data->aux_clk_num; n++) { + const struct stm32_aux_clk *aux_clk; + struct clk_hw *hw; + + aux_clk = &data->aux_clk[n]; + + hw = stm32_register_aux_clk(aux_clk->name, + aux_clk->parent_names, aux_clk->num_parents, + aux_clk->offset_mux, aux_clk->shift, + aux_clk->mask, aux_clk->offset_gate, + aux_clk->bit_idx, aux_clk->flags, + &stm32f4_clk_lock); + + if (IS_ERR(hw)) { + pr_warn("Unable to register %s clk\n", aux_clk->name); + continue; + } + + if (aux_clk->idx != NO_IDX) + clks[aux_clk->idx] = hw; + } + + if (of_device_is_compatible(np, "st,stm32f746-rcc")) + + clk_hw_register_fixed_factor(NULL, "hsi_div488", "hsi", 0, + 1, 488); + of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL); return; fail: @@ -770,3 +1593,4 @@ fail: } CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init); CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init); +CLK_OF_DECLARE_DRIVER(stm32f746_rcc, "st,stm32f746-rcc", stm32f4_rcc_init); diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c new file mode 100644 index 000000000000..56741f3cf0a3 --- /dev/null +++ b/drivers/clk/clk-versaclock5.c @@ -0,0 +1,791 @@ +/* + * Driver for IDT Versaclock 5 + * + * Copyright (C) 2017 Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* + * Possible optimizations: + * - Use spread spectrum + * - Use integer divider in FOD if applicable + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/rational.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +/* VersaClock5 registers */ +#define VC5_OTP_CONTROL 0x00 + +/* Factory-reserved register block */ +#define VC5_RSVD_DEVICE_ID 0x01 +#define VC5_RSVD_ADC_GAIN_7_0 0x02 +#define VC5_RSVD_ADC_GAIN_15_8 0x03 +#define VC5_RSVD_ADC_OFFSET_7_0 0x04 +#define VC5_RSVD_ADC_OFFSET_15_8 0x05 +#define VC5_RSVD_TEMPY 0x06 +#define VC5_RSVD_OFFSET_TBIN 0x07 +#define VC5_RSVD_GAIN 0x08 +#define VC5_RSVD_TEST_NP 0x09 +#define VC5_RSVD_UNUSED 0x0a +#define VC5_RSVD_BANDGAP_TRIM_UP 0x0b +#define VC5_RSVD_BANDGAP_TRIM_DN 0x0c +#define VC5_RSVD_CLK_R_12_CLK_AMP_4 0x0d +#define VC5_RSVD_CLK_R_34_CLK_AMP_4 0x0e +#define VC5_RSVD_CLK_AMP_123 0x0f + +/* Configuration register block */ +#define VC5_PRIM_SRC_SHDN 0x10 +#define VC5_PRIM_SRC_SHDN_EN_XTAL BIT(7) +#define VC5_PRIM_SRC_SHDN_EN_CLKIN BIT(6) +#define VC5_PRIM_SRC_SHDN_SP BIT(1) +#define VC5_PRIM_SRC_SHDN_EN_GBL_SHDN BIT(0) + +#define VC5_VCO_BAND 0x11 +#define VC5_XTAL_X1_LOAD_CAP 0x12 +#define VC5_XTAL_X2_LOAD_CAP 0x13 +#define VC5_REF_DIVIDER 0x15 +#define VC5_REF_DIVIDER_SEL_PREDIV2 BIT(7) +#define VC5_REF_DIVIDER_REF_DIV(n) ((n) & 0x3f) + +#define VC5_VCO_CTRL_AND_PREDIV 0x16 +#define VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV BIT(7) + +#define VC5_FEEDBACK_INT_DIV 0x17 +#define VC5_FEEDBACK_INT_DIV_BITS 0x18 +#define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n)) +#define VC5_RC_CONTROL0 0x1e +#define VC5_RC_CONTROL1 0x1f +/* Register 0x20 is factory reserved */ + +/* Output divider control for divider 1,2,3,4 */ +#define VC5_OUT_DIV_CONTROL(idx) (0x21 + ((idx) * 0x10)) +#define VC5_OUT_DIV_CONTROL_RESET BIT(7) +#define VC5_OUT_DIV_CONTROL_SELB_NORM BIT(3) +#define VC5_OUT_DIV_CONTROL_SEL_EXT BIT(2) +#define VC5_OUT_DIV_CONTROL_INT_MODE BIT(1) +#define VC5_OUT_DIV_CONTROL_EN_FOD BIT(0) + +#define VC5_OUT_DIV_FRAC(idx, n) (0x22 + ((idx) * 0x10) + (n)) +#define VC5_OUT_DIV_FRAC4_OD_SCEE BIT(1) + +#define VC5_OUT_DIV_STEP_SPREAD(idx, n) (0x26 + ((idx) * 0x10) + (n)) +#define VC5_OUT_DIV_SPREAD_MOD(idx, n) (0x29 + ((idx) * 0x10) + (n)) +#define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n)) +#define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n)) +#define VC5_OUT_DIV_SKEW_FRAC(idx) (0x2f + ((idx) * 0x10)) +/* Registers 0x30, 0x40, 0x50 are factory reserved */ + +/* Clock control register for clock 1,2 */ +#define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n)) +#define VC5_CLK_OUTPUT_CFG1_EN_CLKBUF BIT(0) + +#define VC5_CLK_OE_SHDN 0x68 +#define VC5_CLK_OS_SHDN 0x69 + +#define VC5_GLOBAL_REGISTER 0x76 +#define VC5_GLOBAL_REGISTER_GLOBAL_RESET BIT(5) + +/* PLL/VCO runs between 2.5 GHz and 3.0 GHz */ +#define VC5_PLL_VCO_MIN 2500000000UL +#define VC5_PLL_VCO_MAX 3000000000UL + +/* VC5 Input mux settings */ +#define VC5_MUX_IN_XIN BIT(0) +#define VC5_MUX_IN_CLKIN BIT(1) + +/* Supported IDT VC5 models. 
*/ +enum vc5_model { + IDT_VC5_5P49V5923, + IDT_VC5_5P49V5933, +}; + +struct vc5_driver_data; + +struct vc5_hw_data { + struct clk_hw hw; + struct vc5_driver_data *vc5; + u32 div_int; + u32 div_frc; + unsigned int num; +}; + +struct vc5_driver_data { + struct i2c_client *client; + struct regmap *regmap; + enum vc5_model model; + + struct clk *pin_xin; + struct clk *pin_clkin; + unsigned char clk_mux_ins; + struct clk_hw clk_mux; + struct vc5_hw_data clk_pll; + struct vc5_hw_data clk_fod[2]; + struct vc5_hw_data clk_out[3]; +}; + +static const char * const vc5_mux_names[] = { + "mux" +}; + +static const char * const vc5_pll_names[] = { + "pll" +}; + +static const char * const vc5_fod_names[] = { + "fod0", "fod1", "fod2", "fod3", +}; + +static const char * const vc5_clk_out_names[] = { + "out0_sel_i2cb", "out1", "out2", "out3", "out4", +}; + +/* + * VersaClock5 i2c regmap + */ +static bool vc5_regmap_is_writeable(struct device *dev, unsigned int reg) +{ + /* Factory reserved regs, make them read-only */ + if (reg <= 0xf) + return false; + + /* Factory reserved regs, make them read-only */ + if (reg == 0x14 || reg == 0x1c || reg == 0x1d) + return false; + + return true; +} + +static const struct regmap_config vc5_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .max_register = 0x76, + .writeable_reg = vc5_regmap_is_writeable, +}; + +/* + * VersaClock5 input multiplexer between XTAL and CLKIN divider + */ +static unsigned char vc5_mux_get_parent(struct clk_hw *hw) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_mux); + const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN; + unsigned int src; + + regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src); + src &= mask; + + if (src == VC5_PRIM_SRC_SHDN_EN_XTAL) + return 0; + + if (src == VC5_PRIM_SRC_SHDN_EN_CLKIN) + return 1; + + dev_warn(&vc5->client->dev, + "Invalid clock input configuration (%02x)\n", src); + return 0; +} + +static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_mux); + const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN; + u8 src; + + if ((index > 1) || !vc5->clk_mux_ins) + return -EINVAL; + + if (vc5->clk_mux_ins == (VC5_MUX_IN_CLKIN | VC5_MUX_IN_XIN)) { + if (index == 0) + src = VC5_PRIM_SRC_SHDN_EN_XTAL; + if (index == 1) + src = VC5_PRIM_SRC_SHDN_EN_CLKIN; + } else { + if (index != 0) + return -EINVAL; + + if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) + src = VC5_PRIM_SRC_SHDN_EN_XTAL; + if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) + src = VC5_PRIM_SRC_SHDN_EN_CLKIN; + } + + return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); +} + +static unsigned long vc5_mux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_mux); + unsigned int prediv, div; + + regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv); + + /* The bypass_prediv is set, PLL fed from Ref_in directly. 
*/ + if (prediv & VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV) + return parent_rate; + + regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div); + + /* The Sel_prediv2 is set, PLL fed from prediv2 (Ref_in / 2) */ + if (div & VC5_REF_DIVIDER_SEL_PREDIV2) + return parent_rate / 2; + else + return parent_rate / VC5_REF_DIVIDER_REF_DIV(div); +} + +static long vc5_mux_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long idiv; + + /* PLL cannot operate with input clock above 50 MHz. */ + if (rate > 50000000) + return -EINVAL; + + /* CLKIN within range of PLL input, feed directly to PLL. */ + if (*parent_rate <= 50000000) + return *parent_rate; + + idiv = DIV_ROUND_UP(*parent_rate, rate); + if (idiv > 127) + return -EINVAL; + + return *parent_rate / idiv; +} + +static int vc5_mux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct vc5_driver_data *vc5 = + container_of(hw, struct vc5_driver_data, clk_mux); + unsigned long idiv; + u8 div; + + /* CLKIN within range of PLL input, feed directly to PLL. */ + if (parent_rate <= 50000000) { + regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, + VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV, + VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV); + regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00); + return 0; + } + + idiv = DIV_ROUND_UP(parent_rate, rate); + + /* We have dedicated div-2 predivider. */ + if (idiv == 2) + div = VC5_REF_DIVIDER_SEL_PREDIV2; + else + div = VC5_REF_DIVIDER_REF_DIV(idiv); + + regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div); + regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, + VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV, 0); + + return 0; +} + +static const struct clk_ops vc5_mux_ops = { + .set_parent = vc5_mux_set_parent, + .get_parent = vc5_mux_get_parent, + .recalc_rate = vc5_mux_recalc_rate, + .round_rate = vc5_mux_round_rate, + .set_rate = vc5_mux_set_rate, +}; + +/* + * VersaClock5 PLL/VCO + */ +static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + u32 div_int, div_frc; + u8 fb[5]; + + regmap_bulk_read(vc5->regmap, VC5_FEEDBACK_INT_DIV, fb, 5); + + div_int = (fb[0] << 4) | (fb[1] >> 4); + div_frc = (fb[2] << 16) | (fb[3] << 8) | fb[4]; + + /* The PLL divider has 12 integer bits and 24 fractional bits */ + return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24); +} + +static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + u32 div_int; + u64 div_frc; + + if (rate < VC5_PLL_VCO_MIN) + rate = VC5_PLL_VCO_MIN; + if (rate > VC5_PLL_VCO_MAX) + rate = VC5_PLL_VCO_MAX; + + /* Determine integer part, which is 12 bit wide */ + div_int = rate / *parent_rate; + if (div_int > 0xfff) + rate = *parent_rate * 0xfff; + + /* Determine best fractional part, which is 24 bit wide */ + div_frc = rate % *parent_rate; + div_frc *= BIT(24) - 1; + do_div(div_frc, *parent_rate); + + hwdata->div_int = div_int; + hwdata->div_frc = (u32)div_frc; + + return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24); +} + +static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + u8 fb[5]; + + fb[0] = hwdata->div_int >> 4; + fb[1] = 
hwdata->div_int << 4; + fb[2] = hwdata->div_frc >> 16; + fb[3] = hwdata->div_frc >> 8; + fb[4] = hwdata->div_frc; + + return regmap_bulk_write(vc5->regmap, VC5_FEEDBACK_INT_DIV, fb, 5); +} + +static const struct clk_ops vc5_pll_ops = { + .recalc_rate = vc5_pll_recalc_rate, + .round_rate = vc5_pll_round_rate, + .set_rate = vc5_pll_set_rate, +}; + +static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + /* VCO frequency is divided by two before entering FOD */ + u32 f_in = parent_rate / 2; + u32 div_int, div_frc; + u8 od_int[2]; + u8 od_frc[4]; + + regmap_bulk_read(vc5->regmap, VC5_OUT_DIV_INT(hwdata->num, 0), + od_int, 2); + regmap_bulk_read(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0), + od_frc, 4); + + div_int = (od_int[0] << 4) | (od_int[1] >> 4); + div_frc = (od_frc[0] << 22) | (od_frc[1] << 14) | + (od_frc[2] << 6) | (od_frc[3] >> 2); + + /* The PLL divider has 12 integer bits and 30 fractional bits */ + return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); +} + +static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + /* VCO frequency is divided by two before entering FOD */ + u32 f_in = *parent_rate / 2; + u32 div_int; + u64 div_frc; + + /* Determine integer part, which is 12 bit wide */ + div_int = f_in / rate; + /* + * WARNING: The clock chip does not output signal if the integer part + * of the divider is 0xfff and fractional part is non-zero. + * Clamp the divider at 0xffe to keep the code simple. + */ + if (div_int > 0xffe) { + div_int = 0xffe; + rate = f_in / div_int; + } + + /* Determine best fractional part, which is 30 bit wide */ + div_frc = f_in % rate; + div_frc <<= 24; + do_div(div_frc, rate); + + hwdata->div_int = div_int; + hwdata->div_frc = (u32)div_frc; + + return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc); +} + +static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + u8 data[14] = { + hwdata->div_frc >> 22, hwdata->div_frc >> 14, + hwdata->div_frc >> 6, hwdata->div_frc << 2, + 0, 0, 0, 0, 0, + 0, 0, + hwdata->div_int >> 4, hwdata->div_int << 4, + 0 + }; + + regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0), + data, 14); + + /* + * Toggle magic bit in undocumented register for unknown reason. + * This is what the IDT timing commander tool does and the chip + * datasheet somewhat implies this is needed, but the register + * and the bit is not documented. 
+ */ + regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER, + VC5_GLOBAL_REGISTER_GLOBAL_RESET, 0); + regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER, + VC5_GLOBAL_REGISTER_GLOBAL_RESET, + VC5_GLOBAL_REGISTER_GLOBAL_RESET); + return 0; +} + +static const struct clk_ops vc5_fod_ops = { + .recalc_rate = vc5_fod_recalc_rate, + .round_rate = vc5_fod_round_rate, + .set_rate = vc5_fod_set_rate, +}; + +static int vc5_clk_out_prepare(struct clk_hw *hw) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + + /* Enable the clock buffer */ + regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1), + VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, + VC5_CLK_OUTPUT_CFG1_EN_CLKBUF); + return 0; +} + +static void vc5_clk_out_unprepare(struct clk_hw *hw) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + + /* Enable the clock buffer */ + regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1), + VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0); +} + +static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_SEL_EXT | + VC5_OUT_DIV_CONTROL_EN_FOD; + const u8 fodclkmask = VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_EN_FOD; + const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_SEL_EXT; + unsigned int src; + + regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src); + src &= mask; + + if ((src & fodclkmask) == VC5_OUT_DIV_CONTROL_EN_FOD) + return 0; + + if (src == extclk) + return 1; + + dev_warn(&vc5->client->dev, + "Invalid clock output configuration (%02x)\n", src); + return 0; +} + +static int vc5_clk_out_set_parent(struct clk_hw *hw, u8 index) +{ + struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_driver_data *vc5 = hwdata->vc5; + const u8 mask = VC5_OUT_DIV_CONTROL_RESET | + VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_SEL_EXT | + VC5_OUT_DIV_CONTROL_EN_FOD; + const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM | + VC5_OUT_DIV_CONTROL_SEL_EXT; + u8 src = VC5_OUT_DIV_CONTROL_RESET; + + if (index == 0) + src |= VC5_OUT_DIV_CONTROL_EN_FOD; + else + src |= extclk; + + return regmap_update_bits(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), + mask, src); +} + +static const struct clk_ops vc5_clk_out_ops = { + .prepare = vc5_clk_out_prepare, + .unprepare = vc5_clk_out_unprepare, + .set_parent = vc5_clk_out_set_parent, + .get_parent = vc5_clk_out_get_parent, +}; + +static struct clk_hw *vc5_of_clk_get(struct of_phandle_args *clkspec, + void *data) +{ + struct vc5_driver_data *vc5 = data; + unsigned int idx = clkspec->args[0]; + + if (idx > 2) + return ERR_PTR(-EINVAL); + + return &vc5->clk_out[idx].hw; +} + +static int vc5_map_index_to_output(const enum vc5_model model, + const unsigned int n) +{ + switch (model) { + case IDT_VC5_5P49V5933: + return (n == 0) ? 
0 : 3; + case IDT_VC5_5P49V5923: + default: + return n; + } +} + +static const struct of_device_id clk_vc5_of_match[]; + +static int vc5_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct of_device_id *of_id = + of_match_device(clk_vc5_of_match, &client->dev); + struct vc5_driver_data *vc5; + struct clk_init_data init; + const char *parent_names[2]; + unsigned int n, idx; + int ret; + + vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL); + if (vc5 == NULL) + return -ENOMEM; + + i2c_set_clientdata(client, vc5); + vc5->client = client; + vc5->model = (enum vc5_model)of_id->data; + + vc5->pin_xin = devm_clk_get(&client->dev, "xin"); + if (PTR_ERR(vc5->pin_xin) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + vc5->pin_clkin = devm_clk_get(&client->dev, "clkin"); + if (PTR_ERR(vc5->pin_clkin) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + vc5->regmap = devm_regmap_init_i2c(client, &vc5_regmap_config); + if (IS_ERR(vc5->regmap)) { + dev_err(&client->dev, "failed to allocate register map\n"); + return PTR_ERR(vc5->regmap); + } + + /* Register clock input mux */ + memset(&init, 0, sizeof(init)); + + if (!IS_ERR(vc5->pin_xin)) { + vc5->clk_mux_ins |= VC5_MUX_IN_XIN; + parent_names[init.num_parents++] = __clk_get_name(vc5->pin_xin); + } else if (vc5->model == IDT_VC5_5P49V5933) { + /* IDT VC5 5P49V5933 has built-in oscilator. */ + vc5->pin_xin = clk_register_fixed_rate(&client->dev, + "internal-xtal", NULL, + 0, 25000000); + if (IS_ERR(vc5->pin_xin)) + return PTR_ERR(vc5->pin_xin); + vc5->clk_mux_ins |= VC5_MUX_IN_XIN; + parent_names[init.num_parents++] = __clk_get_name(vc5->pin_xin); + } + + if (!IS_ERR(vc5->pin_clkin)) { + vc5->clk_mux_ins |= VC5_MUX_IN_CLKIN; + parent_names[init.num_parents++] = + __clk_get_name(vc5->pin_clkin); + } + + if (!init.num_parents) { + dev_err(&client->dev, "no input clock specified!\n"); + return -EINVAL; + } + + init.name = vc5_mux_names[0]; + init.ops = &vc5_mux_ops; + init.flags = 0; + init.parent_names = parent_names; + vc5->clk_mux.init = &init; + ret = devm_clk_hw_register(&client->dev, &vc5->clk_mux); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", init.name); + goto err_clk; + } + + /* Register PLL */ + memset(&init, 0, sizeof(init)); + init.name = vc5_pll_names[0]; + init.ops = &vc5_pll_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = vc5_mux_names; + init.num_parents = 1; + vc5->clk_pll.num = 0; + vc5->clk_pll.vc5 = vc5; + vc5->clk_pll.hw.init = &init; + ret = devm_clk_hw_register(&client->dev, &vc5->clk_pll.hw); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", init.name); + goto err_clk; + } + + /* Register FODs */ + for (n = 0; n < 2; n++) { + idx = vc5_map_index_to_output(vc5->model, n); + memset(&init, 0, sizeof(init)); + init.name = vc5_fod_names[idx]; + init.ops = &vc5_fod_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = vc5_pll_names; + init.num_parents = 1; + vc5->clk_fod[n].num = idx; + vc5->clk_fod[n].vc5 = vc5; + vc5->clk_fod[n].hw.init = &init; + ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", + init.name); + goto err_clk; + } + } + + /* Register MUX-connected OUT0_I2C_SELB output */ + memset(&init, 0, sizeof(init)); + init.name = vc5_clk_out_names[0]; + init.ops = &vc5_clk_out_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = vc5_mux_names; + init.num_parents = 1; + vc5->clk_out[0].num = idx; + vc5->clk_out[0].vc5 = vc5; + vc5->clk_out[0].hw.init = &init; + 
ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[0].hw); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", + init.name); + goto err_clk; + } + + /* Register FOD-connected OUTx outputs */ + for (n = 1; n < 3; n++) { + idx = vc5_map_index_to_output(vc5->model, n - 1); + parent_names[0] = vc5_fod_names[idx]; + if (n == 1) + parent_names[1] = vc5_mux_names[0]; + else + parent_names[1] = vc5_clk_out_names[n - 1]; + + memset(&init, 0, sizeof(init)); + init.name = vc5_clk_out_names[idx + 1]; + init.ops = &vc5_clk_out_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = parent_names; + init.num_parents = 2; + vc5->clk_out[n].num = idx; + vc5->clk_out[n].vc5 = vc5; + vc5->clk_out[n].hw.init = &init; + ret = devm_clk_hw_register(&client->dev, + &vc5->clk_out[n].hw); + if (ret) { + dev_err(&client->dev, "unable to register %s\n", + init.name); + goto err_clk; + } + } + + ret = of_clk_add_hw_provider(client->dev.of_node, vc5_of_clk_get, vc5); + if (ret) { + dev_err(&client->dev, "unable to add clk provider\n"); + goto err_clk; + } + + return 0; + +err_clk: + if (vc5->model == IDT_VC5_5P49V5933) + clk_unregister_fixed_rate(vc5->pin_xin); + return ret; +} + +static int vc5_remove(struct i2c_client *client) +{ + struct vc5_driver_data *vc5 = i2c_get_clientdata(client); + + of_clk_del_provider(client->dev.of_node); + + if (vc5->model == IDT_VC5_5P49V5933) + clk_unregister_fixed_rate(vc5->pin_xin); + + return 0; +} + +static const struct i2c_device_id vc5_id[] = { + { "5p49v5923", .driver_data = IDT_VC5_5P49V5923 }, + { "5p49v5933", .driver_data = IDT_VC5_5P49V5933 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, vc5_id); + +static const struct of_device_id clk_vc5_of_match[] = { + { .compatible = "idt,5p49v5923", .data = (void *)IDT_VC5_5P49V5923 }, + { .compatible = "idt,5p49v5933", .data = (void *)IDT_VC5_5P49V5933 }, + { }, +}; +MODULE_DEVICE_TABLE(of, clk_vc5_of_match); + +static struct i2c_driver vc5_driver = { + .driver = { + .name = "vc5", + .of_match_table = clk_vc5_of_match, + }, + .probe = vc5_probe, + .remove = vc5_remove, + .id_table = vc5_id, +}; +module_i2c_driver(vc5_driver); + +MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); +MODULE_DESCRIPTION("IDT VersaClock 5 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c index 0621fbfb4beb..a47960aacfa5 100644 --- a/drivers/clk/clk-wm831x.c +++ b/drivers/clk/clk-wm831x.c @@ -97,7 +97,8 @@ static int wm831x_fll_prepare(struct clk_hw *hw) if (ret != 0) dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret); - usleep_range(2000, 2000); + /* wait 2-3 ms for new frequency taking effect */ + usleep_range(2000, 3000); return ret; } diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index cbed6602172b..7098bfd32b1b 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig @@ -14,6 +14,13 @@ config COMMON_CLK_HI3519 help Build the clock driver for hi3519. +config COMMON_CLK_HI3660 + bool "Hi3660 Clock Driver" + depends on ARCH_HISI || COMPILE_TEST + default ARCH_HISI + help + Build the clock driver for hi3660. 
+ config COMMON_CLK_HI3798CV200 tristate "Hi3798CV200 Clock Driver" depends on ARCH_HISI || COMPILE_TEST diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 4eec5e511e4c..1e4c3ddbad84 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o obj-$(CONFIG_COMMON_CLK_HI3516CV300) += crg-hi3516cv300.o obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o +obj-$(CONFIG_COMMON_CLK_HI3660) += clk-hi3660.o obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o obj-$(CONFIG_RESET_HISI) += reset.o diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c new file mode 100644 index 000000000000..96a9697b06cf --- /dev/null +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -0,0 +1,567 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <dt-bindings/clock/hi3660-clock.h> +#include <linux/clk-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include "clk.h" + +static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = { + { HI3660_CLKIN_SYS, "clkin_sys", NULL, 0, 19200000, }, + { HI3660_CLKIN_REF, "clkin_ref", NULL, 0, 32764, }, + { HI3660_CLK_FLL_SRC, "clk_fll_src", NULL, 0, 128000000, }, + { HI3660_CLK_PPLL0, "clk_ppll0", NULL, 0, 1600000000, }, + { HI3660_CLK_PPLL1, "clk_ppll1", NULL, 0, 1866000000, }, + { HI3660_CLK_PPLL2, "clk_ppll2", NULL, 0, 960000000, }, + { HI3660_CLK_PPLL3, "clk_ppll3", NULL, 0, 1290000000, }, + { HI3660_CLK_SCPLL, "clk_scpll", NULL, 0, 245760000, }, + { HI3660_PCLK, "pclk", NULL, 0, 20000000, }, + { HI3660_CLK_UART0_DBG, "clk_uart0_dbg", NULL, 0, 19200000, }, + { HI3660_CLK_UART6, "clk_uart6", NULL, 0, 19200000, }, + { HI3660_OSC32K, "osc32k", NULL, 0, 32764, }, + { HI3660_OSC19M, "osc19m", NULL, 0, 19200000, }, + { HI3660_CLK_480M, "clk_480m", NULL, 0, 480000000, }, + { HI3660_CLK_INV, "clk_inv", NULL, 0, 10000000, }, +}; + +/* crgctrl */ +static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = { + { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, }, + { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, }, + { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, }, + { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, }, + { HI3660_CLK_GATE_I2C2, "clk_gate_i2c2", "clk_i2c2_iomcu", 1, 4, 0, }, + { HI3660_CLK_GATE_I2C6, "clk_gate_i2c6", "clk_i2c6_iomcu", 1, 4, 0, }, + { HI3660_CLK_DIV_SYSBUS, "clk_div_sysbus", "clk_mux_sysbus", 1, 7, 0, }, + { HI3660_CLK_DIV_320M, "clk_div_320m", "clk_320m_pll_gt", 1, 5, 0, }, + { HI3660_CLK_DIV_A53, "clk_div_a53hpm", "clk_a53hpm_andgt", 1, 2, 0, }, + { HI3660_CLK_GATE_SPI0, "clk_gate_spi0", "clk_ppll0", 1, 8, 0, }, + { HI3660_CLK_GATE_SPI2, "clk_gate_spi2", "clk_ppll0", 1, 8, 0, }, + { HI3660_PCIEPHY_REF, "clk_pciephy_ref", "clk_div_pciephy", 1, 1, 0, }, + { HI3660_CLK_ABB_USB, "clk_abb_usb", "clk_gate_usb_tcxo_en", 1, 1, 0 }, +}; + +static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = { + { HI3660_HCLK_GATE_SDIO0, "hclk_gate_sdio0", "clk_div_sysbus", + 
CLK_SET_RATE_PARENT, 0x0, 21, 0, }, + { HI3660_HCLK_GATE_SD, "hclk_gate_sd", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x0, 30, 0, }, + { HI3660_CLK_GATE_AOMM, "clk_gate_aomm", "clk_div_aomm", + CLK_SET_RATE_PARENT, 0x0, 31, 0, }, + { HI3660_PCLK_GPIO0, "pclk_gpio0", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 0, 0, }, + { HI3660_PCLK_GPIO1, "pclk_gpio1", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 1, 0, }, + { HI3660_PCLK_GPIO2, "pclk_gpio2", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 2, 0, }, + { HI3660_PCLK_GPIO3, "pclk_gpio3", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 3, 0, }, + { HI3660_PCLK_GPIO4, "pclk_gpio4", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 4, 0, }, + { HI3660_PCLK_GPIO5, "pclk_gpio5", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 5, 0, }, + { HI3660_PCLK_GPIO6, "pclk_gpio6", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 6, 0, }, + { HI3660_PCLK_GPIO7, "pclk_gpio7", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 7, 0, }, + { HI3660_PCLK_GPIO8, "pclk_gpio8", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 8, 0, }, + { HI3660_PCLK_GPIO9, "pclk_gpio9", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 9, 0, }, + { HI3660_PCLK_GPIO10, "pclk_gpio10", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 10, 0, }, + { HI3660_PCLK_GPIO11, "pclk_gpio11", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 11, 0, }, + { HI3660_PCLK_GPIO12, "pclk_gpio12", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 12, 0, }, + { HI3660_PCLK_GPIO13, "pclk_gpio13", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 13, 0, }, + { HI3660_PCLK_GPIO14, "pclk_gpio14", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 14, 0, }, + { HI3660_PCLK_GPIO15, "pclk_gpio15", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 15, 0, }, + { HI3660_PCLK_GPIO16, "pclk_gpio16", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 16, 0, }, + { HI3660_PCLK_GPIO17, "pclk_gpio17", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 17, 0, }, + { HI3660_PCLK_GPIO18, "pclk_gpio18", "clk_div_ioperi", + CLK_SET_RATE_PARENT, 0x10, 18, 0, }, + { HI3660_PCLK_GPIO19, "pclk_gpio19", "clk_div_ioperi", + CLK_SET_RATE_PARENT, 0x10, 19, 0, }, + { HI3660_PCLK_GPIO20, "pclk_gpio20", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 20, 0, }, + { HI3660_PCLK_GPIO21, "pclk_gpio21", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 21, 0, }, + { HI3660_CLK_GATE_SPI3, "clk_gate_spi3", "clk_div_ioperi", + CLK_SET_RATE_PARENT, 0x10, 30, 0, }, + { HI3660_CLK_GATE_I2C7, "clk_gate_i2c7", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x10, 31, 0, }, + { HI3660_CLK_GATE_I2C3, "clk_gate_i2c3", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 7, 0, }, + { HI3660_CLK_GATE_SPI1, "clk_gate_spi1", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x20, 9, 0, }, + { HI3660_CLK_GATE_UART1, "clk_gate_uart1", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 11, 0, }, + { HI3660_CLK_GATE_UART2, "clk_gate_uart2", "clk_mux_uart1", + CLK_SET_RATE_PARENT, 0x20, 12, 0, }, + { HI3660_CLK_GATE_UART4, "clk_gate_uart4", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 14, 0, }, + { HI3660_CLK_GATE_UART5, "clk_gate_uart5", "clk_mux_uart1", + CLK_SET_RATE_PARENT, 0x20, 15, 0, }, + { HI3660_CLK_GATE_I2C4, "clk_gate_i2c4", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 27, 0, }, + { HI3660_CLK_GATE_DMAC, "clk_gate_dmac", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x30, 1, 0, }, + { HI3660_PCLK_GATE_DSS, "pclk_gate_dss", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x30, 12, 0, }, + { HI3660_ACLK_GATE_DSS, "aclk_gate_dss", "clk_gate_vivobus", + CLK_SET_RATE_PARENT, 0x30, 13, 0, }, + { HI3660_CLK_GATE_LDI1, "clk_gate_ldi1", 
"clk_div_ldi1", + CLK_SET_RATE_PARENT, 0x30, 14, 0, }, + { HI3660_CLK_GATE_LDI0, "clk_gate_ldi0", "clk_div_ldi0", + CLK_SET_RATE_PARENT, 0x30, 15, 0, }, + { HI3660_CLK_GATE_VIVOBUS, "clk_gate_vivobus", "clk_div_vivobus", + CLK_SET_RATE_PARENT, 0x30, 16, 0, }, + { HI3660_CLK_GATE_EDC0, "clk_gate_edc0", "clk_div_edc0", + CLK_SET_RATE_PARENT, 0x30, 17, 0, }, + { HI3660_CLK_GATE_TXDPHY0_CFG, "clk_gate_txdphy0_cfg", "clkin_sys", + CLK_SET_RATE_PARENT, 0x30, 28, 0, }, + { HI3660_CLK_GATE_TXDPHY0_REF, "clk_gate_txdphy0_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x30, 29, 0, }, + { HI3660_CLK_GATE_TXDPHY1_CFG, "clk_gate_txdphy1_cfg", "clkin_sys", + CLK_SET_RATE_PARENT, 0x30, 30, 0, }, + { HI3660_CLK_GATE_TXDPHY1_REF, "clk_gate_txdphy1_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x30, 31, 0, }, + { HI3660_ACLK_GATE_USB3OTG, "aclk_gate_usb3otg", "clk_div_mmc0bus", + CLK_SET_RATE_PARENT, 0x40, 1, 0, }, + { HI3660_CLK_GATE_SPI4, "clk_gate_spi4", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x40, 4, 0, }, + { HI3660_CLK_GATE_SD, "clk_gate_sd", "clk_mux_sd_sys", + CLK_SET_RATE_PARENT, 0x40, 17, 0, }, + { HI3660_CLK_GATE_SDIO0, "clk_gate_sdio0", "clk_mux_sdio_sys", + CLK_SET_RATE_PARENT, 0x40, 19, 0, }, + { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x50, 21, 0, }, + { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x50, 28, 0, }, + { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x50, 29, 0, }, + { HI3660_ACLK_GATE_PCIE, "aclk_gate_pcie", "clk_div_mmc1bus", + CLK_SET_RATE_PARENT, 0x420, 5, 0, }, + { HI3660_PCLK_GATE_PCIE_SYS, "pclk_gate_pcie_sys", "clk_div_mmc1bus", + CLK_SET_RATE_PARENT, 0x420, 7, 0, }, + { HI3660_CLK_GATE_PCIEAUX, "clk_gate_pcieaux", "clkin_sys", + CLK_SET_RATE_PARENT, 0x420, 8, 0, }, + { HI3660_PCLK_GATE_PCIE_PHY, "pclk_gate_pcie_phy", "clk_div_mmc1bus", + CLK_SET_RATE_PARENT, 0x420, 9, 0, }, +}; + +static const struct hisi_gate_clock hi3660_crgctrl_gate_clks[] = { + { HI3660_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0", + CLK_SET_RATE_PARENT, 0xf0, 6, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1", + CLK_SET_RATE_PARENT, 0xf0, 7, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0", + CLK_SET_RATE_PARENT, 0xf0, 8, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_GATE_UFSPHY_GT, "clk_gate_ufsphy_gt", "clk_div_ufsperi", + CLK_SET_RATE_PARENT, 0xf4, 1, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_MMC, "clk_andgt_mmc", "clk_mux_mmc_pll", + CLK_SET_RATE_PARENT, 0xf4, 2, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll", + CLK_SET_RATE_PARENT, 0xf4, 3, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm", + CLK_SET_RATE_PARENT, 0xf4, 7, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll", + CLK_SET_RATE_PARENT, 0xf4, 8, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xf4, 9, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_UART1, "clk_andgt_uart1", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xf4, 10, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xf4, 11, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xf4, 13, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_VIVOBUS_ANDGT, "clk_vivobus_andgt", "clk_mux_vivobus", + CLK_SET_RATE_PARENT, 
0xf8, 1, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_AOMM_ANDGT, "clk_aomm_andgt", "clk_ppll2", + CLK_SET_RATE_PARENT, 0xf8, 3, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m", + CLK_SET_RATE_PARENT, 0xf8, 10, 0, }, + { HI3660_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus", + CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, }, + { HI3660_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_GATE_UFSPHY_CFG, "clk_gate_ufsphy_cfg", + "clk_div_ufsphy_cfg", CLK_SET_RATE_PARENT, 0x420, 12, 0, }, + { HI3660_CLK_GATE_UFSIO_REF, "clk_gate_ufsio_ref", + "clk_gate_ufs_tcxo_en", CLK_SET_RATE_PARENT, 0x420, 14, 0, }, +}; + +static const char *const +clk_mux_sdio_sys_p[] = {"clk_factor_mmc", "clk_div_sdio",}; +static const char *const +clk_mux_sd_sys_p[] = {"clk_factor_mmc", "clk_div_sd",}; +static const char *const +clk_mux_pll_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll2", "clk_ppll2",}; +static const char *const +clk_mux_pll0123_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll2", "clk_ppll3",}; +static const char *const +clk_mux_edc0_p[] = {"clk_inv", "clk_ppll0", "clk_ppll1", "clk_inv", + "clk_ppll2", "clk_inv", "clk_inv", "clk_inv", + "clk_ppll3", "clk_inv", "clk_inv", "clk_inv", + "clk_inv", "clk_inv", "clk_inv", "clk_inv",}; +static const char *const +clk_mux_ldi0_p[] = {"clk_inv", "clk_ppll0", "clk_ppll2", "clk_inv", + "clk_ppll1", "clk_inv", "clk_inv", "clk_inv", + "clk_ppll3", "clk_inv", "clk_inv", "clk_inv", + "clk_inv", "clk_inv", "clk_inv", "clk_inv",}; +static const char *const +clk_mux_uart0_p[] = {"clkin_sys", "clk_div_uart0",}; +static const char *const +clk_mux_uart1_p[] = {"clkin_sys", "clk_div_uart1",}; +static const char *const +clk_mux_uarth_p[] = {"clkin_sys", "clk_div_uarth",}; +static const char *const +clk_mux_pll02p[] = {"clk_ppll0", "clk_ppll2",}; +static const char *const +clk_mux_ioperi_p[] = {"clk_div_320m", "clk_div_a53hpm",}; +static const char *const +clk_mux_spi_p[] = {"clkin_sys", "clk_div_spi",}; +static const char *const +clk_mux_i2c_p[] = {"clkin_sys", "clk_div_i2c",}; + +static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = { + { HI3660_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sdio_sys_p, + ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT, 0xac, 0, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_UART0, "clk_mux_uart0", clk_mux_uart0_p, + ARRAY_SIZE(clk_mux_uart0_p), CLK_SET_RATE_PARENT, 0xac, 2, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_UART1, "clk_mux_uart1", clk_mux_uart1_p, + ARRAY_SIZE(clk_mux_uart1_p), CLK_SET_RATE_PARENT, 0xac, 3, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_UARTH, "clk_mux_uarth", clk_mux_uarth_p, + ARRAY_SIZE(clk_mux_uarth_p), CLK_SET_RATE_PARENT, 0xac, 4, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_SPI, "clk_mux_spi", clk_mux_spi_p, + ARRAY_SIZE(clk_mux_spi_p), CLK_SET_RATE_PARENT, 0xac, 8, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_I2C, "clk_mux_i2c", clk_mux_i2c_p, + ARRAY_SIZE(clk_mux_i2c_p), CLK_SET_RATE_PARENT, 0xac, 13, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_MMC_PLL, "clk_mux_mmc_pll", clk_mux_pll02p, + ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0xb4, 0, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_LDI1, "clk_mux_ldi1", clk_mux_ldi0_p, + ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT, 0xb4, 8, 4, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_LDI0, "clk_mux_ldi0", clk_mux_ldi0_p, + ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT, 0xb4, 12, 4, + CLK_MUX_HIWORD_MASK, }, + { 
HI3660_CLK_MUX_SD_PLL, "clk_mux_sd_pll", clk_mux_pll_p, + ARRAY_SIZE(clk_mux_pll_p), CLK_SET_RATE_PARENT, 0xb8, 4, 2, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_SD_SYS, "clk_mux_sd_sys", clk_mux_sd_sys_p, + ARRAY_SIZE(clk_mux_sd_sys_p), CLK_SET_RATE_PARENT, 0xb8, 6, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_EDC0, "clk_mux_edc0", clk_mux_edc0_p, + ARRAY_SIZE(clk_mux_edc0_p), CLK_SET_RATE_PARENT, 0xbc, 6, 4, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_SDIO_SYS, "clk_mux_sdio_sys", clk_mux_sdio_sys_p, + ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT, 0xc0, 6, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_SDIO_PLL, "clk_mux_sdio_pll", clk_mux_pll_p, + ARRAY_SIZE(clk_mux_pll_p), CLK_SET_RATE_PARENT, 0xc0, 4, 2, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_VIVOBUS, "clk_mux_vivobus", clk_mux_pll0123_p, + ARRAY_SIZE(clk_mux_pll0123_p), CLK_SET_RATE_PARENT, 0xd0, 12, 2, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_A53HPM, "clk_mux_a53hpm", clk_mux_pll02p, + ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0xd4, 9, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_320M, "clk_mux_320m", clk_mux_pll02p, + ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0x100, 0, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_MUX_IOPERI, "clk_mux_ioperi", clk_mux_ioperi_p, + ARRAY_SIZE(clk_mux_ioperi_p), CLK_SET_RATE_PARENT, 0x108, 10, 1, + CLK_MUX_HIWORD_MASK, }, +}; + +static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = { + { HI3660_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0", + CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_UART1, "clk_div_uart1", "clk_andgt_uart1", + CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth", + CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_MMC, "clk_div_mmc", "clk_andgt_mmc", + CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd", + CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0", + CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0", + CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio", + CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1", + CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi", + CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_vivobus_andgt", + CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_UFSPHY, "clk_div_ufsphy_cfg", "clk_gate_ufsphy_gt", + CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus", + CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_UFSPERI, "clk_div_ufsperi", "clk_gate_ufs_subsys", 
+ CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_AOMM, "clk_div_aomm", "clk_aomm_andgt", + CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_mux_ioperi", + CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +/* clk_pmuctrl */ +/* pmu register need shift 2 bits */ +static const struct hisi_gate_clock hi3660_pmu_gate_clks[] = { + { HI3660_GATE_ABB_192, "clk_gate_abb_192", "clkin_sys", + CLK_SET_RATE_PARENT, (0x10a << 2), 3, 0, }, +}; + +/* clk_pctrl */ +static const struct hisi_gate_clock hi3660_pctrl_gate_clks[] = { + { HI3660_GATE_UFS_TCXO_EN, "clk_gate_ufs_tcxo_en", + "clk_gate_abb_192", CLK_SET_RATE_PARENT, 0x10, 0, + CLK_GATE_HIWORD_MASK, }, + { HI3660_GATE_USB_TCXO_EN, "clk_gate_usb_tcxo_en", "clk_gate_abb_192", + CLK_SET_RATE_PARENT, 0x10, 1, CLK_GATE_HIWORD_MASK, }, +}; + +/* clk_sctrl */ +static const struct hisi_gate_clock hi3660_sctrl_gate_sep_clks[] = { + { HI3660_PCLK_AO_GPIO0, "pclk_ao_gpio0", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 11, 0, }, + { HI3660_PCLK_AO_GPIO1, "pclk_ao_gpio1", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 12, 0, }, + { HI3660_PCLK_AO_GPIO2, "pclk_ao_gpio2", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 13, 0, }, + { HI3660_PCLK_AO_GPIO3, "pclk_ao_gpio3", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 14, 0, }, + { HI3660_PCLK_AO_GPIO4, "pclk_ao_gpio4", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 21, 0, }, + { HI3660_PCLK_AO_GPIO5, "pclk_ao_gpio5", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 22, 0, }, + { HI3660_PCLK_AO_GPIO6, "pclk_ao_gpio6", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 25, 0, }, + { HI3660_PCLK_GATE_MMBUF, "pclk_gate_mmbuf", "pclk_div_mmbuf", + CLK_SET_RATE_PARENT, 0x170, 23, 0, }, + { HI3660_CLK_GATE_DSS_AXI_MM, "clk_gate_dss_axi_mm", "aclk_mux_mmbuf", + CLK_SET_RATE_PARENT, 0x170, 24, 0, }, +}; + +static const struct hisi_gate_clock hi3660_sctrl_gate_clks[] = { + { HI3660_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "clk_sw_mmbuf", + CLK_SET_RATE_PARENT, 0x258, 7, CLK_GATE_HIWORD_MASK, }, + { HI3660_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_FLL_MMBUF_ANDGT, "clk_fll_mmbuf_andgt", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_SYS_MMBUF_ANDGT, "clk_sys_mmbuf_andgt", "clkin_sys", + CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_GATE_PCIEPHY_GT, "clk_gate_pciephy_gt", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +static const char *const +aclk_mux_mmbuf_p[] = {"aclk_div_mmbuf", "clk_gate_aomm",}; +static const char *const +clk_sw_mmbuf_p[] = {"clk_sys_mmbuf_andgt", "clk_fll_mmbuf_andgt", + "aclk_mux_mmbuf", "aclk_mux_mmbuf"}; + +static const struct hisi_mux_clock hi3660_sctrl_mux_clks[] = { + { HI3660_ACLK_MUX_MMBUF, "aclk_mux_mmbuf", aclk_mux_mmbuf_p, + ARRAY_SIZE(aclk_mux_mmbuf_p), CLK_SET_RATE_PARENT, 0x250, 12, 1, + CLK_MUX_HIWORD_MASK, }, + { HI3660_CLK_SW_MMBUF, "clk_sw_mmbuf", clk_sw_mmbuf_p, + ARRAY_SIZE(clk_sw_mmbuf_p), CLK_SET_RATE_PARENT, 0x258, 8, 2, + CLK_MUX_HIWORD_MASK, }, +}; + +static const struct hisi_divider_clock hi3660_sctrl_divider_clks[] = { + { HI3660_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt", + CLK_SET_RATE_PARENT, 0x258, 10, 2, 
CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt", + CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3660_CLK_DIV_PCIEPHY, "clk_div_pciephy", "clk_gate_pciephy_gt", + CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +/* clk_iomcu */ +static const struct hisi_gate_clock hi3660_iomcu_gate_sep_clks[] = { + { HI3660_CLK_I2C0_IOMCU, "clk_i2c0_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 3, 0, }, + { HI3660_CLK_I2C1_IOMCU, "clk_i2c1_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 4, 0, }, + { HI3660_CLK_I2C2_IOMCU, "clk_i2c2_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 5, 0, }, + { HI3660_CLK_I2C6_IOMCU, "clk_i2c6_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 27, 0, }, + { HI3660_CLK_IOMCU_PERI0, "iomcu_peri0", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x90, 0, 0, }, +}; + +static void hi3660_clk_iomcu_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3660_iomcu_gate_sep_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi3660_iomcu_gate_sep_clks, + ARRAY_SIZE(hi3660_iomcu_gate_sep_clks), + clk_data); +} + +static void hi3660_clk_pmuctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3660_pmu_gate_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate(hi3660_pmu_gate_clks, + ARRAY_SIZE(hi3660_pmu_gate_clks), clk_data); +} + +static void hi3660_clk_pctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3660_pctrl_gate_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + hisi_clk_register_gate(hi3660_pctrl_gate_clks, + ARRAY_SIZE(hi3660_pctrl_gate_clks), clk_data); +} + +static void hi3660_clk_sctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3660_sctrl_gate_clks) + + ARRAY_SIZE(hi3660_sctrl_gate_sep_clks) + + ARRAY_SIZE(hi3660_sctrl_mux_clks) + + ARRAY_SIZE(hi3660_sctrl_divider_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + hisi_clk_register_gate(hi3660_sctrl_gate_clks, + ARRAY_SIZE(hi3660_sctrl_gate_clks), clk_data); + hisi_clk_register_gate_sep(hi3660_sctrl_gate_sep_clks, + ARRAY_SIZE(hi3660_sctrl_gate_sep_clks), + clk_data); + hisi_clk_register_mux(hi3660_sctrl_mux_clks, + ARRAY_SIZE(hi3660_sctrl_mux_clks), clk_data); + hisi_clk_register_divider(hi3660_sctrl_divider_clks, + ARRAY_SIZE(hi3660_sctrl_divider_clks), + clk_data); +} + +static void hi3660_clk_crgctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3660_fixed_rate_clks) + + ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks) + + ARRAY_SIZE(hi3660_crgctrl_gate_clks) + + ARRAY_SIZE(hi3660_crgctrl_mux_clks) + + ARRAY_SIZE(hi3660_crg_fixed_factor_clks) + + ARRAY_SIZE(hi3660_crgctrl_divider_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_fixed_rate(hi3660_fixed_rate_clks, + ARRAY_SIZE(hi3660_fixed_rate_clks), + clk_data); + hisi_clk_register_gate_sep(hi3660_crgctrl_gate_sep_clks, + ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks), + clk_data); + hisi_clk_register_gate(hi3660_crgctrl_gate_clks, + ARRAY_SIZE(hi3660_crgctrl_gate_clks), + clk_data); + hisi_clk_register_mux(hi3660_crgctrl_mux_clks, + ARRAY_SIZE(hi3660_crgctrl_mux_clks), + clk_data); + hisi_clk_register_fixed_factor(hi3660_crg_fixed_factor_clks, + 
ARRAY_SIZE(hi3660_crg_fixed_factor_clks), + clk_data); + hisi_clk_register_divider(hi3660_crgctrl_divider_clks, + ARRAY_SIZE(hi3660_crgctrl_divider_clks), + clk_data); +} + +static const struct of_device_id hi3660_clk_match_table[] = { + { .compatible = "hisilicon,hi3660-crgctrl", + .data = hi3660_clk_crgctrl_init }, + { .compatible = "hisilicon,hi3660-pctrl", + .data = hi3660_clk_pctrl_init }, + { .compatible = "hisilicon,hi3660-pmuctrl", + .data = hi3660_clk_pmuctrl_init }, + { .compatible = "hisilicon,hi3660-sctrl", + .data = hi3660_clk_sctrl_init }, + { .compatible = "hisilicon,hi3660-iomcu", + .data = hi3660_clk_iomcu_init }, + { } +}; + +static int hi3660_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + void (*init_func)(struct device_node *np); + + init_func = of_device_get_match_data(dev); + if (!init_func) + return -ENODEV; + + init_func(np); + + return 0; +} + +static struct platform_driver hi3660_clk_driver = { + .probe = hi3660_clk_probe, + .driver = { + .name = "hi3660-clk", + .of_match_table = hi3660_clk_match_table, + }, +}; + +static int __init hi3660_clk_init(void) +{ + return platform_driver_register(&hi3660_clk_driver); +} +core_initcall(hi3660_clk_init); diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c index a47812f56a17..7908bc3c9ec7 100644 --- a/drivers/clk/hisilicon/clkgate-separated.c +++ b/drivers/clk/hisilicon/clkgate-separated.c @@ -120,6 +120,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name, sclk->bit_idx = bit_idx; sclk->flags = clk_gate_flags; sclk->hw.init = &init; + sclk->lock = lock; clk = clk_register(dev, &sclk->hw); if (IS_ERR(clk)) diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 42ffc1c92bab..c07df719b8a3 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -592,15 +592,20 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) imx6q_mmdc_ch1_mask_handshake(base); - /* - * The LDB_DI0/1_SEL muxes are registered read-only due to a hardware - * bug. Set the muxes to the requested values before registering the - * ldb_di_sel clocks. - */ - init_ldb_clks(np, base); + if (clk_on_imx6qp()) { + clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); + clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); + } else { + /* + * The LDB_DI0/1_SEL muxes are registered read-only due to a hardware + * bug. Set the muxes to the requested values before registering the + * ldb_di_sel clocks. 
+ */ + init_ldb_clks(np, base); - clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); - clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); + clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); + clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); + } clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index e7c7353a86fc..ae1d31be906e 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -803,6 +803,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0); clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0); + clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0); clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0); clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0); clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0); diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index ed3a2df536ea..f1099167ba31 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -21,6 +21,9 @@ #define PLL_NUM_OFFSET 0x10 #define PLL_DENOM_OFFSET 0x20 +#define PLL_VF610_NUM_OFFSET 0x20 +#define PLL_VF610_DENOM_OFFSET 0x30 + #define BM_PLL_POWER (0x1 << 12) #define BM_PLL_LOCK (0x1 << 31) #define IMX7_ENET_PLL_POWER (0x1 << 5) @@ -300,6 +303,99 @@ static const struct clk_ops clk_pllv3_av_ops = { .set_rate = clk_pllv3_av_set_rate, }; +struct clk_pllv3_vf610_mf { + u32 mfi; /* integer part, can be 20 or 22 */ + u32 mfn; /* numerator, 30-bit value */ + u32 mfd; /* denominator, 30-bit value, must be less than mfn */ +}; + +static unsigned long clk_pllv3_vf610_mf_to_rate(unsigned long parent_rate, + struct clk_pllv3_vf610_mf mf) +{ + u64 temp64; + + temp64 = parent_rate; + temp64 *= mf.mfn; + do_div(temp64, mf.mfd); + + return (parent_rate * mf.mfi) + temp64; +} + +static struct clk_pllv3_vf610_mf clk_pllv3_vf610_rate_to_mf( + unsigned long parent_rate, unsigned long rate) +{ + struct clk_pllv3_vf610_mf mf; + u64 temp64; + + mf.mfi = (rate >= 22 * parent_rate) ? 
22 : 20; + mf.mfd = 0x3fffffff; /* use max supported value for best accuracy */ + + if (rate <= parent_rate * mf.mfi) + mf.mfn = 0; + else if (rate >= parent_rate * (mf.mfi + 1)) + mf.mfn = mf.mfd - 1; + else { + /* rate = parent_rate * (mfi + mfn/mfd) */ + temp64 = rate - parent_rate * mf.mfi; + temp64 *= mf.mfd; + do_div(temp64, parent_rate); + mf.mfn = temp64; + } + + return mf; +} + +static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_pllv3 *pll = to_clk_pllv3(hw); + struct clk_pllv3_vf610_mf mf; + + mf.mfn = readl_relaxed(pll->base + PLL_VF610_NUM_OFFSET); + mf.mfd = readl_relaxed(pll->base + PLL_VF610_DENOM_OFFSET); + mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20; + + return clk_pllv3_vf610_mf_to_rate(parent_rate, mf); +} + +static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate); + + return clk_pllv3_vf610_mf_to_rate(*prate, mf); +} + +static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_pllv3 *pll = to_clk_pllv3(hw); + struct clk_pllv3_vf610_mf mf = + clk_pllv3_vf610_rate_to_mf(parent_rate, rate); + u32 val; + + val = readl_relaxed(pll->base); + if (mf.mfi == 20) + val &= ~pll->div_mask; /* clear bit for mfi=20 */ + else + val |= pll->div_mask; /* set bit for mfi=22 */ + writel_relaxed(val, pll->base); + + writel_relaxed(mf.mfn, pll->base + PLL_VF610_NUM_OFFSET); + writel_relaxed(mf.mfd, pll->base + PLL_VF610_DENOM_OFFSET); + + return clk_pllv3_wait_lock(pll); +} + +static const struct clk_ops clk_pllv3_vf610_ops = { + .prepare = clk_pllv3_prepare, + .unprepare = clk_pllv3_unprepare, + .is_prepared = clk_pllv3_is_prepared, + .recalc_rate = clk_pllv3_vf610_recalc_rate, + .round_rate = clk_pllv3_vf610_round_rate, + .set_rate = clk_pllv3_vf610_set_rate, +}; + static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -334,6 +430,9 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, case IMX_PLLV3_SYS: ops = &clk_pllv3_sys_ops; break; + case IMX_PLLV3_SYS_VF610: + ops = &clk_pllv3_vf610_ops; + break; case IMX_PLLV3_USB_VF610: pll->div_shift = 1; case IMX_PLLV3_USB: diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index 0476353ab423..59b1863deb88 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -219,8 +219,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); - clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); + clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS_VF610, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); + clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_SYS_VF610, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB_VF610, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x2); clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f); clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", 
PLL5_CTRL, 0x3); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 4afad3b96a61..e1f5e425db73 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -34,6 +34,7 @@ enum imx_pllv3_type { IMX_PLLV3_AV, IMX_PLLV3_ENET, IMX_PLLV3_ENET_IMX7, + IMX_PLLV3_SYS_VF610, }; struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index 0bd631a41f6a..a01ef7806aed 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -8,52 +8,53 @@ config COMMON_CLK_MEDIATEK config COMMON_CLK_MT2701 bool "Clock driver for Mediatek MT2701" + depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST select COMMON_CLK_MEDIATEK - default ARCH_MEDIATEK + default ARCH_MEDIATEK && ARM ---help--- This driver supports Mediatek MT2701 basic clocks. config COMMON_CLK_MT2701_MMSYS bool "Clock driver for Mediatek MT2701 mmsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 mmsys clocks. config COMMON_CLK_MT2701_IMGSYS bool "Clock driver for Mediatek MT2701 imgsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 imgsys clocks. config COMMON_CLK_MT2701_VDECSYS bool "Clock driver for Mediatek MT2701 vdecsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 vdecsys clocks. config COMMON_CLK_MT2701_HIFSYS bool "Clock driver for Mediatek MT2701 hifsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 hifsys clocks. config COMMON_CLK_MT2701_ETHSYS bool "Clock driver for Mediatek MT2701 ethsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 ethsys clocks. config COMMON_CLK_MT2701_BDPSYS bool "Clock driver for Mediatek MT2701 bdpsys" - select COMMON_CLK_MT2701 + depends on COMMON_CLK_MT2701 ---help--- This driver supports Mediatek MT2701 bdpsys clocks. config COMMON_CLK_MT8135 bool "Clock driver for Mediatek MT8135" - depends on ARCH_MEDIATEK || COMPILE_TEST + depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST select COMMON_CLK_MEDIATEK - default ARCH_MEDIATEK + default ARCH_MEDIATEK && ARM ---help--- This driver supports Mediatek MT8135 clocks. 
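
For reference, the fractional PLL arithmetic introduced above for the VF610 (rate = parent_rate * (mfi + mfn/mfd), with mfi restricted to 20 or 22 and mfd held at its 30-bit maximum) can be sanity-checked with a small standalone user-space program. This is only an illustrative sketch, not driver code: the 24 MHz reference and 500 MHz target below are made-up example values, and the clamping mirrors what clk_pllv3_vf610_rate_to_mf() and clk_pllv3_vf610_mf_to_rate() do in the patch.

    /* Standalone sketch of the VF610 PLL rate math; example values only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
            const uint64_t parent = 24000000;       /* assumed reference clock */
            const uint64_t target = 500000000;      /* assumed requested rate */
            const uint32_t mfd = 0x3fffffff;        /* max denominator, as in the driver */
            uint32_t mfi, mfn;
            uint64_t frac, rate;

            /* integer part: 22 if the target is at least 22 * parent, else 20 */
            mfi = (target >= 22 * parent) ? 22 : 20;

            /* fractional part, clamped the same way the driver clamps it */
            if (target <= parent * mfi)
                    mfn = 0;
            else if (target >= parent * (mfi + 1))
                    mfn = mfd - 1;
            else
                    mfn = (uint32_t)(((target - parent * mfi) * mfd) / parent);

            /* rate the hardware would produce for this mfi/mfn/mfd triple */
            frac = (parent * (uint64_t)mfn) / mfd;
            rate = parent * mfi + frac;

            printf("mfi=%" PRIu32 " mfn=%" PRIu32 " -> %" PRIu64 " Hz\n",
                   mfi, mfn, rate);
            return 0;
    }

Pinning mfd at the maximum 30-bit value gives the finest fractional resolution, so for requests the hardware can actually reach (here, 500 MHz from a 24 MHz reference) the computed rate lands within a hertz or two of the request.
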
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 9d9af446bafc..1c1ec137a3cc 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -564,6 +564,46 @@ static struct clk_gate gxbb_clk81 = { }, }; +static struct clk_mux gxbb_sar_adc_clk_sel = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .mask = 0x3, + .shift = 9, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_sel", + .ops = &clk_mux_ops, + /* NOTE: The datasheet doesn't list the parents for bit 10 */ + .parent_names = (const char *[]){ "xtal", "clk81", }, + .num_parents = 2, + }, +}; + +static struct clk_divider gxbb_sar_adc_clk_div = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .shift = 0, + .width = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sar_adc_clk_sel" }, + .num_parents = 1, + }, +}; + +static struct clk_gate gxbb_sar_adc_clk = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .bit_idx = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sar_adc_clk_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1); @@ -754,6 +794,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_SD_EMMC_A] = &gxbb_emmc_a.hw, [CLKID_SD_EMMC_B] = &gxbb_emmc_b.hw, [CLKID_SD_EMMC_C] = &gxbb_emmc_c.hw, + [CLKID_SAR_ADC_CLK] = &gxbb_sar_adc_clk.hw, + [CLKID_SAR_ADC_SEL] = &gxbb_sar_adc_clk_sel.hw, + [CLKID_SAR_ADC_DIV] = &gxbb_sar_adc_clk_div.hw, }, .num = NR_CLKS, }; @@ -856,6 +899,7 @@ static struct clk_gate *gxbb_clk_gates[] = { &gxbb_emmc_a, &gxbb_emmc_b, &gxbb_emmc_c, + &gxbb_sar_adc_clk, }; static int gxbb_clkc_probe(struct platform_device *pdev) @@ -888,6 +932,10 @@ static int gxbb_clkc_probe(struct platform_device *pdev) gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg; gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg; + /* Populate the base address for the SAR ADC clks */ + gxbb_sar_adc_clk_sel.reg = clk_base + (u64)gxbb_sar_adc_clk_sel.reg; + gxbb_sar_adc_clk_div.reg = clk_base + (u64)gxbb_sar_adc_clk_div.reg; + /* Populate base address for gates */ for (i = 0; i < ARRAY_SIZE(gxbb_clk_gates); i++) gxbb_clk_gates[i]->reg = clk_base + diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index 0252939ba58f..8ee2022ce5d5 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -191,7 +191,7 @@ #define CLKID_PERIPHS 20 #define CLKID_SPICC 21 /* CLKID_I2C */ -#define CLKID_SAR_ADC 23 +/* #define CLKID_SAR_ADC */ #define CLKID_SMART_CARD 24 #define CLKID_RNG0 25 #define CLKID_UART0 26 @@ -204,7 +204,7 @@ #define CLKID_ASSIST_MISC 33 /* CLKID_SPI */ #define CLKID_I2S_SPDIF 35 -#define CLKID_ETH 36 +/* CLKID_ETH */ #define CLKID_DEMUX 37 #define CLKID_AIU_GLUE 38 #define CLKID_IEC958 39 @@ -231,13 +231,13 @@ #define CLKID_AHB_DATA_BUS 60 #define CLKID_AHB_CTRL_BUS 61 #define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 +/* CLKID_HDMI_PCLK */ /* CLKID_USB1_DDR_BRIDGE */ /* CLKID_USB0_DDR_BRIDGE */ #define CLKID_MMC_PCLK 66 #define CLKID_DVIN 67 #define CLKID_UART2 68 -#define CLKID_SANA 69 +/* #define CLKID_SANA */ #define CLKID_VPU_INTR 70 #define CLKID_SEC_AHB_AHB3_BRIDGE 71 #define CLKID_CLK81_A53 72 @@ -245,7 +245,7 @@ #define CLKID_VCLK2_VENCI1 74 #define CLKID_VCLK2_VENCP0 75 
#define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT0 77 +/* CLKID_GCLK_VENCI_INT0 */ #define CLKID_GCLK_VENCI_INT 78 #define CLKID_DAC_CLK 79 #define CLKID_AOCLK_GATE 80 @@ -265,8 +265,11 @@ /* CLKID_SD_EMMC_A */ /* CLKID_SD_EMMC_B */ /* CLKID_SD_EMMC_C */ +/* CLKID_SAR_ADC_CLK */ +/* CLKID_SAR_ADC_SEL */ +#define CLKID_SAR_ADC_DIV 99 -#define NR_CLKS 97 +#define NR_CLKS 100 /* include the CLKIDs that have been made part of the stable DT binding */ #include <dt-bindings/clock/gxbb-clkc.h> diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 3f1be46cbb33..888494d4fb8a 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -607,7 +607,6 @@ static int meson8b_clkc_probe(struct platform_device *pdev) /* Populate the base address for the MPEG clks */ meson8b_mpeg_clk_sel.reg = clk_base + (u32)meson8b_mpeg_clk_sel.reg; meson8b_mpeg_clk_div.reg = clk_base + (u32)meson8b_mpeg_clk_div.reg; - meson8b_clk81.reg = clk_base + (u32)meson8b_clk81.reg; /* Populate base address for gates */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++) diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile index d9ae97fb43c4..d71c7fd5da16 100644 --- a/drivers/clk/mvebu/Makefile +++ b/drivers/clk/mvebu/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_ARMADA_39X_CLK) += armada-39x.o obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-xtal.o obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-tbg.o obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-periph.o -obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o +obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o mv98dx3236.o obj-$(CONFIG_ARMADA_AP806_SYSCON) += ap806-system-controller.o obj-$(CONFIG_ARMADA_CP110_SYSCON) += cp110-system-controller.o obj-$(CONFIG_DOVE_CLK) += dove.o dove-divider.o diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c index 8181b919f062..f17702107ac5 100644 --- a/drivers/clk/mvebu/ap806-system-controller.c +++ b/drivers/clk/mvebu/ap806-system-controller.c @@ -55,21 +55,39 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev) freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK; switch (freq_mode) { - case 0x0 ... 0x5: + case 0x0: + case 0x1: cpuclk_freq = 2000; break; - case 0x6 ... 0xB: + case 0x6: + case 0x7: cpuclk_freq = 1800; break; - case 0xC ... 0x11: + case 0x4: + case 0xB: + case 0xD: cpuclk_freq = 1600; break; - case 0x12 ... 0x16: + case 0x1a: cpuclk_freq = 1400; break; - case 0x17 ... 
0x19: + case 0x14: + case 0x17: cpuclk_freq = 1300; break; + case 0x19: + cpuclk_freq = 1200; + break; + case 0x13: + case 0x1d: + cpuclk_freq = 1000; + break; + case 0x1c: + cpuclk_freq = 800; + break; + case 0x1b: + cpuclk_freq = 600; + break; default: dev_err(&pdev->dev, "invalid SAR value\n"); return -EINVAL; diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c index b3094315a3c0..0ec44ae9a2a2 100644 --- a/drivers/clk/mvebu/armada-xp.c +++ b/drivers/clk/mvebu/armada-xp.c @@ -52,6 +52,12 @@ static u32 __init axp_get_tclk_freq(void __iomem *sar) return 250000000; } +/* MV98DX3236 TCLK frequency is fixed to 200MHz */ +static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar) +{ + return 200000000; +} + static const u32 axp_cpu_freqs[] __initconst = { 1000000000, 1066000000, @@ -89,6 +95,12 @@ static u32 __init axp_get_cpu_freq(void __iomem *sar) return cpu_freq; } +/* MV98DX3236 CLK frequency is fixed to 800MHz */ +static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar) +{ + return 800000000; +} + static const int axp_nbclk_ratios[32][2] __initconst = { {0, 1}, {1, 2}, {2, 2}, {2, 2}, {1, 2}, {1, 2}, {1, 1}, {2, 3}, @@ -158,6 +170,11 @@ static const struct coreclk_soc_desc axp_coreclks = { .num_ratios = ARRAY_SIZE(axp_coreclk_ratios), }; +static const struct coreclk_soc_desc mv98dx3236_coreclks = { + .get_tclk_freq = mv98dx3236_get_tclk_freq, + .get_cpu_freq = mv98dx3236_get_cpu_freq, +}; + /* * Clock Gating Control */ @@ -195,6 +212,15 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = { { } }; +static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = { + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex00", NULL, 5, 0 }, + { "sdio", NULL, 17, 0 }, + { "xor0", NULL, 22, 0 }, + { } +}; + static void __init axp_clk_init(struct device_node *np) { struct device_node *cgnp = diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c index d1e5863d3375..8491979f4096 100644 --- a/drivers/clk/mvebu/clk-corediv.c +++ b/drivers/clk/mvebu/clk-corediv.c @@ -71,6 +71,10 @@ static const struct clk_corediv_desc mvebu_corediv_desc[] = { { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */ }; +static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = { + { .mask = 0x0f, .offset = 6, .fieldbit = 26 }, /* NAND clock */ +}; + #define to_corediv_clk(p) container_of(p, struct clk_corediv, hw) static int clk_corediv_is_enabled(struct clk_hw *hwclk) @@ -232,6 +236,18 @@ static const struct clk_corediv_soc_desc armada375_corediv_soc = { .ratio_offset = 0x4, }; +static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = { + .descs = mv98dx3236_corediv_desc, + .ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc), + .ops = { + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(10), + .ratio_offset = 0x8, +}; + static void __init mvebu_corediv_clk_init(struct device_node *node, const struct clk_corediv_soc_desc *soc_desc) @@ -313,3 +329,10 @@ static void __init armada380_corediv_clk_init(struct device_node *node) } CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock", armada380_corediv_clk_init); + +static void __init mv98dx3236_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc); +} +CLK_OF_DECLARE(mv98dx3236_corediv_clk, "marvell,mv98dx3236-corediv-clock", + mv98dx3236_corediv_clk_init); diff --git a/drivers/clk/mvebu/clk-cpu.c 
b/drivers/clk/mvebu/clk-cpu.c index 5837eb8a212f..044892b6534d 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -245,3 +245,11 @@ cpuclk_out: CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock", of_cpu_clk_setup); + +static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node) +{ + of_clk_add_provider(node, of_clk_src_simple_get, NULL); +} + +CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock", + of_mv98dx3236_cpu_clk_setup); diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 32e5b43c086f..6b11d7b3e0e0 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -64,8 +64,11 @@ enum { #define CP110_GATE_NAND 2 #define CP110_GATE_PPV2 3 #define CP110_GATE_SDIO 4 +#define CP110_GATE_MG 5 +#define CP110_GATE_MG_CORE 6 #define CP110_GATE_XOR1 7 #define CP110_GATE_XOR0 8 +#define CP110_GATE_GOP_DP 9 #define CP110_GATE_PCIE_X1_0 11 #define CP110_GATE_PCIE_X1_1 12 #define CP110_GATE_PCIE_X4 13 @@ -73,7 +76,7 @@ enum { #define CP110_GATE_SATA 15 #define CP110_GATE_SATA_USB 16 #define CP110_GATE_MAIN 17 -#define CP110_GATE_SDMMC 18 +#define CP110_GATE_SDMMC_GOP 18 #define CP110_GATE_SLOW_IO 21 #define CP110_GATE_USB3H0 22 #define CP110_GATE_USB3H1 23 @@ -296,6 +299,11 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) "gate-clock-output-names", CP110_GATE_MAIN, &parent); break; + case CP110_GATE_MG: + of_property_read_string_index(np, + "gate-clock-output-names", + CP110_GATE_MG_CORE, &parent); + break; case CP110_GATE_NAND: parent = nand_name; break; @@ -303,9 +311,10 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev) parent = ppv2_name; break; case CP110_GATE_SDIO: + case CP110_GATE_GOP_DP: of_property_read_string_index(np, "gate-clock-output-names", - CP110_GATE_SDMMC, &parent); + CP110_GATE_SDMMC_GOP, &parent); break; case CP110_GATE_XOR1: case CP110_GATE_XOR0: diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c new file mode 100644 index 000000000000..6e203af73cac --- /dev/null +++ b/drivers/clk/mvebu/mv98dx3236.c @@ -0,0 +1,180 @@ +/* + * Marvell MV98DX3236 SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + + +/* + * For 98DX4251 Sample At Reset the CPU, DDR and Main PLL clocks are all + * defined at the same time + * + * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency + * 0 = 400 MHz 400 MHz 800 MHz + * 2 = 667 MHz 667 MHz 2000 MHz + * 3 = 800 MHz 800 MHz 1600 MHz + * others reserved. + * + * For 98DX3236 Sample At Reset the CPU, DDR and Main PLL clocks are all + * defined at the same time + * + * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency + * 1 = 667 MHz 667 MHz 2000 MHz + * 2 = 400 MHz 400 MHz 400 MHz + * 3 = 800 MHz 800 MHz 800 MHz + * 5 = 800 MHz 400 MHz 800 MHz + * others reserved. 
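+ * + * For example (reading the 98DX3236 table above): SAR1[20:18] = 5 selects an 800 MHz CPU clock with DDR at 400 MHz and the MPLL at 800 MHz, which is why mv98dx3236_cpu_frequencies[5] below is 800000000 and the option-5 entry of the CPU-to-DDR ratio table is {1, 2}.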
+ */ + +#define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT 18 +#define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK 0x7 + +static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar) +{ + /* Tclk = 200MHz, no SaR dependency */ + return 200000000; +} + +static const u32 mv98dx3236_cpu_frequencies[] __initconst = { + 0, + 667000000, + 400000000, + 800000000, + 0, + 800000000, + 0, 0, +}; + +static const u32 mv98dx4251_cpu_frequencies[] __initconst = { + 400000000, + 0, + 667000000, + 800000000, + 0, 0, 0, 0, +}; + +static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar) +{ + u32 cpu_freq = 0; + u8 cpu_freq_select = 0; + + cpu_freq_select = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) & + SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK); + + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) + cpu_freq = mv98dx4251_cpu_frequencies[cpu_freq_select]; + else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) + cpu_freq = mv98dx3236_cpu_frequencies[cpu_freq_select]; + + if (!cpu_freq) + pr_err("CPU freq select unsupported %d\n", cpu_freq_select); + + return cpu_freq; +} + +enum { + MV98DX3236_CPU_TO_DDR, + MV98DX3236_CPU_TO_MPLL +}; + +static const struct coreclk_ratio mv98dx3236_core_ratios[] __initconst = { + { .id = MV98DX3236_CPU_TO_DDR, .name = "ddrclk" }, + { .id = MV98DX3236_CPU_TO_MPLL, .name = "mpll" }, +}; + +static const int __initconst mv98dx3236_cpu_mpll_ratios[8][2] = { + {0, 1}, {3, 1}, {1, 1}, {1, 1}, + {0, 1}, {1, 1}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx3236_cpu_ddr_ratios[8][2] = { + {0, 1}, {1, 1}, {1, 1}, {1, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx4251_cpu_mpll_ratios[8][2] = { + {2, 1}, {0, 1}, {3, 1}, {2, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx4251_cpu_ddr_ratios[8][2] = { + {1, 1}, {0, 1}, {1, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init mv98dx3236_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) & + SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK); + + switch (id) { + case MV98DX3236_CPU_TO_DDR: + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) { + *mult = mv98dx4251_cpu_ddr_ratios[opt][0]; + *div = mv98dx4251_cpu_ddr_ratios[opt][1]; + } else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) { + *mult = mv98dx3236_cpu_ddr_ratios[opt][0]; + *div = mv98dx3236_cpu_ddr_ratios[opt][1]; + } + break; + case MV98DX3236_CPU_TO_MPLL: + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) { + *mult = mv98dx4251_cpu_mpll_ratios[opt][0]; + *div = mv98dx4251_cpu_mpll_ratios[opt][1]; + } else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) { + *mult = mv98dx3236_cpu_mpll_ratios[opt][0]; + *div = mv98dx3236_cpu_mpll_ratios[opt][1]; + } + break; + } +} + +static const struct coreclk_soc_desc mv98dx3236_core_clocks = { + .get_tclk_freq = mv98dx3236_get_tclk_freq, + .get_cpu_freq = mv98dx3236_get_cpu_freq, + .get_clk_ratio = mv98dx3236_get_clk_ratio, + .ratios = mv98dx3236_core_ratios, + .num_ratios = ARRAY_SIZE(mv98dx3236_core_ratios), +}; + + +/* + * Clock Gating Control + */ + +static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = { + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex00", NULL, 5, 0 }, + { "sdio", NULL, 17, 0 }, + { "usb0", NULL, 18, 0 }, + { "xor0", NULL, 22, 0 }, + { } +}; + +static void __init mv98dx3236_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + 
of_find_compatible_node(NULL, NULL, "marvell,mv98dx3236-gating-clock"); + + mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); +} +CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 07e2cc6ed781..3487c267833e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -462,8 +462,79 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { .num_clks = ARRAY_SIZE(msm8916_clks), }; +/* msm8974 */ +DEFINE_CLK_SMD_RPM(msm8974, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8974, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8974, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8974, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM(msm8974, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8974, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8974, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(msm8974, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6); + +static struct clk_smd_rpm *msm8974_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8974_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8974_pnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8974_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8974_snoc_a_clk, + [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8974_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8974_bimc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8974_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8974_qdss_a_clk, + [RPM_SMD_CXO_D0] = &msm8974_cxo_d0, + [RPM_SMD_CXO_D0_A] = &msm8974_cxo_d0_a, + [RPM_SMD_CXO_D1] = &msm8974_cxo_d1, + [RPM_SMD_CXO_D1_A] = &msm8974_cxo_d1_a, + [RPM_SMD_CXO_A0] = &msm8974_cxo_a0, + [RPM_SMD_CXO_A0_A] = &msm8974_cxo_a0_a, + [RPM_SMD_CXO_A1] = &msm8974_cxo_a1, + [RPM_SMD_CXO_A1_A] = &msm8974_cxo_a1_a, + [RPM_SMD_CXO_A2] = &msm8974_cxo_a2, + [RPM_SMD_CXO_A2_A] = &msm8974_cxo_a2_a, + [RPM_SMD_DIFF_CLK] = &msm8974_diff_clk, + [RPM_SMD_DIFF_A_CLK] = &msm8974_diff_a_clk, + [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1, + [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2, + [RPM_SMD_CXO_D0_PIN] = 
&msm8974_cxo_d0_pin, + [RPM_SMD_CXO_D0_A_PIN] = &msm8974_cxo_d0_a_pin, + [RPM_SMD_CXO_D1_PIN] = &msm8974_cxo_d1_pin, + [RPM_SMD_CXO_D1_A_PIN] = &msm8974_cxo_d1_a_pin, + [RPM_SMD_CXO_A0_PIN] = &msm8974_cxo_a0_pin, + [RPM_SMD_CXO_A0_A_PIN] = &msm8974_cxo_a0_a_pin, + [RPM_SMD_CXO_A1_PIN] = &msm8974_cxo_a1_pin, + [RPM_SMD_CXO_A1_A_PIN] = &msm8974_cxo_a1_a_pin, + [RPM_SMD_CXO_A2_PIN] = &msm8974_cxo_a2_pin, + [RPM_SMD_CXO_A2_A_PIN] = &msm8974_cxo_a2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8974 = { + .clks = msm8974_clks, + .num_clks = ARRAY_SIZE(msm8974_clks), +}; static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index cfab7b400381..03f9d316f969 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -145,7 +145,6 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path, clocks_node = of_find_node_by_path("/clocks"); if (clocks_node) node = of_find_node_by_name(clocks_node, path); - of_node_put(clocks_node); if (!node) { fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL); diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c index 33d09138f5e5..46cb256b4aa2 100644 --- a/drivers/clk/qcom/gcc-ipq4019.c +++ b/drivers/clk/qcom/gcc-ipq4019.c @@ -20,6 +20,9 @@ #include <linux/clk-provider.h> #include <linux/regmap.h> #include <linux/reset-controller.h> +#include <linux/math64.h> +#include <linux/delay.h> +#include <linux/clk.h> #include <dt-bindings/clock/qcom,gcc-ipq4019.h> @@ -28,6 +31,13 @@ #include "clk-rcg.h" #include "clk-branch.h" #include "reset.h" +#include "clk-regmap-divider.h" + +#define to_clk_regmap_div(_hw) container_of(to_clk_regmap(_hw),\ + struct clk_regmap_div, clkr) + +#define to_clk_fepll(_hw) container_of(to_clk_regmap_div(_hw),\ + struct clk_fepll, cdiv) enum { P_XO, @@ -40,6 +50,41 @@ enum { P_DDRPLLAPSS, }; +/* + * struct clk_fepll_vco - vco feedback divider corresponds for FEPLL clocks + * @fdbkdiv_shift: lowest bit for FDBKDIV + * @fdbkdiv_width: number of bits in FDBKDIV + * @refclkdiv_shift: lowest bit for REFCLKDIV + * @refclkdiv_width: number of bits in REFCLKDIV + * @reg: PLL_DIV register address + */ +struct clk_fepll_vco { + u32 fdbkdiv_shift; + u32 fdbkdiv_width; + u32 refclkdiv_shift; + u32 refclkdiv_width; + u32 reg; +}; + +/* + * struct clk_fepll - clk divider corresponds to FEPLL clocks + * @fixed_div: fixed divider value if divider is fixed + * @parent_map: map from software's parent index to hardware's src_sel field + * @cdiv: divider values for PLL_DIV + * @pll_vco: vco feedback divider + * @div_table: mapping for actual divider value to register divider value + * in case of non fixed divider + * @freq_tbl: frequency table + */ +struct clk_fepll { + u32 fixed_div; + const u8 *parent_map; + struct clk_regmap_div cdiv; + const struct clk_fepll_vco *pll_vco; + const struct clk_div_table *div_table; + const struct freq_tbl *freq_tbl; +}; + static struct parent_map gcc_xo_200_500_map[] = { { P_XO, 0 }, { P_FEPLL200, 1 }, @@ -80,7 +125,7 @@ static struct parent_map gcc_xo_sdcc1_500_map[] = { static const char * const gcc_xo_sdcc1_500[] = { "xo", - "ddrpll", + "ddrpllsdcc", "fepll500", }; @@ -121,6 +166,12 @@ static struct parent_map gcc_xo_ddr_500_200_map[] = { { P_DDRPLLAPSS, 1 }, }; +/* + * Contains index for 
safe clock during APSS freq change. + * fepll500 is being used as safe clock so initialize it + * with its index in parents list gcc_xo_ddr_500_200. + */ +static const int gcc_ipq4019_cpu_safe_parent = 2; static const char * const gcc_xo_ddr_500_200[] = { "xo", "fepll200", @@ -505,7 +556,7 @@ static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { F(25000000, P_FEPLL500, 1, 1, 20), F(50000000, P_FEPLL500, 1, 1, 10), F(100000000, P_FEPLL500, 1, 1, 5), - F(193000000, P_DDRPLL, 1, 0, 0), + F(192000000, P_DDRPLL, 1, 0, 0), { } }; @@ -524,10 +575,20 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { }; static const struct freq_tbl ftbl_gcc_apps_clk[] = { - F(48000000, P_XO, 1, 0, 0), + F(48000000, P_XO, 1, 0, 0), F(200000000, P_FEPLL200, 1, 0, 0), + F(384000000, P_DDRPLLAPSS, 1, 0, 0), + F(413000000, P_DDRPLLAPSS, 1, 0, 0), + F(448000000, P_DDRPLLAPSS, 1, 0, 0), + F(488000000, P_DDRPLLAPSS, 1, 0, 0), F(500000000, P_FEPLL500, 1, 0, 0), - F(626000000, P_DDRPLLAPSS, 1, 0, 0), + F(512000000, P_DDRPLLAPSS, 1, 0, 0), + F(537000000, P_DDRPLLAPSS, 1, 0, 0), + F(565000000, P_DDRPLLAPSS, 1, 0, 0), + F(597000000, P_DDRPLLAPSS, 1, 0, 0), + F(632000000, P_DDRPLLAPSS, 1, 0, 0), + F(672000000, P_DDRPLLAPSS, 1, 0, 0), + F(716000000, P_DDRPLLAPSS, 1, 0, 0), { } }; @@ -541,6 +602,7 @@ static struct clk_rcg2 apps_clk_src = { .parent_names = gcc_xo_ddr_500_200, .num_parents = 4, .ops = &clk_rcg2_ops, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1154,6 +1216,364 @@ static struct clk_branch gcc_wcss5g_rtc_clk = { }, }; +/* Calculates the VCO rate for FEPLL. */ +static u64 clk_fepll_vco_calc_rate(struct clk_fepll *pll_div, + unsigned long parent_rate) +{ + const struct clk_fepll_vco *pll_vco = pll_div->pll_vco; + u32 fdbkdiv, refclkdiv, cdiv; + u64 vco; + + regmap_read(pll_div->cdiv.clkr.regmap, pll_vco->reg, &cdiv); + refclkdiv = (cdiv >> pll_vco->refclkdiv_shift) & + (BIT(pll_vco->refclkdiv_width) - 1); + fdbkdiv = (cdiv >> pll_vco->fdbkdiv_shift) & + (BIT(pll_vco->fdbkdiv_width) - 1); + + vco = parent_rate / refclkdiv; + vco *= 2; + vco *= fdbkdiv; + + return vco; +} + +static const struct clk_fepll_vco gcc_apss_ddrpll_vco = { + .fdbkdiv_shift = 16, + .fdbkdiv_width = 8, + .refclkdiv_shift = 24, + .refclkdiv_width = 5, + .reg = 0x2e020, +}; + +static const struct clk_fepll_vco gcc_fepll_vco = { + .fdbkdiv_shift = 16, + .fdbkdiv_width = 8, + .refclkdiv_shift = 24, + .refclkdiv_width = 5, + .reg = 0x2f020, +}; + +/* + * Round rate function for APSS CPU PLL Clock divider. + * It looks up the frequency table and returns the next higher frequency + * supported in hardware. + */ +static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *p_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + struct clk_hw *p_hw; + const struct freq_tbl *f; + + f = qcom_find_freq(pll->freq_tbl, rate); + if (!f) + return -EINVAL; + + p_hw = clk_hw_get_parent_by_index(hw, f->src); + *p_rate = clk_hw_get_rate(p_hw); + + return f->freq; +}; + +/* + * Clock set rate function for APSS CPU PLL Clock divider. + * It looks up the frequency table and updates the PLL divider to corresponding + * divider value. 
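+ * For example, a request for 716000000 Hz matches the { 716000000, P_XO, 0x3, 0, 0 } entry of ftbl_apss_ddr_pll below, so the 4-bit divider field at bits [7:4] of the 0x2e020 PLL register (gcc_apss_cpu_plldiv_clk) is programmed to 0x3.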
+ */ +static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + const struct freq_tbl *f; + u32 mask; + int ret; + + f = qcom_find_freq(pll->freq_tbl, rate); + if (!f) + return -EINVAL; + + mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift; + ret = regmap_update_bits(pll->cdiv.clkr.regmap, + pll->cdiv.reg, mask, + f->pre_div << pll->cdiv.shift); + /* + * There is no status bit which can be checked for successful CPU + * divider update operation so using delay for the same. + */ + udelay(1); + + return 0; +}; + +/* + * Clock frequency calculation function for APSS CPU PLL Clock divider. + * This clock divider is nonlinear so this function calculates the actual + * divider and returns the output frequency by dividing VCO Frequency + * with this actual divider value. + */ +static unsigned long +clk_cpu_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + u32 cdiv, pre_div; + u64 rate; + + regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv); + cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1); + + /* + * Some dividers have value in 0.5 fraction so multiply both VCO + * frequency(parent_rate) and pre_div with 2 to make integer + * calculation. + */ + if (cdiv > 10) + pre_div = (cdiv + 1) * 2; + else + pre_div = cdiv + 12; + + rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2; + do_div(rate, pre_div); + + return rate; +}; + +static const struct clk_ops clk_regmap_cpu_div_ops = { + .round_rate = clk_cpu_div_round_rate, + .set_rate = clk_cpu_div_set_rate, + .recalc_rate = clk_cpu_div_recalc_rate, +}; + +static const struct freq_tbl ftbl_apss_ddr_pll[] = { + { 384000000, P_XO, 0xd, 0, 0 }, + { 413000000, P_XO, 0xc, 0, 0 }, + { 448000000, P_XO, 0xb, 0, 0 }, + { 488000000, P_XO, 0xa, 0, 0 }, + { 512000000, P_XO, 0x9, 0, 0 }, + { 537000000, P_XO, 0x8, 0, 0 }, + { 565000000, P_XO, 0x7, 0, 0 }, + { 597000000, P_XO, 0x6, 0, 0 }, + { 632000000, P_XO, 0x5, 0, 0 }, + { 672000000, P_XO, 0x4, 0, 0 }, + { 716000000, P_XO, 0x3, 0, 0 }, + { 768000000, P_XO, 0x2, 0, 0 }, + { 823000000, P_XO, 0x1, 0, 0 }, + { 896000000, P_XO, 0x0, 0, 0 }, + { } +}; + +static struct clk_fepll gcc_apss_cpu_plldiv_clk = { + .cdiv.reg = 0x2e020, + .cdiv.shift = 4, + .cdiv.width = 4, + .cdiv.clkr = { + .enable_reg = 0x2e000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "ddrpllapss", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_regmap_cpu_div_ops, + }, + }, + .freq_tbl = ftbl_apss_ddr_pll, + .pll_vco = &gcc_apss_ddrpll_vco, +}; + +/* Calculates the rate for PLL divider. + * If the divider value is not fixed then it gets the actual divider value + * from divider table. Then, it calculate the clock rate by dividing the + * parent rate with actual divider value. 
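+ * For example, ddrpllsdcc and fepll500 below use fixed dividers of 28 and 8 respectively, while fepllwcss2g/fepllwcss5g look their divider (15, 16, 18 or 20) up in fepllwcss_clk_div_table from the 2-bit field they read at 0x2f020.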
+ */ +static unsigned long +clk_regmap_clk_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + u32 cdiv, pre_div = 1; + u64 rate; + const struct clk_div_table *clkt; + + if (pll->fixed_div) { + pre_div = pll->fixed_div; + } else { + regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv); + cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1); + + for (clkt = pll->div_table; clkt->div; clkt++) { + if (clkt->val == cdiv) + pre_div = clkt->div; + } + } + + rate = clk_fepll_vco_calc_rate(pll, parent_rate); + do_div(rate, pre_div); + + return rate; +}; + +static const struct clk_ops clk_fepll_div_ops = { + .recalc_rate = clk_regmap_clk_div_recalc_rate, +}; + +static struct clk_fepll gcc_apss_sdcc_clk = { + .fixed_div = 28, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "ddrpllsdcc", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_apss_ddrpll_vco, +}; + +static struct clk_fepll gcc_fepll125_clk = { + .fixed_div = 32, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll125", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll125dly_clk = { + .fixed_div = 32, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll125dly", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll200_clk = { + .fixed_div = 20, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll200", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll500_clk = { + .fixed_div = 8, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll500", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static const struct clk_div_table fepllwcss_clk_div_table[] = { + { 0, 15 }, + { 1, 16 }, + { 2, 18 }, + { 3, 20 }, + { }, +}; + +static struct clk_fepll gcc_fepllwcss2g_clk = { + .cdiv.reg = 0x2f020, + .cdiv.shift = 8, + .cdiv.width = 2, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepllwcss2g", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .div_table = fepllwcss_clk_div_table, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepllwcss5g_clk = { + .cdiv.reg = 0x2f020, + .cdiv.shift = 12, + .cdiv.width = 2, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepllwcss5g", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .div_table = fepllwcss_clk_div_table, + .pll_vco = &gcc_fepll_vco, +}; + +static const struct freq_tbl ftbl_gcc_pcnoc_ahb_clk[] = { + F(48000000, P_XO, 1, 0, 0), + F(100000000, P_FEPLL200, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcnoc_ahb_clk_src = { + .cmd_rcgr = 0x21024, + .hid_width = 5, + .parent_map = gcc_xo_200_500_map, + .freq_tbl = ftbl_gcc_pcnoc_ahb_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcnoc_ahb_clk_src", + .parent_names = gcc_xo_200_500, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch 
pcnoc_clk_src = { + .halt_reg = 0x21030, + .clkr = { + .enable_reg = 0x21030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "pcnoc_clk_src", + .parent_names = (const char *[]){ + "gcc_pcnoc_ahb_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + .flags = CLK_SET_RATE_PARENT | + CLK_IS_CRITICAL, + }, + }, +}; + static struct clk_regmap *gcc_ipq4019_clocks[] = { [AUDIO_CLK_SRC] = &audio_clk_src.clkr, [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, @@ -1214,6 +1634,16 @@ static struct clk_regmap *gcc_ipq4019_clocks[] = { [GCC_WCSS5G_CLK] = &gcc_wcss5g_clk.clkr, [GCC_WCSS5G_REF_CLK] = &gcc_wcss5g_ref_clk.clkr, [GCC_WCSS5G_RTC_CLK] = &gcc_wcss5g_rtc_clk.clkr, + [GCC_SDCC_PLLDIV_CLK] = &gcc_apss_sdcc_clk.cdiv.clkr, + [GCC_FEPLL125_CLK] = &gcc_fepll125_clk.cdiv.clkr, + [GCC_FEPLL125DLY_CLK] = &gcc_fepll125dly_clk.cdiv.clkr, + [GCC_FEPLL200_CLK] = &gcc_fepll200_clk.cdiv.clkr, + [GCC_FEPLL500_CLK] = &gcc_fepll500_clk.cdiv.clkr, + [GCC_FEPLL_WCSS2G_CLK] = &gcc_fepllwcss2g_clk.cdiv.clkr, + [GCC_FEPLL_WCSS5G_CLK] = &gcc_fepllwcss5g_clk.cdiv.clkr, + [GCC_APSS_CPU_PLLDIV_CLK] = &gcc_apss_cpu_plldiv_clk.cdiv.clkr, + [GCC_PCNOC_AHB_CLK_SRC] = &gcc_pcnoc_ahb_clk_src.clkr, + [GCC_PCNOC_AHB_CLK] = &pcnoc_clk_src.clkr, }; static const struct qcom_reset_map gcc_ipq4019_resets[] = { @@ -1294,7 +1724,7 @@ static const struct regmap_config gcc_ipq4019_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x2dfff, + .max_register = 0x2ffff, .fast_io = true, }; @@ -1312,23 +1742,44 @@ static const struct of_device_id gcc_ipq4019_match_table[] = { }; MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table); +static int +gcc_ipq4019_cpu_clk_notifier_fn(struct notifier_block *nb, + unsigned long action, void *data) +{ + int err = 0; + + if (action == PRE_RATE_CHANGE) + err = clk_rcg2_ops.set_parent(&apps_clk_src.clkr.hw, + gcc_ipq4019_cpu_safe_parent); + + return notifier_from_errno(err); +} + +static struct notifier_block gcc_ipq4019_cpu_clk_notifier = { + .notifier_call = gcc_ipq4019_cpu_clk_notifier_fn, +}; + static int gcc_ipq4019_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; + int err; - clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000); + err = qcom_cc_probe(pdev, &gcc_ipq4019_desc); + if (err) + return err; - return qcom_cc_probe(pdev, &gcc_ipq4019_desc); + return clk_notifier_register(apps_clk_src.clkr.hw.clk, + &gcc_ipq4019_cpu_clk_notifier); +} + +static int gcc_ipq4019_remove(struct platform_device *pdev) +{ + return clk_notifier_unregister(apps_clk_src.clkr.hw.clk, + &gcc_ipq4019_cpu_clk_notifier); } static struct platform_driver gcc_ipq4019_driver = { .probe = gcc_ipq4019_probe, + .remove = gcc_ipq4019_remove, .driver = { .name = "qcom,gcc-ipq4019", .of_match_table = gcc_ipq4019_match_table, diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c index 581a17f67379..b99dd406e907 100644 --- a/drivers/clk/qcom/gcc-mdm9615.c +++ b/drivers/clk/qcom/gcc-mdm9615.c @@ -1563,6 +1563,34 @@ static struct clk_branch rpm_msg_ram_h_clk = { }, }; +static struct clk_branch ebi2_clk = { + 
.hwcg_reg = 0x2664, + .hwcg_bit = 6, + .halt_reg = 0x2fcc, + .halt_bit = 24, + .clkr = { + .enable_reg = 0x2664, + .enable_mask = BIT(6) | BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "ebi2_clk", + .ops = &clk_branch_ops, + }, + }, +}; + +static struct clk_branch ebi2_aon_clk = { + .halt_reg = 0x2fcc, + .halt_bit = 23, + .clkr = { + .enable_reg = 0x2664, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "ebi2_aon_clk", + .ops = &clk_branch_ops, + }, + }, +}; + static struct clk_hw *gcc_mdm9615_hws[] = { &cxo.hw, }; @@ -1637,6 +1665,8 @@ static struct clk_regmap *gcc_mdm9615_clks[] = { [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr, [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr, [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr, + [EBI2_CLK] = &ebi2_clk.clkr, + [EBI2_AON_CLK] = &ebi2_aon_clk.clkr, }; static const struct qcom_reset_map gcc_mdm9615_resets[] = { diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 8afd8304a070..7983288d9141 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -1888,6 +1888,23 @@ static struct clk_branch gcc_sdcc1_apps_clk = { }, }; +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x04c8, + .clkr = { + .enable_reg = 0x04c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sdcc1_ahb_clk", + .parent_names = (const char *[]){ + "periph_noc_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_sdcc2_apps_clk = { .halt_reg = 0x0504, .clkr = { @@ -2231,6 +2248,7 @@ static struct clk_regmap *gcc_msm8994_clocks[] = { [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr, [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr, [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr, [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 4b1fc1730d29..8abc200d4fd3 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -3448,6 +3448,7 @@ static const struct qcom_reset_map gcc_msm8996_resets[] = { [GCC_MSMPU_BCR] = { 0x8d000 }, [GCC_MSS_Q6_BCR] = { 0x8e000 }, [GCC_QREFS_VBG_CAL_BCR] = { 0x88020 }, + [GCC_MSS_RESTART] = { 0x8f008 }, }; static const struct regmap_config gcc_msm8996_regmap_config = { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 288186cce0ae..a4f3580587b7 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -63,11 +63,26 @@ static int gdsc_hwctrl(struct gdsc *sc, bool en) return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val); } +static int gdsc_poll_status(struct gdsc *sc, unsigned int reg, bool en) +{ + ktime_t start; + + start = ktime_get(); + do { + if (gdsc_is_enabled(sc, reg) == en) + return 0; + } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US); + + if (gdsc_is_enabled(sc, reg) == en) + return 0; + + return -ETIMEDOUT; +} + static int gdsc_toggle_logic(struct gdsc *sc, bool en) { int ret; u32 val = en ? 
0 : SW_COLLAPSE_MASK; - ktime_t start; unsigned int status_reg = sc->gdscr; ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val); @@ -100,16 +115,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en) udelay(1); } - start = ktime_get(); - do { - if (gdsc_is_enabled(sc, status_reg) == en) - return 0; - } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US); - - if (gdsc_is_enabled(sc, status_reg) == en) - return 0; - - return -ETIMEDOUT; + return gdsc_poll_status(sc, status_reg, en); } static inline int gdsc_deassert_reset(struct gdsc *sc) @@ -188,8 +194,20 @@ static int gdsc_enable(struct generic_pm_domain *domain) udelay(1); /* Turn on HW trigger mode if supported */ - if (sc->flags & HW_CTRL) - return gdsc_hwctrl(sc, true); + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret) + return ret; + /* + * Wait for the GDSC to go through a power down and + * up cycle. In case a firmware ends up polling status + * bits for the gdsc, it might read an 'on' status before + * the GDSC can finish the power cycle. + * We wait 1us before returning to ensure the firmware + * can't immediately poll the status bits. + */ + udelay(1); + } return 0; } @@ -204,9 +222,23 @@ static int gdsc_disable(struct generic_pm_domain *domain) /* Turn off HW trigger mode if supported */ if (sc->flags & HW_CTRL) { + unsigned int reg; + ret = gdsc_hwctrl(sc, false); if (ret < 0) return ret; + /* + * Wait for the GDSC to go through a power down and + * up cycle. In case we end up polling status + * bits for the gdsc before the power cycle is completed + * it might read an 'on' status wrongly. + */ + udelay(1); + + reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr; + ret = gdsc_poll_status(sc, reg, true); + if (ret) + return ret; } if (sc->pwrsts & PWRSTS_OFF) diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index b533f99550e1..4067216bf31f 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -91,6 +91,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) value |= bitmask; cpg_mstp_write(group, value, group->smstpcr); + if (!group->mstpsr) { + /* dummy read to ensure write has completed */ + cpg_mstp_read(group, group->smstpcr); + barrier_data(group->smstpcr); + } + spin_unlock_irqrestore(&group->lock, flags); if (!enable || !group->mstpsr) @@ -141,9 +147,9 @@ static const struct clk_ops cpg_mstp_clock_ops = { .is_enabled = cpg_mstp_clock_is_enabled, }; -static struct clk * __init -cpg_mstp_clock_register(const char *name, const char *parent_name, - unsigned int index, struct mstp_clock_group *group) +static struct clk * __init cpg_mstp_clock_register(const char *name, + const char *parent_name, unsigned int index, + struct mstp_clock_group *group) { struct clk_init_data init; struct mstp_clock *clock; @@ -158,6 +164,11 @@ cpg_mstp_clock_register(const char *name, const char *parent_name, init.name = name; init.ops = &cpg_mstp_clock_ops; init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; + /* INTC-SYS is the module clock of the GIC, and must not be disabled */ + if (!strcmp(name, "intc-sys")) { + pr_debug("MSTP %s setting CLK_IS_CRITICAL\n", name); + init.flags |= CLK_IS_CRITICAL; + } init.parent_names = &parent_name; init.num_parents = 1; diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index 50698a7d9074..bfffdb00df97 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -221,6 +221,7 @@ static const struct 
mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4), DEF_MOD("i2c6", 918, R8A7795_CLK_S3D2), DEF_MOD("i2c5", 919, R8A7795_CLK_S3D2), + DEF_MOD("i2c-dvfs", 926, R8A7795_CLK_CP), DEF_MOD("i2c4", 927, R8A7795_CLK_S3D2), DEF_MOD("i2c3", 928, R8A7795_CLK_S3D2), DEF_MOD("i2c2", 929, R8A7795_CLK_S3D2), diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index 7d298c57a3e0..11e084a56b0d 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -103,7 +103,9 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1), + DEF_DIV6P1("canfd", R8A7796_CLK_CANFD, CLK_PLL1_DIV4, 0x244), DEF_DIV6P1("csi0", R8A7796_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A7796_CLK_MSO, CLK_PLL1_DIV4, 0x014), DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), @@ -117,6 +119,10 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("scif3", 204, R8A7796_CLK_S3D4), DEF_MOD("scif1", 206, R8A7796_CLK_S3D4), DEF_MOD("scif0", 207, R8A7796_CLK_S3D4), + DEF_MOD("msiof3", 208, R8A7796_CLK_MSO), + DEF_MOD("msiof2", 209, R8A7796_CLK_MSO), + DEF_MOD("msiof1", 210, R8A7796_CLK_MSO), + DEF_MOD("msiof0", 211, R8A7796_CLK_MSO), DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3), DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3), DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3), @@ -181,8 +187,12 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("gpio2", 910, R8A7796_CLK_S3D4), DEF_MOD("gpio1", 911, R8A7796_CLK_S3D4), DEF_MOD("gpio0", 912, R8A7796_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A7796_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A7796_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A7796_CLK_S3D4), DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6), DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6), + DEF_MOD("i2c-dvfs", 926, R8A7796_CLK_CP), DEF_MOD("i2c4", 927, R8A7796_CLK_S0D6), DEF_MOD("i2c3", 928, R8A7796_CLK_S0D6), DEF_MOD("i2c2", 929, R8A7796_CLK_S3D2), diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 8359ce75db7a..eadcbd43ff88 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -16,6 +16,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clk/renesas.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mod_devicetable.h> @@ -25,6 +26,7 @@ #include <linux/platform_device.h> #include <linux/pm_clock.h> #include <linux/pm_domain.h> +#include <linux/reset-controller.h> #include <linux/slab.h> #include <dt-bindings/clock/renesas-cpg-mssr.h> @@ -43,7 +45,7 @@ * Module Standby and Software Reset register offets. * * If the registers exist, these are valid for SH-Mobile, R-Mobile, - * R-Car Gen 2, and R-Car Gen 3. + * R-Car Gen2, R-Car Gen3, and RZ/G1. * These are NOT valid for R-Car Gen1 and RZ/A1! 
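 * (These SRCR/SRSTCLR pairs are what the reset controller added below relies on: cpg_mssr_reset() sets the module bit in SRCR, waits 35 us for at least one cycle of the ~32 kHz RCLK, then writes the same bit to SRSTCLR to release the reset.)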
*/ @@ -96,18 +98,22 @@ static const u16 srcr[] = { /** * Clock Pulse Generator / Module Standby and Software Reset Private Data * + * @rcdev: Optional reset controller entity * @dev: CPG/MSSR device * @base: CPG/MSSR register block base address - * @mstp_lock: protects writes to SMSTPCR + * @rmw_lock: protects RMW register accesses * @clks: Array containing all Core and Module Clocks * @num_core_clks: Number of Core Clocks in clks[] * @num_mod_clks: Number of Module Clocks in clks[] * @last_dt_core_clk: ID of the last Core Clock exported to DT */ struct cpg_mssr_priv { +#ifdef CONFIG_RESET_CONTROLLER + struct reset_controller_dev rcdev; +#endif struct device *dev; void __iomem *base; - spinlock_t mstp_lock; + spinlock_t rmw_lock; struct clk **clks; unsigned int num_core_clks; @@ -144,7 +150,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk, enable ? "ON" : "OFF"); - spin_lock_irqsave(&priv->mstp_lock, flags); + spin_lock_irqsave(&priv->rmw_lock, flags); value = readl(priv->base + SMSTPCR(reg)); if (enable) @@ -153,7 +159,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) value |= bitmask; writel(value, priv->base + SMSTPCR(reg)); - spin_unlock_irqrestore(&priv->mstp_lock, flags); + spin_unlock_irqrestore(&priv->rmw_lock, flags); if (!enable) return 0; @@ -346,17 +352,10 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; for (i = 0; i < info->num_crit_mod_clks; i++) if (id == info->crit_mod_clks[i]) { -#ifdef CLK_ENABLE_HAND_OFF - dev_dbg(dev, "MSTP %s setting CLK_ENABLE_HAND_OFF\n", + dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n", mod->name); - init.flags |= CLK_ENABLE_HAND_OFF; + init.flags |= CLK_IS_CRITICAL; break; -#else - dev_dbg(dev, "Ignoring MSTP %s to prevent disabling\n", - mod->name); - kfree(clock); - return; -#endif } parent_name = __clk_get_name(parent); @@ -501,6 +500,122 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev, return 0; } +#ifdef CONFIG_RESET_CONTROLLER + +#define rcdev_to_priv(x) container_of(x, struct cpg_mssr_priv, rcdev) + +static int cpg_mssr_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = id / 32; + unsigned int bit = id % 32; + u32 bitmask = BIT(bit); + unsigned long flags; + u32 value; + + dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); + + /* Reset module */ + spin_lock_irqsave(&priv->rmw_lock, flags); + value = readl(priv->base + SRCR(reg)); + value |= bitmask; + writel(value, priv->base + SRCR(reg)); + spin_unlock_irqrestore(&priv->rmw_lock, flags); + + /* Wait for at least one cycle of the RCLK clock (@ ca. 
32 kHz) */ + udelay(35); + + /* Release module from reset state */ + writel(bitmask, priv->base + SRSTCLR(reg)); + + return 0; +} + +static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = id / 32; + unsigned int bit = id % 32; + u32 bitmask = BIT(bit); + unsigned long flags; + u32 value; + + dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); + + spin_lock_irqsave(&priv->rmw_lock, flags); + value = readl(priv->base + SRCR(reg)); + value |= bitmask; + writel(value, priv->base + SRCR(reg)); + spin_unlock_irqrestore(&priv->rmw_lock, flags); + return 0; +} + +static int cpg_mssr_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = id / 32; + unsigned int bit = id % 32; + u32 bitmask = BIT(bit); + + dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit); + + writel(bitmask, priv->base + SRSTCLR(reg)); + return 0; +} + +static int cpg_mssr_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = id / 32; + unsigned int bit = id % 32; + u32 bitmask = BIT(bit); + + return !!(readl(priv->base + SRCR(reg)) & bitmask); +} + +static const struct reset_control_ops cpg_mssr_reset_ops = { + .reset = cpg_mssr_reset, + .assert = cpg_mssr_assert, + .deassert = cpg_mssr_deassert, + .status = cpg_mssr_status, +}; + +static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev); + unsigned int unpacked = reset_spec->args[0]; + unsigned int idx = MOD_CLK_PACK(unpacked); + + if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) { + dev_err(priv->dev, "Invalid reset index %u\n", unpacked); + return -EINVAL; + } + + return idx; +} + +static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv) +{ + priv->rcdev.ops = &cpg_mssr_reset_ops; + priv->rcdev.of_node = priv->dev->of_node; + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.of_xlate = cpg_mssr_reset_xlate; + priv->rcdev.nr_resets = priv->num_mod_clks; + return devm_reset_controller_register(priv->dev, &priv->rcdev); +} + +#else /* !CONFIG_RESET_CONTROLLER */ +static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv) +{ + return 0; +} +#endif /* !CONFIG_RESET_CONTROLLER */ + + static const struct of_device_id cpg_mssr_match[] = { #ifdef CONFIG_ARCH_R8A7743 { @@ -557,7 +672,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev) return -ENOMEM; priv->dev = dev; - spin_lock_init(&priv->mstp_lock); + spin_lock_init(&priv->rmw_lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(dev, res); @@ -598,6 +713,10 @@ static int __init cpg_mssr_probe(struct platform_device *pdev) if (error) return error; + error = cpg_mssr_reset_controller_register(priv); + if (error) + return error; + return 0; } diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index 16e098c36f90..141971488f40 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -8,6 +8,7 @@ obj-y += clk-pll.o obj-y += clk-cpu.o obj-y += clk-inverter.o obj-y += clk-mmc-phase.o +obj-y += clk-muxgrf.o obj-y += clk-ddr.o obj-$(CONFIG_RESET_CONTROLLER) += softrst.o @@ -16,5 +17,6 @@ obj-y += clk-rk3036.o obj-y += clk-rk3188.o obj-y += clk-rk3228.o obj-y += clk-rk3288.o +obj-y += clk-rk3328.o obj-y += clk-rk3368.o obj-y 
+= clk-rk3399.o diff --git a/drivers/clk/rockchip/clk-muxgrf.c b/drivers/clk/rockchip/clk-muxgrf.c new file mode 100644 index 000000000000..4f291180a26b --- /dev/null +++ b/drivers/clk/rockchip/clk-muxgrf.c @@ -0,0 +1,102 @@ +/* + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/slab.h> +#include <linux/bitops.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include "clk.h" + +struct rockchip_muxgrf_clock { + struct clk_hw hw; + struct regmap *regmap; + u32 reg; + u32 shift; + u32 width; + int flags; +}; + +#define to_muxgrf_clock(_hw) container_of(_hw, struct rockchip_muxgrf_clock, hw) + +static u8 rockchip_muxgrf_get_parent(struct clk_hw *hw) +{ + struct rockchip_muxgrf_clock *mux = to_muxgrf_clock(hw); + unsigned int mask = GENMASK(mux->width - 1, 0); + unsigned int val; + + regmap_read(mux->regmap, mux->reg, &val); + + val >>= mux->shift; + val &= mask; + + return val; +} + +static int rockchip_muxgrf_set_parent(struct clk_hw *hw, u8 index) +{ + struct rockchip_muxgrf_clock *mux = to_muxgrf_clock(hw); + unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift); + unsigned int val; + + val = index; + val <<= mux->shift; + + if (mux->flags & CLK_MUX_HIWORD_MASK) + return regmap_write(mux->regmap, mux->reg, val | (mask << 16)); + else + return regmap_update_bits(mux->regmap, mux->reg, mask, val); +} + +static const struct clk_ops rockchip_muxgrf_clk_ops = { + .get_parent = rockchip_muxgrf_get_parent, + .set_parent = rockchip_muxgrf_set_parent, + .determine_rate = __clk_mux_determine_rate, +}; + +struct clk *rockchip_clk_register_muxgrf(const char *name, + const char *const *parent_names, u8 num_parents, + int flags, struct regmap *regmap, int reg, + int shift, int width, int mux_flags) +{ + struct rockchip_muxgrf_clock *muxgrf_clock; + struct clk_init_data init; + struct clk *clk; + + if (IS_ERR(regmap)) { + pr_err("%s: regmap not available\n", __func__); + return ERR_PTR(-ENOTSUPP); + } + + muxgrf_clock = kmalloc(sizeof(*muxgrf_clock), GFP_KERNEL); + if (!muxgrf_clock) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.flags = flags; + init.num_parents = num_parents; + init.parent_names = parent_names; + init.ops = &rockchip_muxgrf_clk_ops; + + muxgrf_clock->hw.init = &init; + muxgrf_clock->regmap = regmap; + muxgrf_clock->reg = reg; + muxgrf_clock->shift = shift; + muxgrf_clock->width = width; + muxgrf_clock->flags = mux_flags; + + clk = clk_register(NULL, &muxgrf_clock->hw); + if (IS_ERR(clk)) + kfree(muxgrf_clock); + + return clk; +} diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index 6ed605776abd..eec51893a7e6 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -29,6 +29,7 @@ #define PLL_MODE_SLOW 0x0 #define PLL_MODE_NORM 0x1 #define PLL_MODE_DEEP 0x2 +#define PLL_RK3328_MODE_MASK 0x1 struct rockchip_clk_pll { struct clk_hw hw; @@ -848,7 +849,8 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, struct clk *pll_clk, *mux_clk; char pll_name[20]; - if (num_parents != 2) { + if ((pll_type != pll_rk3328 && 
num_parents != 2) || + (pll_type == pll_rk3328 && num_parents != 1)) { pr_err("%s: needs two parent clocks\n", __func__); return ERR_PTR(-EINVAL); } @@ -865,13 +867,17 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, pll_mux = &pll->pll_mux; pll_mux->reg = ctx->reg_base + mode_offset; pll_mux->shift = mode_shift; - pll_mux->mask = PLL_MODE_MASK; + if (pll_type == pll_rk3328) + pll_mux->mask = PLL_RK3328_MODE_MASK; + else + pll_mux->mask = PLL_MODE_MASK; pll_mux->flags = 0; pll_mux->lock = &ctx->lock; pll_mux->hw.init = &init; if (pll_type == pll_rk3036 || pll_type == pll_rk3066 || + pll_type == pll_rk3328 || pll_type == pll_rk3399) pll_mux->flags |= CLK_MUX_HIWORD_MASK; @@ -884,7 +890,10 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, init.flags = CLK_SET_RATE_PARENT; init.ops = pll->pll_mux_ops; init.parent_names = pll_parents; - init.num_parents = ARRAY_SIZE(pll_parents); + if (pll_type == pll_rk3328) + init.num_parents = 2; + else + init.num_parents = ARRAY_SIZE(pll_parents); mux_clk = clk_register(NULL, &pll_mux->hw); if (IS_ERR(mux_clk)) @@ -918,6 +927,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, switch (pll_type) { case pll_rk3036: + case pll_rk3328: if (!pll->rate_table || IS_ERR(ctx->grf)) init.ops = &rockchip_rk3036_pll_clk_norate_ops; else diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 062ef4960244..00ad0e5f8d66 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -507,8 +507,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS), GATE(PCLK_EFUSE, "pclk_efuse", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 2, GFLAGS), GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 3, GFLAGS), - GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS), - GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS), + GATE(PCLK_DDRUPCTL, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS), + GATE(PCLK_PUBL, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS), GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS), GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 5, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 39af05a589b3..68ba7d4105e7 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -198,6 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" }; PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" }; PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" }; +PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" }; PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m", "sclk_otgphy0_480m" }; PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" }; @@ -398,14 +399,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0, RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(3), 11, GFLAGS), - /* - * We use aclk_vdpu by default GRF_SOC_CON0[7] setting in system, - * so we ignore the mux and make clocks nodes as following, - */ - GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vdpu", 0, + MUXGRF(0, "aclk_vcodec_pre", 
mux_aclk_vcodec_pre_p, 0, + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS), + GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0, RK3288_CLKGATE_CON(9), 0, GFLAGS), - FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vdpu", 0, 1, 4, + FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vcodec_pre", 0, 1, 4, RK3288_CLKGATE_CON(3), 10, GFLAGS), GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0, @@ -469,7 +468,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0, RK3288_CLKSEL_CON(26), 8, 1, MFLAGS, RK3288_CLKGATE_CON(3), 7, GFLAGS), - COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0, + COMPOSITE_NOGATE(SCLK_VIP_OUT, "sclk_vip_out", mux_vip_out_p, 0, RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS), DIV(0, "pclk_pd_alive", "gpll", 0, @@ -690,7 +689,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { /* aclk_peri gates */ GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 2, GFLAGS), GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS), - GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 11, GFLAGS), + GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS), GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(8), 12, GFLAGS), GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS), GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS), @@ -753,12 +752,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS), GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS), GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS), - GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 12, GFLAGS), + GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS), /* pclk_pd_pmu gates */ GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS), GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS), - GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 2, GFLAGS), + GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS), GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 3, GFLAGS), GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS), @@ -767,7 +766,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS), GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS), GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 9, GFLAGS), - GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 10, GFLAGS), + GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS), GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS), GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS), GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS), @@ -783,17 +782,17 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { /* aclk_vio0 
gates */ GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS), GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS), - GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 11, GFLAGS), + GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS), GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS), /* aclk_vio1 gates */ GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS), GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS), - GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 12, GFLAGS), + GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS), /* aclk_rga_pre gates */ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS), - GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 13, GFLAGS), + GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS), /* * Other ungrouped clocks. @@ -801,15 +800,22 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(0, "pclk_vip_in", "ext_vip", 0, RK3288_CLKGATE_CON(16), 0, GFLAGS), INVERTER(0, "pclk_vip", "pclk_vip_in", RK3288_CLKSEL_CON(29), 4, IFLAGS), - GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS), + GATE(PCLK_ISP_IN, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS), INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS), }; static const char *const rk3288_critical_clocks[] __initconst = { "aclk_cpu", "aclk_peri", + "aclk_peri_niu", + "aclk_vio0_niu", + "aclk_vio1_niu", + "aclk_rga_niu", "hclk_peri", + "hclk_vio_niu", + "pclk_alive_niu", "pclk_pd_pmu", + "pclk_pmu_niu", }; static void __iomem *rk3288_cru_base; diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c new file mode 100644 index 000000000000..1e384e143504 --- /dev/null +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -0,0 +1,895 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
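+ * + * (A note on the rate tables that follow, assuming the usual 24 MHz xin24m parent: an RK3036-style PLL rate works out to 24 MHz * fbdiv / (refdiv * postdiv1 * postdiv2), e.g. RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0) is 24 MHz * 67 with all dividers at 1.)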
+ */ + +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> +#include <dt-bindings/clock/rk3328-cru.h> +#include "clk.h" + +#define RK3328_GRF_SOC_STATUS0 0x480 +#define RK3328_GRF_MAC_CON1 0x904 +#define RK3328_GRF_MAC_CON2 0x908 + +enum rk3328_plls { + apll, dpll, cpll, gpll, npll, +}; + +static struct rockchip_pll_rate_table rk3328_pll_rates[] = { + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ + RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), + RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), + RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), + RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), + RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), + RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), + RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), + RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), + RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), + RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), + RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), + RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), + RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), + RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), + RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), + RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), + RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), + RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), + RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), + RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), + RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), + RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), + RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0), + RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0), + RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0), + RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0), + RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0), + RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0), + RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0), + RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0), + RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0), + RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0), + RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0), + RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0), + RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0), + RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0), + RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0), + RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0), + RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0), + RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0), + RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0), + RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0), + { /* sentinel */ }, +}; + +static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = { + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ + RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217), + /* vco = 1016064000 */ + RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088), + /* vco = 983040000 */ + RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088), + /* vco = 983040000 */ + RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088), + /* vco = 860156000 */ + RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894), + /* vco = 903168000 */ + RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329), + /* vco = 819200000 */ + { /* sentinel */ }, +}; + +#define RK3328_DIV_ACLKM_MASK 0x7 +#define RK3328_DIV_ACLKM_SHIFT 4 +#define RK3328_DIV_PCLK_DBG_MASK 0xf +#define RK3328_DIV_PCLK_DBG_SHIFT 0 + +#define RK3328_CLKSEL1(_aclk_core, _pclk_dbg) \ +{ \ + .reg = RK3328_CLKSEL_CON(1), \ + .val = HIWORD_UPDATE(_aclk_core, RK3328_DIV_ACLKM_MASK, \ + 
RK3328_DIV_ACLKM_SHIFT) | \ + HIWORD_UPDATE(_pclk_dbg, RK3328_DIV_PCLK_DBG_MASK, \ + RK3328_DIV_PCLK_DBG_SHIFT), \ +} + +#define RK3328_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \ +{ \ + .prate = _prate, \ + .divs = { \ + RK3328_CLKSEL1(_aclk_core, _pclk_dbg), \ + }, \ +} + +static struct rockchip_cpuclk_rate_table rk3328_cpuclk_rates[] __initdata = { + RK3328_CPUCLK_RATE(1800000000, 1, 7), + RK3328_CPUCLK_RATE(1704000000, 1, 7), + RK3328_CPUCLK_RATE(1608000000, 1, 7), + RK3328_CPUCLK_RATE(1512000000, 1, 7), + RK3328_CPUCLK_RATE(1488000000, 1, 5), + RK3328_CPUCLK_RATE(1416000000, 1, 5), + RK3328_CPUCLK_RATE(1392000000, 1, 5), + RK3328_CPUCLK_RATE(1296000000, 1, 5), + RK3328_CPUCLK_RATE(1200000000, 1, 5), + RK3328_CPUCLK_RATE(1104000000, 1, 5), + RK3328_CPUCLK_RATE(1008000000, 1, 5), + RK3328_CPUCLK_RATE(912000000, 1, 5), + RK3328_CPUCLK_RATE(816000000, 1, 3), + RK3328_CPUCLK_RATE(696000000, 1, 3), + RK3328_CPUCLK_RATE(600000000, 1, 3), + RK3328_CPUCLK_RATE(408000000, 1, 1), + RK3328_CPUCLK_RATE(312000000, 1, 1), + RK3328_CPUCLK_RATE(216000000, 1, 1), + RK3328_CPUCLK_RATE(96000000, 1, 1), +}; + +static const struct rockchip_cpuclk_reg_data rk3328_cpuclk_data = { + .core_reg = RK3328_CLKSEL_CON(0), + .div_core_shift = 0, + .div_core_mask = 0x1f, + .mux_core_alt = 1, + .mux_core_main = 3, + .mux_core_shift = 6, + .mux_core_mask = 0x3, +}; + +PNAME(mux_pll_p) = { "xin24m" }; + +PNAME(mux_2plls_p) = { "cpll", "gpll" }; +PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" }; +PNAME(mux_cpll_gpll_apll_p) = { "cpll", "gpll", "apll" }; +PNAME(mux_2plls_xin24m_p) = { "cpll", "gpll", "xin24m" }; +PNAME(mux_2plls_hdmiphy_p) = { "cpll", "gpll", + "dummy_hdmiphy" }; +PNAME(mux_4plls_p) = { "cpll", "gpll", + "dummy_hdmiphy", + "usb480m" }; +PNAME(mux_2plls_u480m_p) = { "cpll", "gpll", + "usb480m" }; +PNAME(mux_2plls_24m_u480m_p) = { "cpll", "gpll", + "xin24m", "usb480m" }; + +PNAME(mux_ddrphy_p) = { "dpll", "apll", "cpll" }; +PNAME(mux_armclk_p) = { "apll_core", + "gpll_core", + "dpll_core", + "npll_core"}; +PNAME(mux_hdmiphy_p) = { "hdmi_phy", "xin24m" }; +PNAME(mux_usb480m_p) = { "usb480m_phy", + "xin24m" }; + +PNAME(mux_i2s0_p) = { "clk_i2s0_div", + "clk_i2s0_frac", + "xin12m", + "xin12m" }; +PNAME(mux_i2s1_p) = { "clk_i2s1_div", + "clk_i2s1_frac", + "clkin_i2s1", + "xin12m" }; +PNAME(mux_i2s2_p) = { "clk_i2s2_div", + "clk_i2s2_frac", + "clkin_i2s2", + "xin12m" }; +PNAME(mux_i2s1out_p) = { "clk_i2s1", "xin12m"}; +PNAME(mux_i2s2out_p) = { "clk_i2s2", "xin12m" }; +PNAME(mux_spdif_p) = { "clk_spdif_div", + "clk_spdif_frac", + "xin12m", + "xin12m" }; +PNAME(mux_uart0_p) = { "clk_uart0_div", + "clk_uart0_frac", + "xin24m" }; +PNAME(mux_uart1_p) = { "clk_uart1_div", + "clk_uart1_frac", + "xin24m" }; +PNAME(mux_uart2_p) = { "clk_uart2_div", + "clk_uart2_frac", + "xin24m" }; + +PNAME(mux_sclk_cif_p) = { "clk_cif_src", + "xin24m" }; +PNAME(mux_dclk_lcdc_p) = { "hdmiphy", + "dclk_lcdc_src" }; +PNAME(mux_aclk_peri_pre_p) = { "cpll_peri", + "gpll_peri", + "hdmiphy_peri" }; +PNAME(mux_ref_usb3otg_src_p) = { "xin24m", + "clk_usb3otg_ref" }; +PNAME(mux_xin24m_32k_p) = { "xin24m", + "clk_rtc32k" }; +PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", + "gmac_clkin" }; +PNAME(mux_mac2phy_src_p) = { "clk_mac2phy_src", + "phy_50m_out" }; + +static struct rockchip_pll_clock rk3328_pll_clks[] __initdata = { + [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p, + 0, RK3328_PLL_CON(0), + RK3328_MODE_CON, 0, 4, 0, rk3328_pll_frac_rates), + [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p, + 0, RK3328_PLL_CON(8), + RK3328_MODE_CON, 4, 
3, 0, NULL), + [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p, + 0, RK3328_PLL_CON(16), + RK3328_MODE_CON, 8, 2, 0, rk3328_pll_rates), + [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p, + 0, RK3328_PLL_CON(24), + RK3328_MODE_CON, 12, 1, 0, rk3328_pll_frac_rates), + [npll] = PLL(pll_rk3328, PLL_NPLL, "npll", mux_pll_p, + 0, RK3328_PLL_CON(40), + RK3328_MODE_CON, 1, 0, 0, rk3328_pll_rates), +}; + +#define MFLAGS CLK_MUX_HIWORD_MASK +#define DFLAGS CLK_DIVIDER_HIWORD_MASK +#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) + +static struct rockchip_clk_branch rk3328_i2s0_fracmux __initdata = + MUX(0, "i2s0_pre", mux_i2s0_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(6), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_i2s1_fracmux __initdata = + MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(8), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_i2s2_fracmux __initdata = + MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(10), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_spdif_fracmux __initdata = + MUX(SCLK_SPDIF, "sclk_spdif", mux_spdif_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(12), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_uart0_fracmux __initdata = + MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(14), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_uart1_fracmux __initdata = + MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(16), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_uart2_fracmux __initdata = + MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(18), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { + /* + * Clock-Architecture Diagram 1 + */ + + DIV(0, "clk_24m", "xin24m", CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(2), 8, 5, DFLAGS), + COMPOSITE(SCLK_RTC32K, "clk_rtc32k", mux_2plls_xin24m_p, 0, + RK3328_CLKSEL_CON(38), 14, 2, MFLAGS, 0, 14, DFLAGS, + RK3328_CLKGATE_CON(0), 11, GFLAGS), + + /* PD_MISC */ + MUX(HDMIPHY, "hdmiphy", mux_hdmiphy_p, CLK_SET_RATE_PARENT, + RK3328_MISC_CON, 13, 1, MFLAGS), + MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT, + RK3328_MISC_CON, 15, 1, MFLAGS), + + /* + * Clock-Architecture Diagram 2 + */ + + /* PD_CORE */ + GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(0), 0, GFLAGS), + GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(0), 2, GFLAGS), + GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(0), 1, GFLAGS), + GATE(0, "npll_core", "npll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(0), 12, GFLAGS), + COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, + RK3328_CLKGATE_CON(7), 0, GFLAGS), + COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(1), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, + RK3328_CLKGATE_CON(7), 1, GFLAGS), + GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(13), 0, GFLAGS), + GATE(0, "aclk_gic400", "aclk_core", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(13), 1, GFLAGS), + + GATE(0, "clk_jtag", "jtag_clkin", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(7), 2, GFLAGS), + + /* PD_GPU */ + COMPOSITE(0, "aclk_gpu_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(44), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 6, GFLAGS), + GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 
CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(14), 0, GFLAGS), + GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(14), 1, GFLAGS), + + /* PD_DDR */ + COMPOSITE(0, "clk_ddr", mux_ddrphy_p, CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(3), 8, 2, MFLAGS, 0, 3, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RK3328_CLKGATE_CON(0), 4, GFLAGS), + GATE(0, "clk_ddrmsch", "clk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 6, GFLAGS), + GATE(0, "clk_ddrupctl", "clk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 5, GFLAGS), + GATE(0, "aclk_ddrupctl", "clk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 4, GFLAGS), + GATE(0, "clk_ddrmon", "xin24m", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(0), 6, GFLAGS), + + COMPOSITE(PCLK_DDR, "pclk_ddr", mux_2plls_hdmiphy_p, 0, + RK3328_CLKSEL_CON(4), 13, 2, MFLAGS, 8, 3, DFLAGS, + RK3328_CLKGATE_CON(7), 4, GFLAGS), + GATE(0, "pclk_ddrupctl", "pclk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 1, GFLAGS), + GATE(0, "pclk_ddr_msch", "pclk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 2, GFLAGS), + GATE(0, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 3, GFLAGS), + GATE(0, "pclk_ddrstdby", "pclk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 7, GFLAGS), + GATE(0, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(18), 9, GFLAGS), + + /* + * Clock-Architecture Diagram 3 + */ + + /* PD_BUS */ + COMPOSITE(ACLK_BUS_PRE, "aclk_bus_pre", mux_2plls_hdmiphy_p, 0, + RK3328_CLKSEL_CON(0), 13, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(8), 0, GFLAGS), + COMPOSITE_NOMUX(HCLK_BUS_PRE, "hclk_bus_pre", "aclk_bus_pre", 0, + RK3328_CLKSEL_CON(1), 8, 2, DFLAGS, + RK3328_CLKGATE_CON(8), 1, GFLAGS), + COMPOSITE_NOMUX(PCLK_BUS_PRE, "pclk_bus_pre", "aclk_bus_pre", 0, + RK3328_CLKSEL_CON(1), 12, 3, DFLAGS, + RK3328_CLKGATE_CON(8), 2, GFLAGS), + GATE(0, "pclk_bus", "pclk_bus_pre", 0, + RK3328_CLKGATE_CON(8), 3, GFLAGS), + GATE(0, "pclk_phy_pre", "pclk_bus_pre", 0, + RK3328_CLKGATE_CON(8), 4, GFLAGS), + + COMPOSITE(SCLK_TSP, "clk_tsp", mux_2plls_p, 0, + RK3328_CLKSEL_CON(21), 15, 1, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(2), 5, GFLAGS), + GATE(0, "clk_hsadc_tsp", "ext_gpio3a2", 0, + RK3328_CLKGATE_CON(17), 13, GFLAGS), + + /* PD_I2S */ + COMPOSITE(0, "clk_i2s0_div", mux_2plls_p, 0, + RK3328_CLKSEL_CON(6), 15, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(1), 1, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(7), 0, + RK3328_CLKGATE_CON(1), 2, GFLAGS, + &rk3328_i2s0_fracmux), + GATE(SCLK_I2S0, "clk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(1), 3, GFLAGS), + + COMPOSITE(0, "clk_i2s1_div", mux_2plls_p, 0, + RK3328_CLKSEL_CON(8), 15, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(1), 4, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(9), 0, + RK3328_CLKGATE_CON(1), 5, GFLAGS, + &rk3328_i2s1_fracmux), + GATE(SCLK_I2S1, "clk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(0), 6, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S1_OUT, "i2s1_out", mux_i2s1out_p, 0, + RK3328_CLKSEL_CON(8), 12, 1, MFLAGS, + RK3328_CLKGATE_CON(1), 7, GFLAGS), + + COMPOSITE(0, "clk_i2s2_div", mux_2plls_p, 0, + RK3328_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(1), 8, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(11), 0, + RK3328_CLKGATE_CON(1), 9, GFLAGS, + &rk3328_i2s2_fracmux), + GATE(SCLK_I2S2, "clk_i2s2", "i2s2_pre", 
CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(1), 10, GFLAGS), + COMPOSITE_NODIV(SCLK_I2S2_OUT, "i2s2_out", mux_i2s2out_p, 0, + RK3328_CLKSEL_CON(10), 12, 1, MFLAGS, + RK3328_CLKGATE_CON(1), 11, GFLAGS), + + COMPOSITE(0, "clk_spdif_div", mux_2plls_p, 0, + RK3328_CLKSEL_CON(12), 15, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(1), 12, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(13), 0, + RK3328_CLKGATE_CON(1), 13, GFLAGS, + &rk3328_spdif_fracmux), + + /* PD_UART */ + COMPOSITE(0, "clk_uart0_div", mux_2plls_u480m_p, 0, + RK3328_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(1), 14, GFLAGS), + COMPOSITE(0, "clk_uart1_div", mux_2plls_u480m_p, 0, + RK3328_CLKSEL_CON(16), 12, 2, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 0, GFLAGS), + COMPOSITE(0, "clk_uart2_div", mux_2plls_u480m_p, 0, + RK3328_CLKSEL_CON(18), 12, 2, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 2, GFLAGS), + COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(15), 0, + RK3328_CLKGATE_CON(1), 15, GFLAGS, + &rk3328_uart0_fracmux), + COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(17), 0, + RK3328_CLKGATE_CON(2), 1, GFLAGS, + &rk3328_uart1_fracmux), + COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(19), 0, + RK3328_CLKGATE_CON(2), 3, GFLAGS, + &rk3328_uart2_fracmux), + + /* + * Clock-Architecture Diagram 4 + */ + + COMPOSITE(SCLK_I2C0, "clk_i2c0", mux_2plls_p, 0, + RK3328_CLKSEL_CON(34), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 9, GFLAGS), + COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_2plls_p, 0, + RK3328_CLKSEL_CON(34), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 10, GFLAGS), + COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_2plls_p, 0, + RK3328_CLKSEL_CON(35), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 11, GFLAGS), + COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_2plls_p, 0, + RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 12, GFLAGS), + COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0, + RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 4, GFLAGS), + COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0, + RK3328_CLKSEL_CON(22), 0, 10, DFLAGS, + RK3328_CLKGATE_CON(2), 6, GFLAGS), + COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "clk_24m", 0, + RK3328_CLKSEL_CON(23), 0, 10, DFLAGS, + RK3328_CLKGATE_CON(2), 14, GFLAGS), + COMPOSITE(SCLK_SPI, "clk_spi", mux_2plls_p, 0, + RK3328_CLKSEL_CON(24), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 7, GFLAGS), + COMPOSITE(SCLK_PWM, "clk_pwm", mux_2plls_p, 0, + RK3328_CLKSEL_CON(24), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3328_CLKGATE_CON(2), 8, GFLAGS), + COMPOSITE(SCLK_OTP, "clk_otp", mux_2plls_xin24m_p, 0, + RK3328_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3328_CLKGATE_CON(3), 8, GFLAGS), + COMPOSITE(SCLK_EFUSE, "clk_efuse", mux_2plls_xin24m_p, 0, + RK3328_CLKSEL_CON(5), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(2), 13, GFLAGS), + COMPOSITE(SCLK_PDM, "clk_pdm", mux_cpll_gpll_apll_p, CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(20), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(2), 15, GFLAGS), + + GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0, + RK3328_CLKGATE_CON(8), 5, GFLAGS), + GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0, + RK3328_CLKGATE_CON(8), 6, GFLAGS), + GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0, + RK3328_CLKGATE_CON(8), 7, GFLAGS), + GATE(SCLK_TIMER3, 
"sclk_timer3", "xin24m", 0, + RK3328_CLKGATE_CON(8), 8, GFLAGS), + GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0, + RK3328_CLKGATE_CON(8), 9, GFLAGS), + GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0, + RK3328_CLKGATE_CON(8), 10, GFLAGS), + + COMPOSITE(SCLK_WIFI, "clk_wifi", mux_2plls_u480m_p, 0, + RK3328_CLKSEL_CON(52), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3328_CLKGATE_CON(0), 10, GFLAGS), + + /* + * Clock-Architecture Diagram 5 + */ + + /* PD_VIDEO */ + COMPOSITE(ACLK_RKVDEC_PRE, "aclk_rkvdec_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(48), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 0, GFLAGS), + FACTOR_GATE(HCLK_RKVDEC_PRE, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4, + RK3328_CLKGATE_CON(11), 0, GFLAGS), + GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(24), 0, GFLAGS), + GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(24), 1, GFLAGS), + GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(24), 2, GFLAGS), + GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(24), 3, GFLAGS), + + COMPOSITE(SCLK_VDEC_CABAC, "sclk_vdec_cabac", mux_4plls_p, 0, + RK3328_CLKSEL_CON(48), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 1, GFLAGS), + + COMPOSITE(SCLK_VDEC_CORE, "sclk_vdec_core", mux_4plls_p, 0, + RK3328_CLKSEL_CON(49), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 2, GFLAGS), + + COMPOSITE(ACLK_VPU_PRE, "aclk_vpu_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(50), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 5, GFLAGS), + FACTOR_GATE(HCLK_VPU_PRE, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4, + RK3328_CLKGATE_CON(11), 8, GFLAGS), + GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(23), 0, GFLAGS), + GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(23), 1, GFLAGS), + GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(23), 2, GFLAGS), + GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(23), 3, GFLAGS), + + COMPOSITE(ACLK_RKVENC, "aclk_rkvenc", mux_4plls_p, 0, + RK3328_CLKSEL_CON(51), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 3, GFLAGS), + FACTOR_GATE(HCLK_RKVENC, "hclk_rkvenc", "aclk_rkvenc", 0, 1, 4, + RK3328_CLKGATE_CON(11), 4, GFLAGS), + GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(25), 0, GFLAGS), + GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(25), 1, GFLAGS), + GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0, + RK3328_CLKGATE_CON(25), 0, GFLAGS), + GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0, + RK3328_CLKGATE_CON(25), 1, GFLAGS), + GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0, + RK3328_CLKGATE_CON(25), 0, GFLAGS), + GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0, + RK3328_CLKGATE_CON(25), 1, GFLAGS), + GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(25), 0, GFLAGS), + + COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0, + RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 4, GFLAGS), + + COMPOSITE(SCLK_VENC_DSP, "sclk_venc_dsp", mux_4plls_p, 0, + RK3328_CLKSEL_CON(52), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(6), 7, GFLAGS), + + /* + * Clock-Architecture Diagram 6 + */ + + /* PD_VIO */ + COMPOSITE(ACLK_VIO_PRE, "aclk_vio_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(37), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(5), 2, 
GFLAGS), + DIV(HCLK_VIO_PRE, "hclk_vio_pre", "aclk_vio_pre", 0, + RK3328_CLKSEL_CON(37), 8, 5, DFLAGS), + + COMPOSITE(ACLK_RGA_PRE, "aclk_rga_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(36), 14, 2, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(5), 0, GFLAGS), + COMPOSITE(SCLK_RGA, "clk_rga", mux_4plls_p, 0, + RK3328_CLKSEL_CON(36), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(5), 1, GFLAGS), + COMPOSITE(ACLK_VOP_PRE, "aclk_vop_pre", mux_4plls_p, 0, + RK3328_CLKSEL_CON(39), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(5), 5, GFLAGS), + GATE(0, "clk_hdmi_sfc", "xin24m", 0, + RK3328_CLKGATE_CON(5), 4, GFLAGS), + + COMPOSITE_NODIV(0, "clk_cif_src", mux_2plls_p, 0, + RK3328_CLKSEL_CON(42), 7, 1, MFLAGS, + RK3328_CLKGATE_CON(5), 3, GFLAGS), + COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cif_out", mux_sclk_cif_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(42), 5, 1, MFLAGS, 0, 5, DFLAGS), + + COMPOSITE(DCLK_LCDC_SRC, "dclk_lcdc_src", mux_gpll_cpll_p, 0, + RK3328_CLKSEL_CON(40), 0, 1, MFLAGS, 8, 8, DFLAGS, + RK3328_CLKGATE_CON(5), 6, GFLAGS), + DIV(DCLK_HDMIPHY, "dclk_hdmiphy", "dclk_lcdc_src", 0, + RK3328_CLKSEL_CON(40), 3, 3, DFLAGS), + MUX(DCLK_LCDC, "dclk_lcdc", mux_dclk_lcdc_p, 0, + RK3328_CLKSEL_CON(40), 1, 1, MFLAGS), + + /* + * Clock-Architecture Diagram 7 + */ + + /* PD_PERI */ + GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(4), 0, GFLAGS), + GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(4), 1, GFLAGS), + GATE(0, "hdmiphy_peri", "hdmiphy", CLK_IGNORE_UNUSED, + RK3328_CLKGATE_CON(4), 2, GFLAGS), + COMPOSITE_NOGATE(ACLK_PERI_PRE, "aclk_peri_pre", mux_aclk_peri_pre_p, 0, + RK3328_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS), + COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_pre", CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(29), 0, 2, DFLAGS, + RK3328_CLKGATE_CON(10), 2, GFLAGS), + COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_pre", CLK_IGNORE_UNUSED, + RK3328_CLKSEL_CON(29), 4, 3, DFLAGS, + RK3328_CLKGATE_CON(10), 1, GFLAGS), + GATE(ACLK_PERI, "aclk_peri", "aclk_peri_pre", CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, + RK3328_CLKGATE_CON(10), 0, GFLAGS), + + COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_2plls_24m_u480m_p, 0, + RK3328_CLKSEL_CON(30), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3328_CLKGATE_CON(4), 3, GFLAGS), + + COMPOSITE(SCLK_SDIO, "clk_sdio", mux_2plls_24m_u480m_p, 0, + RK3328_CLKSEL_CON(31), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3328_CLKGATE_CON(4), 4, GFLAGS), + + COMPOSITE(SCLK_EMMC, "clk_emmc", mux_2plls_24m_u480m_p, 0, + RK3328_CLKSEL_CON(32), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3328_CLKGATE_CON(4), 5, GFLAGS), + + COMPOSITE(SCLK_SDMMC_EXT, "clk_sdmmc_ext", mux_2plls_24m_u480m_p, 0, + RK3328_CLKSEL_CON(43), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3328_CLKGATE_CON(4), 10, GFLAGS), + + COMPOSITE(SCLK_REF_USB3OTG_SRC, "clk_ref_usb3otg_src", mux_2plls_p, 0, + RK3328_CLKSEL_CON(45), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKGATE_CON(4), 9, GFLAGS), + + MUX(SCLK_REF_USB3OTG, "clk_ref_usb3otg", mux_ref_usb3otg_src_p, CLK_SET_RATE_PARENT, + RK3328_CLKSEL_CON(45), 8, 1, MFLAGS), + + GATE(SCLK_USB3OTG_REF, "clk_usb3otg_ref", "xin24m", 0, + RK3328_CLKGATE_CON(4), 7, GFLAGS), + + COMPOSITE(SCLK_USB3OTG_SUSPEND, "clk_usb3otg_suspend", mux_xin24m_32k_p, 0, + RK3328_CLKSEL_CON(33), 15, 1, MFLAGS, 0, 10, DFLAGS, + RK3328_CLKGATE_CON(4), 8, GFLAGS), + + /* + * Clock-Architecture Diagram 8 + */ + + /* PD_GMAC */ + COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0, + RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(3), 2, GFLAGS), + COMPOSITE_NOMUX(PCLK_GMAC, 
"pclk_gmac", "aclk_gmac", 0, + RK3328_CLKSEL_CON(25), 8, 3, DFLAGS, + RK3328_CLKGATE_CON(9), 0, GFLAGS), + + COMPOSITE(SCLK_MAC2IO_SRC, "clk_mac2io_src", mux_2plls_p, 0, + RK3328_CLKSEL_CON(27), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(3), 1, GFLAGS), + GATE(SCLK_MAC2IO_REF, "clk_mac2io_ref", "clk_mac2io", 0, + RK3328_CLKGATE_CON(9), 7, GFLAGS), + GATE(SCLK_MAC2IO_RX, "clk_mac2io_rx", "clk_mac2io", 0, + RK3328_CLKGATE_CON(9), 4, GFLAGS), + GATE(SCLK_MAC2IO_TX, "clk_mac2io_tx", "clk_mac2io", 0, + RK3328_CLKGATE_CON(9), 5, GFLAGS), + GATE(SCLK_MAC2IO_REFOUT, "clk_mac2io_refout", "clk_mac2io", 0, + RK3328_CLKGATE_CON(9), 6, GFLAGS), + COMPOSITE(SCLK_MAC2IO_OUT, "clk_mac2io_out", mux_2plls_p, 0, + RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS, + RK3328_CLKGATE_CON(3), 5, GFLAGS), + + COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0, + RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKGATE_CON(3), 0, GFLAGS), + GATE(SCLK_MAC2PHY_REF, "clk_mac2phy_ref", "clk_mac2phy", 0, + RK3328_CLKGATE_CON(9), 3, GFLAGS), + GATE(SCLK_MAC2PHY_RXTX, "clk_mac2phy_rxtx", "clk_mac2phy", 0, + RK3328_CLKGATE_CON(9), 1, GFLAGS), + COMPOSITE_NOMUX(SCLK_MAC2PHY_OUT, "clk_mac2phy_out", "clk_mac2phy", 0, + RK3328_CLKSEL_CON(26), 8, 2, DFLAGS, + RK3328_CLKGATE_CON(9), 2, GFLAGS), + + FACTOR(0, "xin12m", "xin24m", 0, 1, 2), + + /* + * Clock-Architecture Diagram 9 + */ + + /* PD_VOP */ + GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3328_CLKGATE_CON(21), 10, GFLAGS), + GATE(0, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 3, GFLAGS), + GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK3328_CLKGATE_CON(21), 2, GFLAGS), + GATE(0, "aclk_vop_niu", "aclk_vop_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 4, GFLAGS), + + GATE(ACLK_IEP, "aclk_iep", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 6, GFLAGS), + GATE(ACLK_CIF, "aclk_cif", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 8, GFLAGS), + GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 15, GFLAGS), + GATE(0, "aclk_vio_niu", "aclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 2, GFLAGS), + + GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 3, GFLAGS), + GATE(0, "hclk_vop_niu", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 5, GFLAGS), + GATE(HCLK_IEP, "hclk_iep", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 7, GFLAGS), + GATE(HCLK_CIF, "hclk_cif", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 9, GFLAGS), + GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 11, GFLAGS), + GATE(0, "hclk_ahb1tom", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 12, GFLAGS), + GATE(0, "pclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 13, GFLAGS), + GATE(0, "hclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 14, GFLAGS), + GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 0, GFLAGS), + GATE(HCLK_VIO, "hclk_vio", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 1, GFLAGS), + GATE(PCLK_HDMI, "pclk_hdmi", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 4, GFLAGS), + GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 5, GFLAGS), + + /* PD_PERI */ + GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS), + GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS), + + GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS), + GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, 
GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 2, GFLAGS), + GATE(HCLK_SDMMC_EXT, "hclk_sdmmc_ext", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 15, GFLAGS), + GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 6, GFLAGS), + GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 7, GFLAGS), + GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 8, GFLAGS), + GATE(HCLK_OTG_PMU, "hclk_otg_pmu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 9, GFLAGS), + GATE(0, "hclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 12, GFLAGS), + GATE(0, "pclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 13, GFLAGS), + + /* PD_GMAC */ + GATE(ACLK_MAC2PHY, "aclk_mac2phy", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 0, GFLAGS), + GATE(ACLK_MAC2IO, "aclk_mac2io", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 2, GFLAGS), + GATE(0, "aclk_gmac_niu", "aclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 4, GFLAGS), + GATE(PCLK_MAC2PHY, "pclk_mac2phy", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 1, GFLAGS), + GATE(PCLK_MAC2IO, "pclk_mac2io", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 3, GFLAGS), + GATE(0, "pclk_gmac_niu", "pclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 5, GFLAGS), + + /* PD_BUS */ + GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 12, GFLAGS), + GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 11, GFLAGS), + GATE(ACLK_TSP, "aclk_tsp", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 12, GFLAGS), + GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 0, GFLAGS), + GATE(ACLK_DMAC, "aclk_dmac_bus", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 1, GFLAGS), + + GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 2, GFLAGS), + GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 3, GFLAGS), + GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 4, GFLAGS), + GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 5, GFLAGS), + GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 6, GFLAGS), + GATE(HCLK_TSP, "hclk_tsp", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 11, GFLAGS), + GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 7, GFLAGS), + GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 8, GFLAGS), + GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 13, GFLAGS), + GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(28), 0, GFLAGS), + + GATE(0, "pclk_bus_niu", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 14, GFLAGS), + GATE(0, "pclk_efuse", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 9, GFLAGS), + GATE(0, "pclk_otp", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(28), 4, GFLAGS), + GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3328_CLKGATE_CON(15), 10, GFLAGS), + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 0, GFLAGS), + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 1, GFLAGS), + GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 2, GFLAGS), + GATE(PCLK_TIMER, "pclk_timer0", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 3, GFLAGS), + GATE(0, "pclk_stimer", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 4, GFLAGS), + GATE(PCLK_SPI, "pclk_spi", "pclk_bus", 0, 
RK3328_CLKGATE_CON(16), 5, GFLAGS), + GATE(PCLK_PWM, "pclk_rk_pwm", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 6, GFLAGS), + GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 7, GFLAGS), + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 8, GFLAGS), + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 9, GFLAGS), + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 10, GFLAGS), + GATE(PCLK_UART0, "pclk_uart0", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 11, GFLAGS), + GATE(PCLK_UART1, "pclk_uart1", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 12, GFLAGS), + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 13, GFLAGS), + GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 14, GFLAGS), + GATE(PCLK_DCF, "pclk_dcf", "pclk_bus", 0, RK3328_CLKGATE_CON(16), 15, GFLAGS), + GATE(PCLK_GRF, "pclk_grf", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 0, GFLAGS), + GATE(0, "pclk_cru", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 4, GFLAGS), + GATE(0, "pclk_sgrf", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 6, GFLAGS), + GATE(0, "pclk_sim", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 10, GFLAGS), + GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus", 0, RK3328_CLKGATE_CON(17), 15, GFLAGS), + GATE(0, "pclk_pmu", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(28), 3, GFLAGS), + + GATE(PCLK_USB3PHY_OTG, "pclk_usb3phy_otg", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(28), 1, GFLAGS), + GATE(PCLK_USB3PHY_PIPE, "pclk_usb3phy_pipe", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(28), 2, GFLAGS), + GATE(PCLK_USB3_GRF, "pclk_usb3_grf", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 2, GFLAGS), + GATE(PCLK_USB2_GRF, "pclk_usb2_grf", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 14, GFLAGS), + GATE(0, "pclk_ddrphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 13, GFLAGS), + GATE(0, "pclk_acodecphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 5, GFLAGS), + GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 7, GFLAGS), + GATE(0, "pclk_vdacphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 8, GFLAGS), + GATE(0, "pclk_phy_niu", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 15, GFLAGS), + + /* PD_MMC */ + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", + RK3328_SDMMC_CON0, 1), + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", + RK3328_SDMMC_CON1, 1), + + MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", + RK3328_SDIO_CON0, 1), + MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", + RK3328_SDIO_CON1, 1), + + MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", + RK3328_EMMC_CON0, 1), + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", + RK3328_EMMC_CON1, 1), + + MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "sclk_sdmmc_ext", + RK3328_SDMMC_EXT_CON0, 1), + MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "sclk_sdmmc_ext", + RK3328_SDMMC_EXT_CON1, 1), +}; + +static const char *const rk3328_critical_clocks[] __initconst = { + "aclk_bus", + "pclk_bus", + "hclk_bus", + "aclk_peri", + "hclk_peri", + "pclk_peri", + "pclk_dbg", + "aclk_core_niu", + "aclk_gic400", + "aclk_intmem", + "hclk_rom", + "pclk_grf", + "pclk_cru", + "pclk_sgrf", + "pclk_timer0", + "clk_timer0", + "pclk_ddr_msch", + "pclk_ddr_mon", + "pclk_ddr_grf", + "clk_ddrupctl", + "clk_ddrmsch", + "hclk_ahb1tom", + "clk_jtag", + "pclk_ddrphy", + "pclk_pmu", + "hclk_otg_pmu", + "aclk_rga_niu", + "pclk_vio_h2p", + "hclk_vio_h2p", +}; + +static 
void __init rk3328_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + void __iomem *reg_base; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: could not map cru region\n", __func__); + return; + } + + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + if (IS_ERR(ctx)) { + pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); + return; + } + + rockchip_clk_register_plls(ctx, rk3328_pll_clks, + ARRAY_SIZE(rk3328_pll_clks), + RK3328_GRF_SOC_STATUS0); + rockchip_clk_register_branches(ctx, rk3328_clk_branches, + ARRAY_SIZE(rk3328_clk_branches)); + rockchip_clk_protect_critical(rk3328_critical_clocks, + ARRAY_SIZE(rk3328_critical_clocks)); + + rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", + mux_armclk_p, ARRAY_SIZE(mux_armclk_p), + &rk3328_cpuclk_data, rk3328_cpuclk_rates, + ARRAY_SIZE(rk3328_cpuclk_rates)); + + rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0), + ROCKCHIP_SOFTRST_HIWORD_MASK); + + rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL); + + rockchip_clk_of_add_provider(np, ctx); +} +CLK_OF_DECLARE(rk3328_cru, "rockchip,rk3328-cru", rk3328_clk_init); diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 3490887b0579..73121b144634 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -1132,7 +1132,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(11), 8, GFLAGS), COMPOSITE(PCLK_EDP, "pclk_edp", mux_pll_src_cpll_gpll_p, 0, - RK3399_CLKSEL_CON(44), 15, 1, MFLAGS, 8, 5, DFLAGS, + RK3399_CLKSEL_CON(44), 15, 1, MFLAGS, 8, 6, DFLAGS, RK3399_CLKGATE_CON(11), 11, GFLAGS), GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(32), 12, GFLAGS), diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index b886be30f34f..fe1d393cf678 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -344,7 +344,6 @@ struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np, ctx->clk_data.clks = clk_table; ctx->clk_data.clk_num = nr_clks; ctx->cru_node = np; - ctx->grf = ERR_PTR(-EPROBE_DEFER); spin_lock_init(&ctx->lock); ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node, @@ -417,6 +416,13 @@ void __init rockchip_clk_register_branches( list->mux_shift, list->mux_width, list->mux_flags, &ctx->lock); break; + case branch_muxgrf: + clk = rockchip_clk_register_muxgrf(list->name, + list->parent_names, list->num_parents, + flags, ctx->grf, list->muxdiv_offset, + list->mux_shift, list->mux_width, + list->mux_flags); + break; case branch_divider: if (list->div_table) clk = clk_register_divider_table(NULL, diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index d67eecc4ade9..7c15473ea72b 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -91,6 +91,24 @@ struct clk; #define RK3288_EMMC_CON0 0x218 #define RK3288_EMMC_CON1 0x21c +#define RK3328_PLL_CON(x) RK2928_PLL_CON(x) +#define RK3328_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3328_CLKGATE_CON(x) ((x) * 0x4 + 0x200) +#define RK3328_GRFCLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3328_GLB_SRST_FST 0x9c +#define RK3328_GLB_SRST_SND 0x98 +#define RK3328_SOFTRST_CON(x) ((x) * 0x4 + 0x300) +#define RK3328_MODE_CON 0x80 +#define RK3328_MISC_CON 0x84 +#define RK3328_SDMMC_CON0 0x380 +#define RK3328_SDMMC_CON1 0x384 +#define RK3328_SDIO_CON0 0x388 +#define RK3328_SDIO_CON1 0x38c +#define RK3328_EMMC_CON0 0x390 +#define 
RK3328_EMMC_CON1 0x394 +#define RK3328_SDMMC_EXT_CON0 0x398 +#define RK3328_SDMMC_EXT_CON1 0x39C + #define RK3368_PLL_CON(x) RK2928_PLL_CON(x) #define RK3368_CLKSEL_CON(x) ((x) * 0x4 + 0x100) #define RK3368_CLKGATE_CON(x) ((x) * 0x4 + 0x200) @@ -130,6 +148,7 @@ struct clk; enum rockchip_pll_type { pll_rk3036, pll_rk3066, + pll_rk3328, pll_rk3399, }; @@ -317,11 +336,17 @@ struct clk *rockchip_clk_register_inverter(const char *name, void __iomem *reg, int shift, int flags, spinlock_t *lock); +struct clk *rockchip_clk_register_muxgrf(const char *name, + const char *const *parent_names, u8 num_parents, + int flags, struct regmap *grf, int reg, + int shift, int width, int mux_flags); + #define PNAME(x) static const char *const x[] __initconst enum rockchip_clk_branch_type { branch_composite, branch_mux, + branch_muxgrf, branch_divider, branch_fraction_divider, branch_gate, @@ -551,6 +576,21 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \ + { \ + .id = _id, \ + .branch_type = branch_muxgrf, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = o, \ + .mux_shift = s, \ + .mux_width = w, \ + .mux_flags = mf, \ + .gate_offset = -1, \ + } + #define DIV(_id, cname, pname, f, o, s, w, df) \ { \ .id = _id, \ diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 57f4dc6dc447..7afc21dc374e 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o -obj-$(CONFIG_SOC_EXYNOS4415) += clk-exynos4415.o obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 17e68a724945..cb7df358a27d 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -44,7 +44,7 @@ static unsigned long reg_save[][2] = { { ASS_CLK_GATE, 0 }, }; -static int exynos_audss_clk_suspend(void) +static int exynos_audss_clk_suspend(struct device *dev) { int i; @@ -54,18 +54,15 @@ static int exynos_audss_clk_suspend(void) return 0; } -static void exynos_audss_clk_resume(void) +static int exynos_audss_clk_resume(struct device *dev) { int i; for (i = 0; i < ARRAY_SIZE(reg_save); i++) writel(reg_save[i][1], reg_base + reg_save[i][0]); -} -static struct syscore_ops exynos_audss_clk_syscore_ops = { - .suspend = exynos_audss_clk_suspend, - .resume = exynos_audss_clk_resume, -}; + return 0; +} #endif /* CONFIG_PM_SLEEP */ struct exynos_audss_clk_drvdata { @@ -251,9 +248,6 @@ static int exynos_audss_clk_probe(struct platform_device *pdev) goto unregister; } -#ifdef CONFIG_PM_SLEEP - register_syscore_ops(&exynos_audss_clk_syscore_ops); -#endif return 0; unregister: @@ -267,10 +261,6 @@ unregister: static int exynos_audss_clk_remove(struct platform_device *pdev) { -#ifdef CONFIG_PM_SLEEP - unregister_syscore_ops(&exynos_audss_clk_syscore_ops); -#endif - of_clk_del_provider(pdev->dev.of_node); exynos_audss_clk_teardown(); @@ -281,10 +271,16 @@ static int exynos_audss_clk_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops exynos_audss_clk_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_audss_clk_suspend, + exynos_audss_clk_resume) +}; + static struct platform_driver 
exynos_audss_clk_driver = { .driver = { .name = "exynos-audss-clk", .of_match_table = exynos_audss_clk_of_match, + .pm = &exynos_audss_clk_pm_ops, }, .probe = exynos_audss_clk_probe, .remove = exynos_audss_clk_remove, diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c deleted file mode 100644 index 6c9063159717..000000000000 --- a/drivers/clk/samsung/clk-exynos4415.c +++ /dev/null @@ -1,1022 +0,0 @@ -/* - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Common Clock Framework support for Exynos4415 SoC. - */ - -#include <linux/clk-provider.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/syscore_ops.h> - -#include <dt-bindings/clock/exynos4415.h> - -#include "clk.h" -#include "clk-pll.h" - -#define SRC_LEFTBUS 0x4200 -#define DIV_LEFTBUS 0x4500 -#define GATE_IP_LEFTBUS 0x4800 -#define GATE_IP_IMAGE 0x4930 -#define SRC_RIGHTBUS 0x8200 -#define DIV_RIGHTBUS 0x8500 -#define GATE_IP_RIGHTBUS 0x8800 -#define GATE_IP_PERIR 0x8960 -#define EPLL_LOCK 0xc010 -#define G3D_PLL_LOCK 0xc020 -#define DISP_PLL_LOCK 0xc030 -#define ISP_PLL_LOCK 0xc040 -#define EPLL_CON0 0xc110 -#define EPLL_CON1 0xc114 -#define EPLL_CON2 0xc118 -#define G3D_PLL_CON0 0xc120 -#define G3D_PLL_CON1 0xc124 -#define G3D_PLL_CON2 0xc128 -#define ISP_PLL_CON0 0xc130 -#define ISP_PLL_CON1 0xc134 -#define ISP_PLL_CON2 0xc138 -#define DISP_PLL_CON0 0xc140 -#define DISP_PLL_CON1 0xc144 -#define DISP_PLL_CON2 0xc148 -#define SRC_TOP0 0xc210 -#define SRC_TOP1 0xc214 -#define SRC_CAM 0xc220 -#define SRC_TV 0xc224 -#define SRC_MFC 0xc228 -#define SRC_G3D 0xc22c -#define SRC_LCD 0xc234 -#define SRC_ISP 0xc238 -#define SRC_MAUDIO 0xc23c -#define SRC_FSYS 0xc240 -#define SRC_PERIL0 0xc250 -#define SRC_PERIL1 0xc254 -#define SRC_CAM1 0xc258 -#define SRC_TOP_ISP0 0xc25c -#define SRC_TOP_ISP1 0xc260 -#define SRC_MASK_TOP 0xc310 -#define SRC_MASK_CAM 0xc320 -#define SRC_MASK_TV 0xc324 -#define SRC_MASK_LCD 0xc334 -#define SRC_MASK_ISP 0xc338 -#define SRC_MASK_MAUDIO 0xc33c -#define SRC_MASK_FSYS 0xc340 -#define SRC_MASK_PERIL0 0xc350 -#define SRC_MASK_PERIL1 0xc354 -#define DIV_TOP 0xc510 -#define DIV_CAM 0xc520 -#define DIV_TV 0xc524 -#define DIV_MFC 0xc528 -#define DIV_G3D 0xc52c -#define DIV_LCD 0xc534 -#define DIV_ISP 0xc538 -#define DIV_MAUDIO 0xc53c -#define DIV_FSYS0 0xc540 -#define DIV_FSYS1 0xc544 -#define DIV_FSYS2 0xc548 -#define DIV_PERIL0 0xc550 -#define DIV_PERIL1 0xc554 -#define DIV_PERIL2 0xc558 -#define DIV_PERIL3 0xc55c -#define DIV_PERIL4 0xc560 -#define DIV_PERIL5 0xc564 -#define DIV_CAM1 0xc568 -#define DIV_TOP_ISP1 0xc56c -#define DIV_TOP_ISP0 0xc570 -#define CLKDIV2_RATIO 0xc580 -#define GATE_SCLK_CAM 0xc820 -#define GATE_SCLK_TV 0xc824 -#define GATE_SCLK_MFC 0xc828 -#define GATE_SCLK_G3D 0xc82c -#define GATE_SCLK_LCD 0xc834 -#define GATE_SCLK_MAUDIO 0xc83c -#define GATE_SCLK_FSYS 0xc840 -#define GATE_SCLK_PERIL 0xc850 -#define GATE_IP_CAM 0xc920 -#define GATE_IP_TV 0xc924 -#define GATE_IP_MFC 0xc928 -#define GATE_IP_G3D 0xc92c -#define GATE_IP_LCD 0xc934 -#define GATE_IP_FSYS 0xc940 -#define GATE_IP_PERIL 0xc950 -#define GATE_BLOCK 0xc970 -#define APLL_LOCK 0x14000 -#define APLL_CON0 0x14100 -#define SRC_CPU 0x14200 -#define DIV_CPU0 0x14500 -#define DIV_CPU1 0x14504 - 
-static const unsigned long exynos4415_cmu_clk_regs[] __initconst = { - SRC_LEFTBUS, - DIV_LEFTBUS, - GATE_IP_LEFTBUS, - GATE_IP_IMAGE, - SRC_RIGHTBUS, - DIV_RIGHTBUS, - GATE_IP_RIGHTBUS, - GATE_IP_PERIR, - EPLL_LOCK, - G3D_PLL_LOCK, - DISP_PLL_LOCK, - ISP_PLL_LOCK, - EPLL_CON0, - EPLL_CON1, - EPLL_CON2, - G3D_PLL_CON0, - G3D_PLL_CON1, - G3D_PLL_CON2, - ISP_PLL_CON0, - ISP_PLL_CON1, - ISP_PLL_CON2, - DISP_PLL_CON0, - DISP_PLL_CON1, - DISP_PLL_CON2, - SRC_TOP0, - SRC_TOP1, - SRC_CAM, - SRC_TV, - SRC_MFC, - SRC_G3D, - SRC_LCD, - SRC_ISP, - SRC_MAUDIO, - SRC_FSYS, - SRC_PERIL0, - SRC_PERIL1, - SRC_CAM1, - SRC_TOP_ISP0, - SRC_TOP_ISP1, - SRC_MASK_TOP, - SRC_MASK_CAM, - SRC_MASK_TV, - SRC_MASK_LCD, - SRC_MASK_ISP, - SRC_MASK_MAUDIO, - SRC_MASK_FSYS, - SRC_MASK_PERIL0, - SRC_MASK_PERIL1, - DIV_TOP, - DIV_CAM, - DIV_TV, - DIV_MFC, - DIV_G3D, - DIV_LCD, - DIV_ISP, - DIV_MAUDIO, - DIV_FSYS0, - DIV_FSYS1, - DIV_FSYS2, - DIV_PERIL0, - DIV_PERIL1, - DIV_PERIL2, - DIV_PERIL3, - DIV_PERIL4, - DIV_PERIL5, - DIV_CAM1, - DIV_TOP_ISP1, - DIV_TOP_ISP0, - CLKDIV2_RATIO, - GATE_SCLK_CAM, - GATE_SCLK_TV, - GATE_SCLK_MFC, - GATE_SCLK_G3D, - GATE_SCLK_LCD, - GATE_SCLK_MAUDIO, - GATE_SCLK_FSYS, - GATE_SCLK_PERIL, - GATE_IP_CAM, - GATE_IP_TV, - GATE_IP_MFC, - GATE_IP_G3D, - GATE_IP_LCD, - GATE_IP_FSYS, - GATE_IP_PERIL, - GATE_BLOCK, - APLL_LOCK, - APLL_CON0, - SRC_CPU, - DIV_CPU0, - DIV_CPU1, -}; - -/* list of all parent clock list */ -PNAME(mout_g3d_pllsrc_p) = { "fin_pll", }; - -PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; -PNAME(mout_g3d_pll_p) = { "fin_pll", "fout_g3d_pll", }; -PNAME(mout_isp_pll_p) = { "fin_pll", "fout_isp_pll", }; -PNAME(mout_disp_pll_p) = { "fin_pll", "fout_disp_pll", }; - -PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", }; -PNAME(mout_epll_p) = { "fin_pll", "fout_epll", }; -PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", }; -PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", }; - -PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", }; -PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_g3d_pll", }; - -PNAME(mout_gdl_p) = { "mout_mpll_user_l", }; -PNAME(mout_gdr_p) = { "mout_mpll_user_r", }; - -PNAME(mout_aclk_266_p) = { "mout_mpll_user_t", "mout_g3d_pll", }; - -PNAME(group_epll_g3dpll_p) = { "mout_epll", "mout_g3d_pll" }; -PNAME(group_sclk_p) = { "xxti", "xusbxti", - "none", "mout_isp_pll", - "none", "none", "div_mpll_pre", - "mout_epll", "mout_g3d_pll", }; -PNAME(group_spdif_p) = { "mout_audio0", "mout_audio1", - "mout_audio2", "spdif_extclk", }; -PNAME(group_sclk_audio2_p) = { "audiocdclk2", "none", - "none", "mout_isp_pll", - "mout_disp_pll", "xusbxti", - "div_mpll_pre", "mout_epll", - "mout_g3d_pll", }; -PNAME(group_sclk_audio1_p) = { "audiocdclk1", "none", - "none", "mout_isp_pll", - "mout_disp_pll", "xusbxti", - "div_mpll_pre", "mout_epll", - "mout_g3d_pll", }; -PNAME(group_sclk_audio0_p) = { "audiocdclk0", "none", - "none", "mout_isp_pll", - "mout_disp_pll", "xusbxti", - "div_mpll_pre", "mout_epll", - "mout_g3d_pll", }; -PNAME(group_fimc_lclk_p) = { "xxti", "xusbxti", - "none", "mout_isp_pll", - "none", "mout_disp_pll", - "mout_mpll_user_t", "mout_epll", - "mout_g3d_pll", }; -PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti", - "m_bitclkhsdiv4_4l", "mout_isp_pll", - "mout_disp_pll", "sclk_hdmiphy", - "div_mpll_pre", "mout_epll", - "mout_g3d_pll", }; -PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy" }; -PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" }; -PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" }; -PNAME(mout_jpeg_p) = { "mout_jpeg_0", "mout_jpeg_1" }; 
-PNAME(mout_jpeg1_p) = { "mout_epll", "mout_g3d_pll" }; -PNAME(group_aclk_isp0_300_p) = { "mout_isp_pll", "div_mpll_pre" }; -PNAME(group_aclk_isp0_400_user_p) = { "fin_pll", "div_aclk_400_mcuisp" }; -PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" }; -PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" }; -PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" }; - -static const struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initconst = { - /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */ - FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0), -}; - -static const struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initconst = { - FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000), -}; - -static const struct samsung_mux_clock exynos4415_mux_clks[] __initconst = { - /* - * NOTE: Following table is sorted by register address in ascending - * order and then bitfield shift in descending order, as it is done - * in the User's Manual. When adding new entries, please make sure - * that the order is preserved, to avoid merge conflicts and make - * further work with defined data easier. - */ - - /* SRC_LEFTBUS */ - MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p, - SRC_LEFTBUS, 4, 1), - MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1), - - /* SRC_RIGHTBUS */ - MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p, - SRC_RIGHTBUS, 4, 1), - MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1), - - /* SRC_TOP0 */ - MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1), - MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_mout_mpll_user_t_p, - SRC_TOP0, 24, 1), - MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_mout_mpll_user_t_p, - SRC_TOP0, 20, 1), - MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_mout_mpll_user_t_p, - SRC_TOP0, 16, 1), - MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p, - SRC_TOP0, 12, 1), - MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, - SRC_TOP0, 8, 1), - MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_TOP0, 4, 1), - MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1), - - /* SRC_TOP1 */ - MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p, - SRC_TOP1, 28, 1), - MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p, - SRC_TOP1, 16, 1), - MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p, - SRC_TOP1, 12, 1), - MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp", - group_mout_mpll_user_t_p, SRC_TOP1, 8, 1), - MUX(CLK_MOUT_G3D_PLLSRC, "mout_g3d_pllsrc", mout_g3d_pllsrc_p, - SRC_TOP1, 0, 1), - - /* SRC_CAM */ - MUX(CLK_MOUT_CSIS1, "mout_csis1", group_fimc_lclk_p, SRC_CAM, 28, 4), - MUX(CLK_MOUT_CSIS0, "mout_csis0", group_fimc_lclk_p, SRC_CAM, 24, 4), - MUX(CLK_MOUT_CAM1, "mout_cam1", group_fimc_lclk_p, SRC_CAM, 20, 4), - MUX(CLK_MOUT_FIMC3_LCLK, "mout_fimc3_lclk", group_fimc_lclk_p, SRC_CAM, - 12, 4), - MUX(CLK_MOUT_FIMC2_LCLK, "mout_fimc2_lclk", group_fimc_lclk_p, SRC_CAM, - 8, 4), - MUX(CLK_MOUT_FIMC1_LCLK, "mout_fimc1_lclk", group_fimc_lclk_p, SRC_CAM, - 4, 4), - MUX(CLK_MOUT_FIMC0_LCLK, "mout_fimc0_lclk", group_fimc_lclk_p, SRC_CAM, - 0, 4), - - /* SRC_TV */ - MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1), - - /* SRC_MFC */ - MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1), - MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_g3dpll_p, SRC_MFC, 4, 1), - MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_mout_mpll_user_t_p, SRC_MFC, 0, - 1), - - /* SRC_G3D */ - MUX(CLK_MOUT_G3D, "mout_g3d", 
mout_g3d_p, SRC_G3D, 8, 1), - MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_g3dpll_p, SRC_G3D, 4, 1), - MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_mout_mpll_user_t_p, SRC_G3D, 0, - 1), - - /* SRC_LCD */ - MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_fimc_lclk_p, SRC_LCD, 12, 4), - MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4), - - /* SRC_ISP */ - MUX(CLK_MOUT_TSADC_ISP, "mout_tsadc_isp", group_fimc_lclk_p, SRC_ISP, - 16, 4), - MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_fimc_lclk_p, SRC_ISP, - 12, 4), - MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_fimc_lclk_p, SRC_ISP, - 8, 4), - MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_fimc_lclk_p, SRC_ISP, - 4, 4), - MUX(CLK_MOUT_PWM_ISP, "mout_pwm_isp", group_fimc_lclk_p, SRC_ISP, - 0, 4), - - /* SRC_MAUDIO */ - MUX(CLK_MOUT_AUDIO0, "mout_audio0", group_sclk_audio0_p, SRC_MAUDIO, - 0, 4), - - /* SRC_FSYS */ - MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4), - MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4), - MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4), - MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4), - - /* SRC_PERIL0 */ - MUX(CLK_MOUT_UART3, "mout_uart3", group_sclk_p, SRC_PERIL0, 12, 4), - MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4), - MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4), - MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4), - - /* SRC_PERIL1 */ - MUX(CLK_MOUT_SPI2, "mout_spi2", group_sclk_p, SRC_PERIL1, 24, 4), - MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4), - MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4), - MUX(CLK_MOUT_SPDIF, "mout_spdif", group_spdif_p, SRC_PERIL1, 8, 4), - MUX(CLK_MOUT_AUDIO2, "mout_audio2", group_sclk_audio2_p, SRC_PERIL1, - 4, 4), - MUX(CLK_MOUT_AUDIO1, "mout_audio1", group_sclk_audio1_p, SRC_PERIL1, - 0, 4), - - /* SRC_CPU */ - MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p, - SRC_CPU, 24, 1), - MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1), - MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, 0, - CLK_MUX_READ_ONLY), - MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, - CLK_SET_RATE_PARENT, 0), - - /* SRC_CAM1 */ - MUX(CLK_MOUT_PXLASYNC_CSIS1_FIMC, "mout_pxlasync_csis1", - group_fimc_lclk_p, SRC_CAM1, 20, 1), - MUX(CLK_MOUT_PXLASYNC_CSIS0_FIMC, "mout_pxlasync_csis0", - group_fimc_lclk_p, SRC_CAM1, 16, 1), - MUX(CLK_MOUT_JPEG, "mout_jpeg", mout_jpeg_p, SRC_CAM1, 8, 1), - MUX(CLK_MOUT_JPEG1, "mout_jpeg_1", mout_jpeg1_p, SRC_CAM1, 4, 1), - MUX(CLK_MOUT_JPEG0, "mout_jpeg_0", group_mout_mpll_user_t_p, SRC_CAM1, - 0, 1), - - /* SRC_TOP_ISP0 */ - MUX(CLK_MOUT_ACLK_ISP0_300, "mout_aclk_isp0_300", - group_aclk_isp0_300_p, SRC_TOP_ISP0, 8, 1), - MUX(CLK_MOUT_ACLK_ISP0_400, "mout_aclk_isp0_400_user", - group_aclk_isp0_400_user_p, SRC_TOP_ISP0, 4, 1), - MUX(CLK_MOUT_ACLK_ISP0_300_USER, "mout_aclk_isp0_300_user", - group_aclk_isp0_300_user_p, SRC_TOP_ISP0, 0, 1), - - /* SRC_TOP_ISP1 */ - MUX(CLK_MOUT_ACLK_ISP1_300, "mout_aclk_isp1_300", - group_aclk_isp0_300_p, SRC_TOP_ISP1, 4, 1), - MUX(CLK_MOUT_ACLK_ISP1_300_USER, "mout_aclk_isp1_300_user", - group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1), -}; - -static const struct samsung_div_clock exynos4415_div_clks[] __initconst = { - /* - * NOTE: Following table is sorted by register address in ascending - * order and then bitfield shift in descending order, as it is done - * in the User's Manual. 
When adding new entries, please make sure - * that the order is preserved, to avoid merge conflicts and make - * further work with defined data easier. - */ - - /* DIV_LEFTBUS */ - DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3), - DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4), - - /* DIV_RIGHTBUS */ - DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3), - DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4), - - /* DIV_TOP */ - DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp", - "mout_aclk_400_mcuisp", DIV_TOP, 24, 3), - DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3), - DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3), - DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3), - DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4), - DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3), - - /* DIV_CAM */ - DIV(CLK_DIV_CSIS1, "div_csis1", "mout_csis1", DIV_CAM, 28, 4), - DIV(CLK_DIV_CSIS0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4), - DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4), - DIV(CLK_DIV_FIMC3_LCLK, "div_fimc3_lclk", "mout_fimc3_lclk", DIV_CAM, - 12, 4), - DIV(CLK_DIV_FIMC2_LCLK, "div_fimc2_lclk", "mout_fimc2_lclk", DIV_CAM, - 8, 4), - DIV(CLK_DIV_FIMC1_LCLK, "div_fimc1_lclk", "mout_fimc1_lclk", DIV_CAM, - 4, 4), - DIV(CLK_DIV_FIMC0_LCLK, "div_fimc0_lclk", "mout_fimc0_lclk", DIV_CAM, - 0, 4), - - /* DIV_TV */ - DIV(CLK_DIV_TV_BLK, "div_tv_blk", "mout_g3d_pll", DIV_TV, 0, 4), - - /* DIV_MFC */ - DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4), - - /* DIV_G3D */ - DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4), - - /* DIV_LCD */ - DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4), - DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4), - - /* DIV_ISP */ - DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4), - DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp", - DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4), - DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp", - DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4), - DIV(CLK_DIV_PWM_ISP, "div_pwm_isp", "mout_pwm_isp", DIV_ISP, 0, 4), - - /* DIV_MAUDIO */ - DIV(CLK_DIV_PCM0, "div_pcm0", "div_audio0", DIV_MAUDIO, 4, 8), - DIV(CLK_DIV_AUDIO0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4), - - /* DIV_FSYS0 */ - DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4), - - /* DIV_FSYS1 */ - DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4), - DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4), - - /* DIV_FSYS2 */ - DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8, - CLK_SET_RATE_PARENT, 0), - DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4, - CLK_SET_RATE_PARENT, 0), - - /* DIV_PERIL0 */ - DIV(CLK_DIV_UART3, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4), - DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4), - DIV(CLK_DIV_UART1, "div_uart1", 
"mout_uart1", DIV_PERIL0, 4, 4), - DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4), - - /* DIV_PERIL1 */ - DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4), - DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4), - - /* DIV_PERIL2 */ - DIV_F(CLK_DIV_SPI2_PRE, "div_spi2_pre", "div_spi2", DIV_PERIL2, 8, 8, - CLK_SET_RATE_PARENT, 0), - DIV(CLK_DIV_SPI2, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4), - - /* DIV_PERIL4 */ - DIV(CLK_DIV_PCM2, "div_pcm2", "div_audio2", DIV_PERIL4, 20, 8), - DIV(CLK_DIV_AUDIO2, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4), - DIV(CLK_DIV_PCM1, "div_pcm1", "div_audio1", DIV_PERIL4, 20, 8), - DIV(CLK_DIV_AUDIO1, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4), - - /* DIV_PERIL5 */ - DIV(CLK_DIV_I2S1, "div_i2s1", "div_audio1", DIV_PERIL5, 0, 6), - - /* DIV_CAM1 */ - DIV(CLK_DIV_PXLASYNC_CSIS1_FIMC, "div_pxlasync_csis1_fimc", - "mout_pxlasync_csis1", DIV_CAM1, 24, 4), - DIV(CLK_DIV_PXLASYNC_CSIS0_FIMC, "div_pxlasync_csis0_fimc", - "mout_pxlasync_csis0", DIV_CAM1, 20, 4), - DIV(CLK_DIV_JPEG, "div_jpeg", "mout_jpeg", DIV_CAM1, 0, 4), - - /* DIV_CPU0 */ - DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3), - DIV_F(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3, - CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), - DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3), - DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3), - DIV(CLK_DIV_PERIPH, "div_periph", "div_core2", DIV_CPU0, 12, 3), - DIV(CLK_DIV_COREM1, "div_corem1", "div_core2", DIV_CPU0, 8, 3), - DIV(CLK_DIV_COREM0, "div_corem0", "div_core2", DIV_CPU0, 4, 3), - DIV_F(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3, - CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), - - /* DIV_CPU1 */ - DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3), - DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3), -}; - -static const struct samsung_gate_clock exynos4415_gate_clks[] __initconst = { - /* - * NOTE: Following table is sorted by register address in ascending - * order and then bitfield shift in descending order, as it is done - * in the User's Manual. When adding new entries, please make sure - * that the order is preserved, to avoid merge conflicts and make - * further work with defined data easier. 
- */ - - /* GATE_IP_LEFTBUS */ - GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_TVX, "async_tvx", "div_aclk_100", GATE_IP_LEFTBUS, 3, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0, - CLK_IGNORE_UNUSED, 0), - - /* GATE_IP_IMAGE */ - GATE(CLK_PPMUIMAGE, "ppmuimage", "div_aclk_100", GATE_IP_IMAGE, - 9, 0, 0), - GATE(CLK_QEMDMA2, "qe_mdma2", "div_aclk_100", GATE_IP_IMAGE, - 8, 0, 0), - GATE(CLK_QEROTATOR, "qe_rotator", "div_aclk_100", GATE_IP_IMAGE, - 7, 0, 0), - GATE(CLK_SMMUMDMA2, "smmu_mdam2", "div_aclk_100", GATE_IP_IMAGE, - 5, 0, 0), - GATE(CLK_SMMUROTATOR, "smmu_rotator", "div_aclk_100", GATE_IP_IMAGE, - 4, 0, 0), - GATE(CLK_MDMA2, "mdma2", "div_aclk_100", GATE_IP_IMAGE, 2, 0, 0), - GATE(CLK_ROTATOR, "rotator", "div_aclk_100", GATE_IP_IMAGE, 1, 0, 0), - - /* GATE_IP_RIGHTBUS */ - GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100", - GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_MAUDIOX, "async_maudiox", "div_aclk_100", - GATE_IP_RIGHTBUS, 7, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_MFCR, "async_mfcr", "div_aclk_100", - GATE_IP_RIGHTBUS, 6, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100", - GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100", - GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0), - GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100", - GATE_IP_RIGHTBUS, 2, CLK_IGNORE_UNUSED, 0), - GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100", - GATE_IP_RIGHTBUS, 1, CLK_IGNORE_UNUSED, 0), - GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100", - GATE_IP_RIGHTBUS, 0, CLK_IGNORE_UNUSED, 0), - - /* GATE_IP_PERIR */ - GATE(CLK_ANTIRBK_APBIF, "antirbk_apbif", "div_aclk_100", - GATE_IP_PERIR, 24, CLK_IGNORE_UNUSED, 0), - GATE(CLK_EFUSE_WRITER_APBIF, "efuse_writer_apbif", "div_aclk_100", - GATE_IP_PERIR, 23, CLK_IGNORE_UNUSED, 0), - GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100", - GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0), - GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100", - GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0), - GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100", - GATE_IP_PERIR, 17, 0, 0), - GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0), - GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0), - GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0), - GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0), - GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk_100", GATE_IP_PERIR, 11, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6, - 
CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0, - CLK_IGNORE_UNUSED, 0), - - /* GATE_SCLK_CAM - non-completed */ - GATE(CLK_SCLK_PXLAYSNC_CSIS1_FIMC, "sclk_pxlasync_csis1_fimc", - "div_pxlasync_csis1_fimc", GATE_SCLK_CAM, 11, - CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_PXLAYSNC_CSIS0_FIMC, "sclk_pxlasync_csis0_fimc", - "div_pxlasync_csis0_fimc", GATE_SCLK_CAM, - 10, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg", - GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1", - GATE_SCLK_CAM, 7, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0", - GATE_SCLK_CAM, 6, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1", - GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_FIMC3_LCLK, "sclk_fimc3_lclk", "div_fimc3_lclk", - GATE_SCLK_CAM, 3, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_FIMC2_LCLK, "sclk_fimc2_lclk", "div_fimc2_lclk", - GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_FIMC1_LCLK, "sclk_fimc1_lclk", "div_fimc1_lclk", - GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_FIMC0_LCLK, "sclk_fimc0_lclk", "div_fimc0_lclk", - GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_TV */ - GATE(CLK_SCLK_PIXEL, "sclk_pixel", "div_tv_blk", - GATE_SCLK_TV, 3, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", - GATE_SCLK_TV, 2, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MIXER, "sclk_mixer", "div_tv_blk", - GATE_SCLK_TV, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_MFC */ - GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc", - GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_G3D */ - GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", - GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_LCD */ - GATE(CLK_SCLK_MIPIDPHY4L, "sclk_mipidphy4l", "div_mipi0", - GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre", - GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_fimd0", - GATE_SCLK_LCD, 1, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0", - GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_MAUDIO */ - GATE(CLK_SCLK_PCM0, "sclk_pcm0", "div_pcm0", - GATE_SCLK_MAUDIO, 1, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0", - GATE_SCLK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_FSYS */ - GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre", - GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi", - GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre", - GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre", - GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre", - GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_SCLK_PERIL */ - GATE(CLK_SCLK_I2S, "sclk_i2s1", "div_i2s1", - GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_PCM2, "sclk_pcm2", 
"div_pcm2", - GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_PCM1, "sclk_pcm1", "div_pcm1", - GATE_SCLK_PERIL, 15, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2", - GATE_SCLK_PERIL, 14, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1", - GATE_SCLK_PERIL, 13, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", - GATE_SCLK_PERIL, 10, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi2_pre", - GATE_SCLK_PERIL, 8, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre", - GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre", - GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3", - GATE_SCLK_PERIL, 3, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2", - GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1", - GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0", - GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0), - - /* GATE_IP_CAM */ - GATE(CLK_SMMUFIMC_LITE2, "smmufimc_lite2", "div_aclk_160", GATE_IP_CAM, - 22, CLK_IGNORE_UNUSED, 0), - GATE(CLK_FIMC_LITE2, "fimc_lite2", "div_aclk_160", GATE_IP_CAM, - 20, CLK_IGNORE_UNUSED, 0), - GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_aclk_160", GATE_IP_CAM, - 18, CLK_IGNORE_UNUSED, 0), - GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_aclk_160", GATE_IP_CAM, - 17, CLK_IGNORE_UNUSED, 0), - GATE(CLK_PPMUCAMIF, "ppmucamif", "div_aclk_160", GATE_IP_CAM, - 16, CLK_IGNORE_UNUSED, 0), - GATE(CLK_SMMUJPEG, "smmujpeg", "div_aclk_160", GATE_IP_CAM, 11, 0, 0), - GATE(CLK_SMMUFIMC3, "smmufimc3", "div_aclk_160", GATE_IP_CAM, 10, 0, 0), - GATE(CLK_SMMUFIMC2, "smmufimc2", "div_aclk_160", GATE_IP_CAM, 9, 0, 0), - GATE(CLK_SMMUFIMC1, "smmufimc1", "div_aclk_160", GATE_IP_CAM, 8, 0, 0), - GATE(CLK_SMMUFIMC0, "smmufimc0", "div_aclk_160", GATE_IP_CAM, 7, 0, 0), - GATE(CLK_JPEG, "jpeg", "div_aclk_160", GATE_IP_CAM, 6, 0, 0), - GATE(CLK_CSIS1, "csis1", "div_aclk_160", GATE_IP_CAM, 5, 0, 0), - GATE(CLK_CSIS0, "csis0", "div_aclk_160", GATE_IP_CAM, 4, 0, 0), - GATE(CLK_FIMC3, "fimc3", "div_aclk_160", GATE_IP_CAM, 3, 0, 0), - GATE(CLK_FIMC2, "fimc2", "div_aclk_160", GATE_IP_CAM, 2, 0, 0), - GATE(CLK_FIMC1, "fimc1", "div_aclk_160", GATE_IP_CAM, 1, 0, 0), - GATE(CLK_FIMC0, "fimc0", "div_aclk_160", GATE_IP_CAM, 0, 0, 0), - - /* GATE_IP_TV */ - GATE(CLK_PPMUTV, "ppmutv", "div_aclk_100", GATE_IP_TV, 5, 0, 0), - GATE(CLK_SMMUTV, "smmutv", "div_aclk_100", GATE_IP_TV, 4, 0, 0), - GATE(CLK_HDMI, "hdmi", "div_aclk_100", GATE_IP_TV, 3, 0, 0), - GATE(CLK_MIXER, "mixer", "div_aclk_100", GATE_IP_TV, 1, 0, 0), - GATE(CLK_VP, "vp", "div_aclk_100", GATE_IP_TV, 0, 0, 0), - - /* GATE_IP_MFC */ - GATE(CLK_PPMUMFC_R, "ppmumfc_r", "div_aclk_200", GATE_IP_MFC, 4, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_SMMUMFC_R, "smmumfc_r", "div_aclk_200", GATE_IP_MFC, 2, 0, 0), - GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0), - GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0), - - /* GATE_IP_G3D */ - GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0), - - /* GATE_IP_LCD */ - GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_SMMUFIMD0, 
"smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0), - GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0), - GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0), - GATE(CLK_MIE0, "mie0", "div_aclk_160", GATE_IP_LCD, 1, 0, 0), - GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0), - - /* GATE_IP_FSYS */ - GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0), - GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17, - CLK_IGNORE_UNUSED, 0), - GATE(CLK_NFCON, "nfcon", "div_aclk_200", GATE_IP_FSYS, 16, 0, 0), - GATE(CLK_USBDEVICE, "usbdevice", "div_aclk_200", GATE_IP_FSYS, 13, - 0, 0), - GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0), - GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0), - GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0), - GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0), - GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0), - GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0), - GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0), - - /* GATE_IP_PERIL */ - GATE(CLK_SPDIF, "spdif", "div_aclk_100", GATE_IP_PERIL, 26, 0, 0), - GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0), - GATE(CLK_PCM2, "pcm2", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0), - GATE(CLK_PCM1, "pcm1", "div_aclk_100", GATE_IP_PERIL, 22, 0, 0), - GATE(CLK_I2S1, "i2s1", "div_aclk_100", GATE_IP_PERIL, 20, 0, 0), - GATE(CLK_SPI2, "spi2", "div_aclk_100", GATE_IP_PERIL, 18, 0, 0), - GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0), - GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0), - GATE(CLK_I2CHDMI, "i2chdmi", "div_aclk_100", GATE_IP_PERIL, 14, 0, 0), - GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0), - GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0), - GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0), - GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0), - GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0), - GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0), - GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0), - GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0), - GATE(CLK_UART3, "uart3", "div_aclk_100", GATE_IP_PERIL, 3, 0, 0), - GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0), - GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0), - GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0), -}; - -/* - * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL - */ -static const struct samsung_pll_rate_table exynos4415_pll_rates[] __initconst = { - PLL_35XX_RATE(1600000000, 400, 3, 1), - PLL_35XX_RATE(1500000000, 250, 2, 1), - PLL_35XX_RATE(1400000000, 175, 3, 0), - PLL_35XX_RATE(1300000000, 325, 3, 1), - PLL_35XX_RATE(1200000000, 400, 4, 1), - PLL_35XX_RATE(1100000000, 275, 3, 1), - PLL_35XX_RATE(1066000000, 533, 6, 1), - PLL_35XX_RATE(1000000000, 250, 3, 1), - PLL_35XX_RATE(960000000, 320, 4, 1), - PLL_35XX_RATE(900000000, 300, 4, 1), - PLL_35XX_RATE(850000000, 425, 6, 1), - PLL_35XX_RATE(800000000, 200, 3, 1), - PLL_35XX_RATE(700000000, 175, 3, 1), - PLL_35XX_RATE(667000000, 667, 12, 1), - PLL_35XX_RATE(600000000, 400, 4, 2), - PLL_35XX_RATE(550000000, 275, 3, 2), - PLL_35XX_RATE(533000000, 533, 6, 2), - PLL_35XX_RATE(520000000, 260, 3, 2), - PLL_35XX_RATE(500000000, 250, 3, 2), - PLL_35XX_RATE(440000000, 220, 3, 2), - PLL_35XX_RATE(400000000, 200, 3, 2), - 
PLL_35XX_RATE(350000000, 175, 3, 2), - PLL_35XX_RATE(300000000, 300, 3, 3), - PLL_35XX_RATE(266000000, 266, 3, 3), - PLL_35XX_RATE(200000000, 200, 3, 3), - PLL_35XX_RATE(160000000, 160, 3, 3), - PLL_35XX_RATE(100000000, 200, 3, 4), - { /* sentinel */ } -}; - -/* EPLL */ -static const struct samsung_pll_rate_table exynos4415_epll_rates[] __initconst = { - PLL_36XX_RATE(800000000, 200, 3, 1, 0), - PLL_36XX_RATE(288000000, 96, 2, 2, 0), - PLL_36XX_RATE(192000000, 128, 2, 3, 0), - PLL_36XX_RATE(144000000, 96, 2, 3, 0), - PLL_36XX_RATE(96000000, 128, 2, 4, 0), - PLL_36XX_RATE(84000000, 112, 2, 4, 0), - PLL_36XX_RATE(80750011, 107, 2, 4, 43691), - PLL_36XX_RATE(73728004, 98, 2, 4, 19923), - PLL_36XX_RATE(67987602, 271, 3, 5, 62285), - PLL_36XX_RATE(65911004, 175, 2, 5, 49982), - PLL_36XX_RATE(50000000, 200, 3, 5, 0), - PLL_36XX_RATE(49152003, 131, 2, 5, 4719), - PLL_36XX_RATE(48000000, 128, 2, 5, 0), - PLL_36XX_RATE(45250000, 181, 3, 5, 0), - { /* sentinel */ } -}; - -static const struct samsung_pll_clock exynos4415_plls[] __initconst = { - PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", - APLL_LOCK, APLL_CON0, exynos4415_pll_rates), - PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", - EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates), - PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc", - G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates), - PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll", - ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates), - PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", - "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates), -}; - -static const struct samsung_cmu_info cmu_info __initconst = { - .pll_clks = exynos4415_plls, - .nr_pll_clks = ARRAY_SIZE(exynos4415_plls), - .mux_clks = exynos4415_mux_clks, - .nr_mux_clks = ARRAY_SIZE(exynos4415_mux_clks), - .div_clks = exynos4415_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos4415_div_clks), - .gate_clks = exynos4415_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos4415_gate_clks), - .fixed_clks = exynos4415_fixed_rate_clks, - .nr_fixed_clks = ARRAY_SIZE(exynos4415_fixed_rate_clks), - .fixed_factor_clks = exynos4415_fixed_factor_clks, - .nr_fixed_factor_clks = ARRAY_SIZE(exynos4415_fixed_factor_clks), - .nr_clk_ids = CLK_NR_CLKS, - .clk_regs = exynos4415_cmu_clk_regs, - .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_clk_regs), -}; - -static void __init exynos4415_cmu_init(struct device_node *np) -{ - samsung_cmu_register_one(np, &cmu_info); -} -CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init); - -/* - * CMU DMC - */ - -#define MPLL_LOCK 0x008 -#define MPLL_CON0 0x108 -#define MPLL_CON1 0x10c -#define MPLL_CON2 0x110 -#define BPLL_LOCK 0x118 -#define BPLL_CON0 0x218 -#define BPLL_CON1 0x21c -#define BPLL_CON2 0x220 -#define SRC_DMC 0x300 -#define DIV_DMC1 0x504 - -static const unsigned long exynos4415_cmu_dmc_clk_regs[] __initconst = { - MPLL_LOCK, - MPLL_CON0, - MPLL_CON1, - MPLL_CON2, - BPLL_LOCK, - BPLL_CON0, - BPLL_CON1, - BPLL_CON2, - SRC_DMC, - DIV_DMC1, -}; - -PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; -PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; -PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", }; - -static const struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initconst = { - MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1), - MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1), - MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1), - MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1), -}; - -static 
const struct samsung_div_clock exynos4415_dmc_div_clks[] __initconst = { - DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3), - DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3), - DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus", - DIV_DMC1, 19, 2), - DIV(CLK_DMC_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3), - DIV(CLK_DMC_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3), - DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2), -}; - -static const struct samsung_pll_clock exynos4415_dmc_plls[] __initconst = { - PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll", - MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates), - PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll", - BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates), -}; - -static const struct samsung_cmu_info cmu_dmc_info __initconst = { - .pll_clks = exynos4415_dmc_plls, - .nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls), - .mux_clks = exynos4415_dmc_mux_clks, - .nr_mux_clks = ARRAY_SIZE(exynos4415_dmc_mux_clks), - .div_clks = exynos4415_dmc_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos4415_dmc_div_clks), - .nr_clk_ids = NR_CLKS_DMC, - .clk_regs = exynos4415_cmu_dmc_clk_regs, - .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs), -}; - -static void __init exynos4415_cmu_dmc_init(struct device_node *np) -{ - samsung_cmu_register_one(np, &cmu_dmc_info); -} -CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc", - exynos4415_cmu_dmc_init); diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index f096bd7df40c..11343a597093 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -6,7 +6,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Common Clock Framework support for Exynos5443 SoC. + * Common Clock Framework support for Exynos5433 SoC. 
*/ #include <linux/clk-provider.h> @@ -549,10 +549,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { 29, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400", ENABLE_ACLK_TOP, 26, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400", ENABLE_ACLK_TOP, 25, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266", ENABLE_ACLK_TOP, 24, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), @@ -616,7 +616,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { /* ENABLE_SCLK_TOP_MSCL */ GATE(CLK_SCLK_JPEG_MSCL, "sclk_jpeg_mscl", "div_sclk_jpeg", - ENABLE_SCLK_TOP_MSCL, 0, 0, 0), + ENABLE_SCLK_TOP_MSCL, 0, CLK_SET_RATE_PARENT, 0), /* ENABLE_SCLK_TOP_CAM1 */ GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "div_sclk_isp_sensor2_b", @@ -698,7 +698,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { * ATLAS_PLL & APOLLO_PLL & MEM0_PLL & MEM1_PLL & BUS_PLL & MFC_PLL * & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL */ -static const struct samsung_pll_rate_table exynos5443_pll_rates[] __initconst = { +static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = { PLL_35XX_RATE(2500000000U, 625, 6, 0), PLL_35XX_RATE(2400000000U, 500, 5, 0), PLL_35XX_RATE(2300000000U, 575, 6, 0), @@ -739,7 +739,9 @@ static const struct samsung_pll_rate_table exynos5443_pll_rates[] __initconst = PLL_35XX_RATE(350000000U, 350, 6, 2), PLL_35XX_RATE(333000000U, 222, 4, 2), PLL_35XX_RATE(300000000U, 500, 5, 3), + PLL_35XX_RATE(278000000U, 556, 6, 3), PLL_35XX_RATE(266000000U, 532, 6, 3), + PLL_35XX_RATE(250000000U, 500, 6, 3), PLL_35XX_RATE(200000000U, 400, 6, 3), PLL_35XX_RATE(166000000U, 332, 6, 3), PLL_35XX_RATE(160000000U, 320, 6, 3), @@ -749,7 +751,7 @@ static const struct samsung_pll_rate_table exynos5443_pll_rates[] __initconst = }; /* AUD_PLL */ -static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initconst = { +static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = { PLL_36XX_RATE(400000000U, 200, 3, 2, 0), PLL_36XX_RATE(393216000U, 197, 3, 2, -25690), PLL_36XX_RATE(384000000U, 128, 2, 2, 0), @@ -764,9 +766,9 @@ static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initcons static const struct samsung_pll_clock top_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk", - ISP_PLL_LOCK, ISP_PLL_CON0, exynos5443_pll_rates), + ISP_PLL_LOCK, ISP_PLL_CON0, exynos5433_pll_rates), PLL(pll_36xx, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk", - AUD_PLL_LOCK, AUD_PLL_CON0, exynos5443_aud_pll_rates), + AUD_PLL_LOCK, AUD_PLL_CON0, exynos5433_aud_pll_rates), }; static const struct samsung_cmu_info top_cmu_info __initconst = { @@ -820,7 +822,7 @@ PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", }; static const struct samsung_pll_clock cpif_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_MPHY_PLL, "fout_mphy_pll", "oscclk", - MPHY_PLL_LOCK, MPHY_PLL_CON0, exynos5443_pll_rates), + MPHY_PLL_LOCK, MPHY_PLL_CON0, exynos5433_pll_rates), }; static const struct samsung_mux_clock cpif_mux_clks[] __initconst = { @@ -1011,13 +1013,13 @@ static const unsigned long mif_clk_regs[] __initconst = { static const struct samsung_pll_clock mif_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_MEM0_PLL, "fout_mem0_pll", "oscclk", - MEM0_PLL_LOCK, 
MEM0_PLL_CON0, exynos5443_pll_rates), + MEM0_PLL_LOCK, MEM0_PLL_CON0, exynos5433_pll_rates), PLL(pll_35xx, CLK_FOUT_MEM1_PLL, "fout_mem1_pll", "oscclk", - MEM1_PLL_LOCK, MEM1_PLL_CON0, exynos5443_pll_rates), + MEM1_PLL_LOCK, MEM1_PLL_CON0, exynos5433_pll_rates), PLL(pll_35xx, CLK_FOUT_BUS_PLL, "fout_bus_pll", "oscclk", - BUS_PLL_LOCK, BUS_PLL_CON0, exynos5443_pll_rates), + BUS_PLL_LOCK, BUS_PLL_CON0, exynos5433_pll_rates), PLL(pll_35xx, CLK_FOUT_MFC_PLL, "fout_mfc_pll", "oscclk", - MFC_PLL_LOCK, MFC_PLL_CON0, exynos5443_pll_rates), + MFC_PLL_LOCK, MFC_PLL_CON0, exynos5433_pll_rates), }; /* list of all parent clock list */ @@ -1382,7 +1384,7 @@ static const struct samsung_gate_clock mif_gate_clks[] __initconst = { /* ENABLE_ACLK_MIF3 */ GATE(CLK_ACLK_BUS2_400, "aclk_bus2_400", "div_aclk_bus2_400", ENABLE_ACLK_MIF3, 4, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), GATE(CLK_ACLK_DISP_333, "aclk_disp_333", "div_aclk_disp_333", ENABLE_ACLK_MIF3, 1, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), @@ -2539,7 +2541,7 @@ PNAME(mout_sclk_decon_tv_vclk_b_disp_p) = { "mout_sclk_decon_tv_vclk_a_disp", static const struct samsung_pll_clock disp_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", "oscclk", - DISP_PLL_LOCK, DISP_PLL_CON0, exynos5443_pll_rates), + DISP_PLL_LOCK, DISP_PLL_CON0, exynos5433_pll_rates), }; static const struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initconst = { @@ -2559,8 +2561,10 @@ static const struct samsung_fixed_rate_clock disp_fixed_clks[] __initconst = { FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000), FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000), /* PHY clocks from MIPI_DPHY0 */ - FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, 0, 188000000), - FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, 0, 100000000), + FRATE(CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY, "phyclk_mipidphy0_bitclkdiv8_phy", + NULL, 0, 188000000), + FRATE(CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY, "phyclk_mipidphy0_rxclkesc0_phy", + NULL, 0, 100000000), /* PHY clocks from HDMI_PHY */ FRATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY, "phyclk_hdmiphy_tmds_clko_phy", NULL, 0, 300000000), @@ -3224,7 +3228,7 @@ PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", }; static const struct samsung_pll_clock g3d_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk", - G3D_PLL_LOCK, G3D_PLL_CON0, exynos5443_pll_rates), + G3D_PLL_LOCK, G3D_PLL_CON0, exynos5433_pll_rates), }; static const struct samsung_mux_clock g3d_mux_clks[] __initconst = { @@ -3514,7 +3518,7 @@ PNAME(mout_apollo_p) = { "mout_apollo_pll", static const struct samsung_pll_clock apollo_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_APOLLO_PLL, "fout_apollo_pll", "oscclk", - APOLLO_PLL_LOCK, APOLLO_PLL_CON0, exynos5443_pll_rates), + APOLLO_PLL_LOCK, APOLLO_PLL_CON0, exynos5433_pll_rates), }; static const struct samsung_mux_clock apollo_mux_clks[] __initconst = { @@ -3737,7 +3741,7 @@ PNAME(mout_atlas_p) = { "mout_atlas_pll", static const struct samsung_pll_clock atlas_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_ATLAS_PLL, "fout_atlas_pll", "oscclk", - ATLAS_PLL_LOCK, ATLAS_PLL_CON0, exynos5443_pll_rates), + ATLAS_PLL_LOCK, ATLAS_PLL_CON0, exynos5433_pll_rates), }; static const struct samsung_mux_clock atlas_mux_clks[] __initconst = { diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 9617825daabb..52290894857a 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ 
-136,11 +136,39 @@ static const struct clk_ops samsung_pll3000_clk_ops = { #define PLL35XX_MDIV_MASK (0x3FF) #define PLL35XX_PDIV_MASK (0x3F) #define PLL35XX_SDIV_MASK (0x7) -#define PLL35XX_LOCK_STAT_MASK (0x1) #define PLL35XX_MDIV_SHIFT (16) #define PLL35XX_PDIV_SHIFT (8) #define PLL35XX_SDIV_SHIFT (0) #define PLL35XX_LOCK_STAT_SHIFT (29) +#define PLL35XX_ENABLE_SHIFT (31) + +static int samsung_pll35xx_enable(struct clk_hw *hw) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 tmp; + + tmp = readl_relaxed(pll->con_reg); + tmp |= BIT(PLL35XX_ENABLE_SHIFT); + writel_relaxed(tmp, pll->con_reg); + + /* wait_lock_time */ + do { + cpu_relax(); + tmp = readl_relaxed(pll->con_reg); + } while (!(tmp & BIT(PLL35XX_LOCK_STAT_SHIFT))); + + return 0; +} + +static void samsung_pll35xx_disable(struct clk_hw *hw) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 tmp; + + tmp = readl_relaxed(pll->con_reg); + tmp &= ~BIT(PLL35XX_ENABLE_SHIFT); + writel_relaxed(tmp, pll->con_reg); +} static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) @@ -210,12 +238,13 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, (rate->sdiv << PLL35XX_SDIV_SHIFT); writel_relaxed(tmp, pll->con_reg); - /* wait_lock_time */ - do { - cpu_relax(); - tmp = readl_relaxed(pll->con_reg); - } while (!(tmp & (PLL35XX_LOCK_STAT_MASK - << PLL35XX_LOCK_STAT_SHIFT))); + /* wait_lock_time if enabled */ + if (tmp & BIT(PLL35XX_ENABLE_SHIFT)) { + do { + cpu_relax(); + tmp = readl_relaxed(pll->con_reg); + } while (!(tmp & BIT(PLL35XX_LOCK_STAT_SHIFT))); + } return 0; } @@ -223,6 +252,8 @@ static const struct clk_ops samsung_pll35xx_clk_ops = { .recalc_rate = samsung_pll35xx_recalc_rate, .round_rate = samsung_pll_round_rate, .set_rate = samsung_pll35xx_set_rate, + .enable = samsung_pll35xx_enable, + .disable = samsung_pll35xx_disable, }; static const struct clk_ops samsung_pll35xx_clk_min_ops = { diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index d7a1e772d95a..e0650c33863b 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -76,7 +76,7 @@ static struct syscore_ops s3c2410_clk_syscore_ops = { .resume = s3c2410_clk_resume, }; -static void s3c2410_clk_sleep_init(void) +static void __init s3c2410_clk_sleep_init(void) { s3c2410_save = samsung_clk_alloc_reg_dump(s3c2410_clk_regs, ARRAY_SIZE(s3c2410_clk_regs)); @@ -90,7 +90,7 @@ static void s3c2410_clk_sleep_init(void) return; } #else -static void s3c2410_clk_sleep_init(void) {} +static void __init s3c2410_clk_sleep_init(void) {} #endif PNAME(fclk_p) = { "mpll", "div_slow" }; diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c index ec873ee15d37..b8340a49921b 100644 --- a/drivers/clk/samsung/clk-s3c2412.c +++ b/drivers/clk/samsung/clk-s3c2412.c @@ -69,7 +69,7 @@ static struct syscore_ops s3c2412_clk_syscore_ops = { .resume = s3c2412_clk_resume, }; -static void s3c2412_clk_sleep_init(void) +static void __init s3c2412_clk_sleep_init(void) { s3c2412_save = samsung_clk_alloc_reg_dump(s3c2412_clk_regs, ARRAY_SIZE(s3c2412_clk_regs)); @@ -83,7 +83,7 @@ static void s3c2412_clk_sleep_init(void) return; } #else -static void s3c2412_clk_sleep_init(void) {} +static void __init s3c2412_clk_sleep_init(void) {} #endif static struct clk_div_table divxti_d[] = { diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index 5e24a17e10e6..abb935c42916 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ 
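/*
 * Hedged illustration of the new pll35xx .enable/.disable callbacks added
 * in the clk-pll.c hunk above: once they are part of
 * samsung_pll35xx_clk_ops, a pll_35xx clock can be gated through the
 * generic consumer API, and clk_prepare_enable() should only return after
 * the lock-status bit (bit 29) has been polled as set. The device pointer
 * and the "fout_apll" con_id below are hypothetical and only show the call
 * sequence.
 */
#include <linux/clk.h>
#include <linux/err.h>

static int pll35xx_consumer_sketch(struct device *dev)
{
	struct clk *pll = devm_clk_get(dev, "fout_apll");

	if (IS_ERR(pll))
		return PTR_ERR(pll);

	/* ends up in samsung_pll35xx_enable(): set bit 31, wait for lock */
	return clk_prepare_enable(pll);
}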
b/drivers/clk/samsung/clk-s3c2443.c @@ -89,7 +89,7 @@ static struct syscore_ops s3c2443_clk_syscore_ops = { .resume = s3c2443_clk_resume, }; -static void s3c2443_clk_sleep_init(void) +static void __init s3c2443_clk_sleep_init(void) { s3c2443_save = samsung_clk_alloc_reg_dump(s3c2443_clk_regs, ARRAY_SIZE(s3c2443_clk_regs)); @@ -103,7 +103,7 @@ static void s3c2443_clk_sleep_init(void) return; } #else -static void s3c2443_clk_sleep_init(void) {} +static void __init s3c2443_clk_sleep_init(void) {} #endif PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" }; diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index a48bd5f17330..7306867a0ab8 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -121,7 +121,7 @@ static struct syscore_ops s3c64xx_clk_syscore_ops = { .resume = s3c64xx_clk_resume, }; -static void s3c64xx_clk_sleep_init(void) +static void __init s3c64xx_clk_sleep_init(void) { s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs, ARRAY_SIZE(s3c64xx_clk_regs)); @@ -145,7 +145,7 @@ err_warn: __func__); } #else -static void s3c64xx_clk_sleep_init(void) {} +static void __init s3c64xx_clk_sleep_init(void) {} #endif /* List of parent clocks common for all S3C64xx SoCs. */ diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 8454c6e3dd65..695bbf9ef428 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -64,6 +64,17 @@ config SUN50I_A64_CCU select SUNXI_CCU_PHASE default ARM64 && ARCH_SUNXI +config SUN5I_CCU + bool "Support for the Allwinner sun5i family CCM" + select SUNXI_CCU_DIV + select SUNXI_CCU_MULT + select SUNXI_CCU_NK + select SUNXI_CCU_NKM + select SUNXI_CCU_NM + select SUNXI_CCU_MP + select SUNXI_CCU_PHASE + default MACH_SUN5I + config SUN6I_A31_CCU bool "Support for the Allwinner A31/A31s CCU" select SUNXI_CCU_DIV @@ -109,4 +120,25 @@ config SUN8I_H3_CCU select SUNXI_CCU_PHASE default MACH_SUN8I +config SUN8I_V3S_CCU + bool "Support for the Allwinner V3s CCU" + select SUNXI_CCU_DIV + select SUNXI_CCU_NK + select SUNXI_CCU_NKM + select SUNXI_CCU_NKMP + select SUNXI_CCU_NM + select SUNXI_CCU_MP + select SUNXI_CCU_PHASE + default MACH_SUN8I + +config SUN9I_A80_CCU + bool "Support for the Allwinner A80 CCU" + select SUNXI_CCU_DIV + select SUNXI_CCU_GATE + select SUNXI_CCU_NKMP + select SUNXI_CCU_NM + select SUNXI_CCU_MP + select SUNXI_CCU_PHASE + default MACH_SUN9I + endif diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index 24fbc6e5deb8..6feaac0c5600 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -19,7 +19,12 @@ obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o # SoC support obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o +obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o +obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o +obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o +obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o +obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c new file mode 100644 index 000000000000..06edaa523479 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -0,0 +1,1022 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/of_address.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_mult.h" +#include "ccu_nk.h" +#include "ccu_nkm.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun5i.h" + +static struct ccu_nkmp pll_core_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .p = _SUNXI_CCU_DIV(16, 2), + .common = { + .reg = 0x000, + .hw.init = CLK_HW_INIT("pll-core", + "hosc", + &ccu_nkmp_ops, + 0), + }, +}; + +/* + * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from + * the base (2x, 4x and 8x), and one variable divider (the one true + * pll audio). + * + * We don't have any need for the variable divider for now, so we just + * hardcode it to match with the clock names + */ +#define SUN5I_PLL_AUDIO_REG 0x008 + +static struct ccu_nm pll_audio_base_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0), + + /* + * The datasheet is wrong here, this doesn't have any + * offset + */ + .m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0), + .common = { + .reg = 0x008, + .hw.init = CLK_HW_INIT("pll-audio-base", + "hosc", + &ccu_nm_ops, + 0), + }, +}; + +static struct ccu_mult pll_video0_clk = { + .enable = BIT(31), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127), + .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14), + 270000000, 297000000), + .common = { + .reg = 0x010, + .features = (CCU_FEATURE_FRACTIONAL | + CCU_FEATURE_ALL_PREDIV), + .prediv = 8, + .hw.init = CLK_HW_INIT("pll-video0", + "hosc", + &ccu_mult_ops, + 0), + }, +}; + +static struct ccu_nkmp pll_ve_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .m = _SUNXI_CCU_DIV(0, 2), + .p = _SUNXI_CCU_DIV(16, 2), + .common = { + .reg = 0x018, + .hw.init = CLK_HW_INIT("pll-ve", + "hosc", + &ccu_nkmp_ops, + 0), + }, +}; + +static struct ccu_nk pll_ddr_base_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .common = { + .reg = 0x020, + .hw.init = CLK_HW_INIT("pll-ddr-base", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static SUNXI_CCU_M(pll_ddr_clk, "pll-ddr", "pll-ddr-base", 0x020, 0, 2, + CLK_IS_CRITICAL); + +static struct ccu_div pll_ddr_other_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(16, 2, CLK_DIVIDER_POWER_OF_TWO), + + .common = { + .reg = 0x020, + .hw.init = CLK_HW_INIT("pll-ddr-other", "pll-ddr-base", + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_nk pll_periph_clk = { + .enable = BIT(31), + .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), + .k = _SUNXI_CCU_MULT(4, 2), + .fixed_post_div = 2, + .common = { + .reg = 0x028, + .features = CCU_FEATURE_FIXED_POSTDIV, + .hw.init = CLK_HW_INIT("pll-periph", + "hosc", + &ccu_nk_ops, + 0), + }, +}; + +static struct ccu_mult pll_video1_clk = { + .enable = BIT(31), + .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127), + .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14), + 270000000, 297000000), + .common = { + 
.reg = 0x030, + .features = (CCU_FEATURE_FRACTIONAL | + CCU_FEATURE_ALL_PREDIV), + .prediv = 8, + .hw.init = CLK_HW_INIT("pll-video1", + "hosc", + &ccu_mult_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(hosc_clk, "hosc", "osc24M", 0x050, BIT(0), 0); + +#define SUN5I_AHB_REG 0x054 +static const char * const cpu_parents[] = { "osc32k", "hosc", + "pll-core" , "pll-periph" }; +static const struct ccu_mux_fixed_prediv cpu_predivs[] = { + { .index = 3, .div = 3, }, +}; +static struct ccu_mux cpu_clk = { + .mux = { + .shift = 16, + .width = 2, + .fixed_predivs = cpu_predivs, + .n_predivs = ARRAY_SIZE(cpu_predivs), + }, + .common = { + .reg = 0x054, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("cpu", + cpu_parents, + &ccu_mux_ops, + CLK_IS_CRITICAL), + } +}; + +static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x054, 0, 2, 0); + +static const char * const ahb_parents[] = { "axi" , "cpu", "pll-periph" }; +static const struct ccu_mux_fixed_prediv ahb_predivs[] = { + { .index = 2, .div = 2, }, +}; +static struct ccu_div ahb_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = { + .shift = 6, + .width = 2, + .fixed_predivs = ahb_predivs, + .n_predivs = ARRAY_SIZE(ahb_predivs), + }, + + .common = { + .reg = 0x054, + .hw.init = CLK_HW_INIT_PARENTS("ahb", + ahb_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct clk_div_table apb0_div_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE(apb0_clk, "apb0", "ahb", + 0x054, 8, 2, apb0_div_table, 0); + +static const char * const apb1_parents[] = { "hosc", "pll-periph", "osc32k" }; +static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", apb1_parents, 0x058, + 0, 5, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + 0); + +static SUNXI_CCU_GATE(axi_dram_clk, "axi-dram", "axi", + 0x05c, BIT(0), 0); + +static SUNXI_CCU_GATE(ahb_otg_clk, "ahb-otg", "ahb", + 0x060, BIT(0), 0); +static SUNXI_CCU_GATE(ahb_ehci_clk, "ahb-ehci", "ahb", + 0x060, BIT(1), 0); +static SUNXI_CCU_GATE(ahb_ohci_clk, "ahb-ohci", "ahb", + 0x060, BIT(2), 0); +static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb", + 0x060, BIT(5), 0); +static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", + 0x060, BIT(8), 0); +static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", + 0x060, BIT(9), 0); +static SUNXI_CCU_GATE(ahb_mmc2_clk, "ahb-mmc2", "ahb", + 0x060, BIT(10), 0); +static SUNXI_CCU_GATE(ahb_nand_clk, "ahb-nand", "ahb", + 0x060, BIT(13), 0); +static SUNXI_CCU_GATE(ahb_sdram_clk, "ahb-sdram", "ahb", + 0x060, BIT(14), CLK_IS_CRITICAL); +static SUNXI_CCU_GATE(ahb_emac_clk, "ahb-emac", "ahb", + 0x060, BIT(17), 0); +static SUNXI_CCU_GATE(ahb_ts_clk, "ahb-ts", "ahb", + 0x060, BIT(18), 0); +static SUNXI_CCU_GATE(ahb_spi0_clk, "ahb-spi0", "ahb", + 0x060, BIT(20), 0); +static SUNXI_CCU_GATE(ahb_spi1_clk, "ahb-spi1", "ahb", + 0x060, BIT(21), 0); +static SUNXI_CCU_GATE(ahb_spi2_clk, "ahb-spi2", "ahb", + 0x060, BIT(22), 0); +static SUNXI_CCU_GATE(ahb_gps_clk, "ahb-gps", "ahb", + 0x060, BIT(26), 0); +static SUNXI_CCU_GATE(ahb_hstimer_clk, "ahb-hstimer", "ahb", + 0x060, BIT(28), 0); + +static SUNXI_CCU_GATE(ahb_ve_clk, "ahb-ve", "ahb", + 0x064, BIT(0), 0); +static SUNXI_CCU_GATE(ahb_tve_clk, "ahb-tve", "ahb", + 0x064, BIT(2), 0); +static SUNXI_CCU_GATE(ahb_lcd_clk, "ahb-lcd", "ahb", + 0x064, BIT(4), 0); +static 
SUNXI_CCU_GATE(ahb_csi_clk, "ahb-csi", "ahb", + 0x064, BIT(8), 0); +static SUNXI_CCU_GATE(ahb_hdmi_clk, "ahb-hdmi", "ahb", + 0x064, BIT(11), 0); +static SUNXI_CCU_GATE(ahb_de_be_clk, "ahb-de-be", "ahb", + 0x064, BIT(12), 0); +static SUNXI_CCU_GATE(ahb_de_fe_clk, "ahb-de-fe", "ahb", + 0x064, BIT(14), 0); +static SUNXI_CCU_GATE(ahb_iep_clk, "ahb-iep", "ahb", + 0x064, BIT(19), 0); +static SUNXI_CCU_GATE(ahb_gpu_clk, "ahb-gpu", "ahb", + 0x064, BIT(20), 0); + +static SUNXI_CCU_GATE(apb0_codec_clk, "apb0-codec", "apb0", + 0x068, BIT(0), 0); +static SUNXI_CCU_GATE(apb0_spdif_clk, "apb0-spdif", "apb0", + 0x068, BIT(1), 0); +static SUNXI_CCU_GATE(apb0_i2s_clk, "apb0-i2s", "apb0", + 0x068, BIT(3), 0); +static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0", + 0x068, BIT(5), 0); +static SUNXI_CCU_GATE(apb0_ir_clk, "apb0-ir", "apb0", + 0x068, BIT(6), 0); +static SUNXI_CCU_GATE(apb0_keypad_clk, "apb0-keypad", "apb0", + 0x068, BIT(10), 0); + +static SUNXI_CCU_GATE(apb1_i2c0_clk, "apb1-i2c0", "apb1", + 0x06c, BIT(0), 0); +static SUNXI_CCU_GATE(apb1_i2c1_clk, "apb1-i2c1", "apb1", + 0x06c, BIT(1), 0); +static SUNXI_CCU_GATE(apb1_i2c2_clk, "apb1-i2c2", "apb1", + 0x06c, BIT(2), 0); +static SUNXI_CCU_GATE(apb1_uart0_clk, "apb1-uart0", "apb1", + 0x06c, BIT(16), 0); +static SUNXI_CCU_GATE(apb1_uart1_clk, "apb1-uart1", "apb1", + 0x06c, BIT(17), 0); +static SUNXI_CCU_GATE(apb1_uart2_clk, "apb1-uart2", "apb1", + 0x06c, BIT(18), 0); +static SUNXI_CCU_GATE(apb1_uart3_clk, "apb1-uart3", "apb1", + 0x06c, BIT(19), 0); + +static const char * const mod0_default_parents[] = { "hosc", "pll-periph", + "pll-ddr-other" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", mod0_default_parents, 0x098, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents, 0x09c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", mod0_default_parents, 0x0b0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", + "pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(i2s_clk, "i2s", i2s_parents, + 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const spdif_parents[] = { "pll-audio-8x", "pll-audio-4x", + 
"pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", spdif_parents, + 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const keypad_parents[] = { "hosc", "losc"}; +static const u8 keypad_table[] = { 0, 2 }; +static struct ccu_mp keypad_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 2, keypad_table), + + .common = { + .reg = 0x0c4, + .hw.init = CLK_HW_INIT_PARENTS("keypad", + keypad_parents, + &ccu_mp_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(usb_ohci_clk, "usb-ohci", "pll-periph", + 0x0cc, BIT(6), 0); +static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "pll-periph", + 0x0cc, BIT(8), 0); +static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "pll-periph", + 0x0cc, BIT(9), 0); + +static const char * const gps_parents[] = { "hosc", "pll-periph", + "pll-video1", "pll-ve" }; +static SUNXI_CCU_M_WITH_MUX_GATE(gps_clk, "gps", gps_parents, + 0x0d0, 0, 3, 24, 2, BIT(31), 0); + +static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr", + 0x100, BIT(0), 0); +static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "pll-ddr", + 0x100, BIT(1), 0); +static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "pll-ddr", + 0x100, BIT(3), 0); +static SUNXI_CCU_GATE(dram_tve_clk, "dram-tve", "pll-ddr", + 0x100, BIT(5), 0); +static SUNXI_CCU_GATE(dram_de_fe_clk, "dram-de-fe", "pll-ddr", + 0x100, BIT(25), 0); +static SUNXI_CCU_GATE(dram_de_be_clk, "dram-de-be", "pll-ddr", + 0x100, BIT(26), 0); +static SUNXI_CCU_GATE(dram_ace_clk, "dram-ace", "pll-ddr", + 0x100, BIT(29), 0); +static SUNXI_CCU_GATE(dram_iep_clk, "dram-iep", "pll-ddr", + 0x100, BIT(31), 0); + +static const char * const de_parents[] = { "pll-video0", "pll-video1", + "pll-ddr-other" }; +static SUNXI_CCU_M_WITH_MUX_GATE(de_be_clk, "de-be", de_parents, + 0x104, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(de_fe_clk, "de-fe", de_parents, + 0x10c, 0, 4, 24, 2, BIT(31), 0); + +static const char * const tcon_parents[] = { "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x" }; +static SUNXI_CCU_MUX_WITH_GATE(tcon_ch0_clk, "tcon-ch0-sclk", tcon_parents, + 0x118, 24, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_GATE(tcon_ch1_sclk2_clk, "tcon-ch1-sclk2", + tcon_parents, + 0x12c, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_GATE(tcon_ch1_sclk1_clk, "tcon-ch1-sclk1", "tcon-ch1-sclk2", + 0x12c, 11, 1, BIT(15), CLK_SET_RATE_PARENT); + +static const char * const csi_parents[] = { "hosc", "pll-video0", "pll-video1", + "pll-video0-2x", "pll-video1-2x" }; +static const u8 csi_table[] = { 0, 1, 2, 5, 6 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_clk, "csi", + csi_parents, csi_table, + 0x134, 0, 5, 24, 2, BIT(31), 0); + +static SUNXI_CCU_GATE(ve_clk, "ve", "pll-ve", + 0x13c, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio", + 0x140, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(avs_clk, "avs", "hosc", + 0x144, BIT(31), 0); + +static const char * const hdmi_parents[] = { "pll-video0", "pll-video0-2x" }; +static const u8 hdmi_table[] = { 0, 2 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(hdmi_clk, "hdmi", + hdmi_parents, hdmi_table, + 0x150, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); + +static const char * const gpu_parents[] = { "pll-video0", "pll-ve", + "pll-ddr-other", "pll-video1", + "pll-video1-2x" }; +static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, + 0x154, 0, 4, 24, 3, BIT(31), 0); + +static const char * const mbus_parents[] = { 
"hosc", "pll-periph", "pll-ddr" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, + 0x15c, 0, 4, 16, 2, 24, 2, BIT(31), CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(iep_clk, "iep", "de-be", + 0x160, BIT(31), 0); + +static struct ccu_common *sun5i_a10s_ccu_clks[] = { + &hosc_clk.common, + &pll_core_clk.common, + &pll_audio_base_clk.common, + &pll_video0_clk.common, + &pll_ve_clk.common, + &pll_ddr_base_clk.common, + &pll_ddr_clk.common, + &pll_ddr_other_clk.common, + &pll_periph_clk.common, + &pll_video1_clk.common, + &cpu_clk.common, + &axi_clk.common, + &ahb_clk.common, + &apb0_clk.common, + &apb1_clk.common, + &axi_dram_clk.common, + &ahb_otg_clk.common, + &ahb_ehci_clk.common, + &ahb_ohci_clk.common, + &ahb_ss_clk.common, + &ahb_dma_clk.common, + &ahb_bist_clk.common, + &ahb_mmc0_clk.common, + &ahb_mmc1_clk.common, + &ahb_mmc2_clk.common, + &ahb_nand_clk.common, + &ahb_sdram_clk.common, + &ahb_emac_clk.common, + &ahb_ts_clk.common, + &ahb_spi0_clk.common, + &ahb_spi1_clk.common, + &ahb_spi2_clk.common, + &ahb_gps_clk.common, + &ahb_hstimer_clk.common, + &ahb_ve_clk.common, + &ahb_tve_clk.common, + &ahb_lcd_clk.common, + &ahb_csi_clk.common, + &ahb_hdmi_clk.common, + &ahb_de_be_clk.common, + &ahb_de_fe_clk.common, + &ahb_iep_clk.common, + &ahb_gpu_clk.common, + &apb0_codec_clk.common, + &apb0_spdif_clk.common, + &apb0_i2s_clk.common, + &apb0_pio_clk.common, + &apb0_ir_clk.common, + &apb0_keypad_clk.common, + &apb1_i2c0_clk.common, + &apb1_i2c1_clk.common, + &apb1_i2c2_clk.common, + &apb1_uart0_clk.common, + &apb1_uart1_clk.common, + &apb1_uart2_clk.common, + &apb1_uart3_clk.common, + &nand_clk.common, + &mmc0_clk.common, + &mmc1_clk.common, + &mmc2_clk.common, + &ts_clk.common, + &ss_clk.common, + &spi0_clk.common, + &spi1_clk.common, + &spi2_clk.common, + &ir_clk.common, + &i2s_clk.common, + &spdif_clk.common, + &keypad_clk.common, + &usb_ohci_clk.common, + &usb_phy0_clk.common, + &usb_phy1_clk.common, + &gps_clk.common, + &dram_ve_clk.common, + &dram_csi_clk.common, + &dram_ts_clk.common, + &dram_tve_clk.common, + &dram_de_fe_clk.common, + &dram_de_be_clk.common, + &dram_ace_clk.common, + &dram_iep_clk.common, + &de_be_clk.common, + &de_fe_clk.common, + &tcon_ch0_clk.common, + &tcon_ch1_sclk2_clk.common, + &tcon_ch1_sclk1_clk.common, + &csi_clk.common, + &ve_clk.common, + &codec_clk.common, + &avs_clk.common, + &hdmi_clk.common, + &gpu_clk.common, + &mbus_clk.common, + &iep_clk.common, +}; + +/* We hardcode the divider to 4 for now */ +static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", + "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", + "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", + "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", + "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", + "pll-video0", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", + "pll-video1", 1, 2, CLK_SET_RATE_PARENT); + +static struct clk_hw_onecell_data sun5i_a10s_hw_clks = { + .hws = { + [CLK_HOSC] = &hosc_clk.common.hw, + [CLK_PLL_CORE] = &pll_core_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = 
&pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw, + [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB] = &ahb_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_DRAM_AXI] = &axi_dram_clk.common.hw, + [CLK_AHB_OTG] = &ahb_otg_clk.common.hw, + [CLK_AHB_EHCI] = &ahb_ehci_clk.common.hw, + [CLK_AHB_OHCI] = &ahb_ohci_clk.common.hw, + [CLK_AHB_SS] = &ahb_ss_clk.common.hw, + [CLK_AHB_DMA] = &ahb_dma_clk.common.hw, + [CLK_AHB_BIST] = &ahb_bist_clk.common.hw, + [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw, + [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw, + [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw, + [CLK_AHB_NAND] = &ahb_nand_clk.common.hw, + [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw, + [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw, + [CLK_AHB_TS] = &ahb_ts_clk.common.hw, + [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw, + [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw, + [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw, + [CLK_AHB_GPS] = &ahb_gps_clk.common.hw, + [CLK_AHB_HSTIMER] = &ahb_hstimer_clk.common.hw, + [CLK_AHB_VE] = &ahb_ve_clk.common.hw, + [CLK_AHB_TVE] = &ahb_tve_clk.common.hw, + [CLK_AHB_LCD] = &ahb_lcd_clk.common.hw, + [CLK_AHB_CSI] = &ahb_csi_clk.common.hw, + [CLK_AHB_HDMI] = &ahb_hdmi_clk.common.hw, + [CLK_AHB_DE_BE] = &ahb_de_be_clk.common.hw, + [CLK_AHB_DE_FE] = &ahb_de_fe_clk.common.hw, + [CLK_AHB_IEP] = &ahb_iep_clk.common.hw, + [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw, + [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw, + [CLK_APB0_I2S] = &apb0_i2s_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB0_KEYPAD] = &apb0_keypad_clk.common.hw, + [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw, + [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw, + [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw, + [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw, + [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw, + [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw, + [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + [CLK_I2S] = &i2s_clk.common.hw, + [CLK_KEYPAD] = &keypad_clk.common.hw, + [CLK_USB_OHCI] = &usb_ohci_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_GPS] = &gps_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DRAM_TVE] = &dram_tve_clk.common.hw, + [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw, + [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw, + [CLK_DRAM_ACE] = &dram_ace_clk.common.hw, + [CLK_DRAM_IEP] = &dram_iep_clk.common.hw, + [CLK_DE_BE] = &de_be_clk.common.hw, + [CLK_DE_FE] = &de_fe_clk.common.hw, + [CLK_TCON_CH0] = &tcon_ch0_clk.common.hw, + [CLK_TCON_CH1_SCLK] = &tcon_ch1_sclk2_clk.common.hw, + [CLK_TCON_CH1] = &tcon_ch1_sclk1_clk.common.hw, + [CLK_CSI] = 
&csi_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_IEP] = &iep_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun5i_a10s_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + [RST_USB_PHY1] = { 0x0cc, BIT(1) }, + + [RST_GPS] = { 0x0d0, BIT(30) }, + + [RST_DE_BE] = { 0x104, BIT(30) }, + + [RST_DE_FE] = { 0x10c, BIT(30) }, + + [RST_TVE] = { 0x118, BIT(29) }, + [RST_LCD] = { 0x118, BIT(30) }, + + [RST_CSI] = { 0x134, BIT(30) }, + + [RST_VE] = { 0x13c, BIT(0) }, + + [RST_GPU] = { 0x154, BIT(30) }, + + [RST_IEP] = { 0x160, BIT(30) }, +}; + +static const struct sunxi_ccu_desc sun5i_a10s_ccu_desc = { + .ccu_clks = sun5i_a10s_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun5i_a10s_ccu_clks), + + .hw_clks = &sun5i_a10s_hw_clks, + + .resets = sun5i_a10s_ccu_resets, + .num_resets = ARRAY_SIZE(sun5i_a10s_ccu_resets), +}; + +/* + * The A13 is the A10s minus the TS, GPS, HDMI, I2S and the keypad + */ +static struct clk_hw_onecell_data sun5i_a13_hw_clks = { + .hws = { + [CLK_HOSC] = &hosc_clk.common.hw, + [CLK_PLL_CORE] = &pll_core_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw, + [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB] = &ahb_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_DRAM_AXI] = &axi_dram_clk.common.hw, + [CLK_AHB_OTG] = &ahb_otg_clk.common.hw, + [CLK_AHB_EHCI] = &ahb_ehci_clk.common.hw, + [CLK_AHB_OHCI] = &ahb_ohci_clk.common.hw, + [CLK_AHB_SS] = &ahb_ss_clk.common.hw, + [CLK_AHB_DMA] = &ahb_dma_clk.common.hw, + [CLK_AHB_BIST] = &ahb_bist_clk.common.hw, + [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw, + [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw, + [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw, + [CLK_AHB_NAND] = &ahb_nand_clk.common.hw, + [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw, + [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw, + [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw, + [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw, + [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw, + [CLK_AHB_HSTIMER] = &ahb_hstimer_clk.common.hw, + [CLK_AHB_VE] = &ahb_ve_clk.common.hw, + [CLK_AHB_TVE] = &ahb_tve_clk.common.hw, + [CLK_AHB_LCD] = &ahb_lcd_clk.common.hw, + [CLK_AHB_CSI] = &ahb_csi_clk.common.hw, + [CLK_AHB_DE_BE] = &ahb_de_be_clk.common.hw, + [CLK_AHB_DE_FE] = &ahb_de_fe_clk.common.hw, + [CLK_AHB_IEP] = &ahb_iep_clk.common.hw, + [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw, + [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw, + [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw, + [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw, + [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw, + [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw, + 
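/*
 * A minimal sketch, not the driver's actual init code (which is not shown
 * here and may differ, e.g. by forcing register defaults first), of how a
 * descriptor such as sun5i_a10s_ccu_desc defined above is normally hooked
 * up: map the CCU registers from the device tree node and hand everything
 * to the common sunxi_ccu_probe() helper, which registers the ccu_common
 * clocks, the clk_hw_onecell_data table and the reset controller. The
 * compatible string and function name below are assumptions.
 */
static void __init sun5i_a10s_ccu_sketch_setup(struct device_node *node)
{
	void __iomem *reg;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg)) {
		pr_err("%s: could not map the CCU registers\n",
		       of_node_full_name(node));
		return;
	}

	sunxi_ccu_probe(node, reg, &sun5i_a10s_ccu_desc);
}
CLK_OF_DECLARE(sun5i_a10s_ccu_sketch, "allwinner,sun5i-a10s-ccu",
	       sun5i_a10s_ccu_sketch_setup);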
[CLK_APB1_UART2] = &apb1_uart2_clk.common.hw, + [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + [CLK_USB_OHCI] = &usb_ohci_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_TVE] = &dram_tve_clk.common.hw, + [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw, + [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw, + [CLK_DRAM_ACE] = &dram_ace_clk.common.hw, + [CLK_DRAM_IEP] = &dram_iep_clk.common.hw, + [CLK_DE_BE] = &de_be_clk.common.hw, + [CLK_DE_FE] = &de_fe_clk.common.hw, + [CLK_TCON_CH0] = &tcon_ch0_clk.common.hw, + [CLK_TCON_CH1_SCLK] = &tcon_ch1_sclk2_clk.common.hw, + [CLK_TCON_CH1] = &tcon_ch1_sclk1_clk.common.hw, + [CLK_CSI] = &csi_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_IEP] = &iep_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static const struct sunxi_ccu_desc sun5i_a13_ccu_desc = { + .ccu_clks = sun5i_a10s_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun5i_a10s_ccu_clks), + + .hw_clks = &sun5i_a13_hw_clks, + + .resets = sun5i_a10s_ccu_resets, + .num_resets = ARRAY_SIZE(sun5i_a10s_ccu_resets), +}; + +/* + * The GR8 is the A10s CCU minus the HDMI and keypad, plus SPDIF + */ +static struct clk_hw_onecell_data sun5i_gr8_hw_clks = { + .hws = { + [CLK_HOSC] = &hosc_clk.common.hw, + [CLK_PLL_CORE] = &pll_core_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw, + [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB] = &ahb_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_DRAM_AXI] = &axi_dram_clk.common.hw, + [CLK_AHB_OTG] = &ahb_otg_clk.common.hw, + [CLK_AHB_EHCI] = &ahb_ehci_clk.common.hw, + [CLK_AHB_OHCI] = &ahb_ohci_clk.common.hw, + [CLK_AHB_SS] = &ahb_ss_clk.common.hw, + [CLK_AHB_DMA] = &ahb_dma_clk.common.hw, + [CLK_AHB_BIST] = &ahb_bist_clk.common.hw, + [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw, + [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw, + [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw, + [CLK_AHB_NAND] = &ahb_nand_clk.common.hw, + [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw, + [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw, + [CLK_AHB_TS] = &ahb_ts_clk.common.hw, + [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw, + [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw, + [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw, + [CLK_AHB_GPS] = &ahb_gps_clk.common.hw, + [CLK_AHB_HSTIMER] = &ahb_hstimer_clk.common.hw, + [CLK_AHB_VE] = &ahb_ve_clk.common.hw, + 
[CLK_AHB_TVE] = &ahb_tve_clk.common.hw, + [CLK_AHB_LCD] = &ahb_lcd_clk.common.hw, + [CLK_AHB_CSI] = &ahb_csi_clk.common.hw, + [CLK_AHB_DE_BE] = &ahb_de_be_clk.common.hw, + [CLK_AHB_DE_FE] = &ahb_de_fe_clk.common.hw, + [CLK_AHB_IEP] = &ahb_iep_clk.common.hw, + [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw, + [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw, + [CLK_APB0_SPDIF] = &apb0_spdif_clk.common.hw, + [CLK_APB0_I2S] = &apb0_i2s_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw, + [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw, + [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw, + [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw, + [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw, + [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw, + [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + [CLK_I2S] = &i2s_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_USB_OHCI] = &usb_ohci_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_GPS] = &gps_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DRAM_TVE] = &dram_tve_clk.common.hw, + [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw, + [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw, + [CLK_DRAM_ACE] = &dram_ace_clk.common.hw, + [CLK_DRAM_IEP] = &dram_iep_clk.common.hw, + [CLK_DE_BE] = &de_be_clk.common.hw, + [CLK_DE_FE] = &de_fe_clk.common.hw, + [CLK_TCON_CH0] = &tcon_ch0_clk.common.hw, + [CLK_TCON_CH1_SCLK] = &tcon_ch1_sclk2_clk.common.hw, + [CLK_TCON_CH1] = &tcon_ch1_sclk1_clk.common.hw, + [CLK_CSI] = &csi_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_CODEC] = &codec_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_IEP] = &iep_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static const struct sunxi_ccu_desc sun5i_gr8_ccu_desc = { + .ccu_clks = sun5i_a10s_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun5i_a10s_ccu_clks), + + .hw_clks = &sun5i_gr8_hw_clks, + + .resets = sun5i_a10s_ccu_resets, + .num_resets = ARRAY_SIZE(sun5i_a10s_ccu_resets), +}; + +static void __init sun5i_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *desc) +{ + void __iomem *reg; + u32 val; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + /* Force the PLL-Audio-1x divider to 4 */ + val = readl(reg + SUN5I_PLL_AUDIO_REG); + val &= ~GENMASK(19, 16); + writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG); + + /* + * Use the peripheral PLL as the AHB parent, instead of CPU / + * AXI which have rate changes due to cpufreq. + * + * This is especially a big deal for the HS timer whose parent + * clock is AHB. 
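As an aside, not part of the patch itself: the fixups in sun5i_ccu_init around this comment all follow the same read/modify/write shape, clearing a GENMASK()-selected field and or-ing in the new value. A minimal standalone sketch of that field arithmetic, with GENMASK() spelled out locally and update_field() as a made-up helper name:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Replace bits [hi:lo] of val with field, leaving the other bits alone. */
static uint32_t update_field(uint32_t val, unsigned int hi, unsigned int lo,
			     uint32_t field)
{
	val &= ~GENMASK(hi, lo);
	return val | (field << lo);
}

int main(void)
{
	/* The PLL-Audio fixup above: put 3 into bits [19:16]. */
	printf("0x%08x\n", update_field(0xffffffffu, 19, 16, 3));
	/* The AHB parent switch below: put source index 2 into bits [7:6]. */
	printf("0x%08x\n", update_field(0xffffffffu, 7, 6, 2));
	return 0;
}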
+ */ + val = readl(reg + SUN5I_AHB_REG); + val &= ~GENMASK(7, 6); + writel(val | (2 << 6), reg + SUN5I_AHB_REG); + + sunxi_ccu_probe(node, reg, desc); +} + +static void __init sun5i_a10s_ccu_setup(struct device_node *node) +{ + sun5i_ccu_init(node, &sun5i_a10s_ccu_desc); +} +CLK_OF_DECLARE(sun5i_a10s_ccu, "allwinner,sun5i-a10s-ccu", + sun5i_a10s_ccu_setup); + +static void __init sun5i_a13_ccu_setup(struct device_node *node) +{ + sun5i_ccu_init(node, &sun5i_a13_ccu_desc); +} +CLK_OF_DECLARE(sun5i_a13_ccu, "allwinner,sun5i-a13-ccu", + sun5i_a13_ccu_setup); + +static void __init sun5i_gr8_ccu_setup(struct device_node *node) +{ + sun5i_ccu_init(node, &sun5i_gr8_ccu_desc); +} +CLK_OF_DECLARE(sun5i_gr8_ccu, "nextthing,gr8-ccu", + sun5i_gr8_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.h b/drivers/clk/sunxi-ng/ccu-sun5i.h new file mode 100644 index 000000000000..8144487eb7ca --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun5i.h @@ -0,0 +1,67 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN5I_H_ +#define _CCU_SUN5I_H_ + +#include <dt-bindings/clock/sun5i-ccu.h> +#include <dt-bindings/reset/sun5i-ccu.h> + +/* The HOSC is exported */ +#define CLK_PLL_CORE 2 +#define CLK_PLL_AUDIO_BASE 3 +#define CLK_PLL_AUDIO 4 +#define CLK_PLL_AUDIO_2X 5 +#define CLK_PLL_AUDIO_4X 6 +#define CLK_PLL_AUDIO_8X 7 +#define CLK_PLL_VIDEO0 8 +#define CLK_PLL_VIDEO0_2X 9 +#define CLK_PLL_VE 10 +#define CLK_PLL_DDR_BASE 11 +#define CLK_PLL_DDR 12 +#define CLK_PLL_DDR_OTHER 13 +#define CLK_PLL_PERIPH 14 +#define CLK_PLL_VIDEO1 15 +#define CLK_PLL_VIDEO1_2X 16 + +/* The CPU clock is exported */ + +#define CLK_AXI 18 +#define CLK_AHB 19 +#define CLK_APB0 20 +#define CLK_APB1 21 +#define CLK_DRAM_AXI 22 + +/* AHB gates are exported */ +/* APB0 gates are exported */ +/* APB1 gates are exported */ +/* Modules clocks are exported */ +/* USB clocks are exported */ +/* GPS clock is exported */ +/* DRAM gates are exported */ +/* More display modules clocks are exported */ + +#define CLK_TCON_CH1_SCLK 91 + +/* The rest of the module clocks are exported */ + +#define CLK_MBUS 99 + +/* And finally the IEP clock */ + +#define CLK_NUMBER (CLK_IEP + 1) + +#endif /* _CCU_SUN5I_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index fc75a335a7ce..4c9a920ff4ab 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -468,8 +468,8 @@ static SUNXI_CCU_MUX_WITH_GATE(daudio0_clk, "daudio0", daudio_parents, static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents, 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio", - 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", daudio_parents, + 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0x0cc, BIT(8), 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c 
b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 9bd1f78a0547..a7b3c08ed0e2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -170,7 +170,7 @@ static SUNXI_CCU_N_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1", static const char * const cpux_parents[] = { "osc32k", "osc24M", "pll-cpux" , "pll-cpux" }; static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents, - 0x050, 16, 2, CLK_IS_CRITICAL); + 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT); static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0); @@ -440,7 +440,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", - 0x140, BIT(31), 0); + 0x140, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x", 0x140, BIT(30), 0); static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", @@ -468,7 +468,7 @@ static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(drc_clk, "drc", 0x180, 0, 4, 24, 3, BIT(31), 0); static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", - 0x1a0, 0, 3, BIT(31), 0); + 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); static const char * const ats_parents[] = { "osc24M", "pll-periph" }; static SUNXI_CCU_M_WITH_MUX_GATE(ats_clk, "ats", ats_parents, @@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = { .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets), }; +static struct ccu_mux_nb sun8i_a33_cpu_nb = { + .common = &cpux_clk.common, + .cm = &cpux_clk.mux, + .delay_us = 1, /* > 8 clock cycles at 24 MHz */ + .bypass_index = 1, /* index of 24 MHz oscillator */ +}; + static void __init sun8i_a33_ccu_setup(struct device_node *node) { void __iomem *reg; @@ -775,6 +782,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node) writel(val, reg + SUN8I_A33_PLL_MIPI_REG); sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc); + + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, + &sun8i_a33_cpu_nb); } CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu", sun8i_a33_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 21c427d86f28..a26c8a19fe93 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -803,6 +803,13 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { .num_resets = ARRAY_SIZE(sun8i_h3_ccu_resets), }; +static struct ccu_mux_nb sun8i_h3_cpu_nb = { + .common = &cpux_clk.common, + .cm = &cpux_clk.mux, + .delay_us = 1, /* > 8 clock cycles at 24 MHz */ + .bypass_index = 1, /* index of 24 MHz oscillator */ +}; + static void __init sun8i_h3_ccu_setup(struct device_node *node) { void __iomem *reg; @@ -821,6 +828,9 @@ static void __init sun8i_h3_ccu_setup(struct device_node *node) writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG); sunxi_ccu_probe(node, reg, &sun8i_h3_ccu_desc); + + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, + &sun8i_h3_cpu_nb); } CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu", sun8i_h3_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c new file mode 100644 index 000000000000..e58706b40ae9 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on ccu-sun8i-h3.c, which is: + * Copyright (c) 2016 Maxime Ripard. All rights reserved. 
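A side note on the ccu_mux_nb hunks above, not part of the patch: the registered notifier presumably flips the CPU mux onto the 24 MHz oscillator (bypass_index 1) while the PLL is reprogrammed, waits delay_us, then switches back. The 1 us value covers the stated "> 8 clock cycles at 24 MHz", which is roughly 334 ns. A self-contained check of that arithmetic, with bypass_delay_ns() as a local helper rather than a kernel API:

#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/* Minimum time to sit on the bypass clock, rounded up to whole nanoseconds. */
static unsigned long long bypass_delay_ns(unsigned int cycles,
					  unsigned long long rate_hz)
{
	return (cycles * NSEC_PER_SEC + rate_hz - 1) / rate_hz;
}

int main(void)
{
	/* 8 cycles of the 24 MHz oscillator: ~334 ns, so 1 us is plenty. */
	printf("%llu ns\n", bypass_delay_ns(8, 24000000ULL));
	return 0;
}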
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/of_address.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_mult.h" +#include "ccu_nk.h" +#include "ccu_nkm.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun8i-v3s.h" + +static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpu_clk, "pll-cpu", + "osc24M", 0x000, + 8, 5, /* N */ + 4, 2, /* K */ + 0, 2, /* M */ + 16, 2, /* P */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +/* + * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from + * the base (2x, 4x and 8x), and one variable divider (the one true + * pll audio). + * + * We don't have any need for the variable divider for now, so we just + * hardcode it to match with the clock names + */ +#define SUN8I_V3S_PLL_AUDIO_REG 0x008 + +static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", + "osc24M", 0x008, + 8, 7, /* N */ + 0, 5, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video", + "osc24M", 0x0010, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", + "osc24M", 0x0018, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr", + "osc24M", 0x020, + 8, 5, /* N */ + 4, 2, /* K */ + 0, 2, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0", + "osc24M", 0x028, + 8, 5, /* N */ + 4, 2, /* K */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_isp_clk, "pll-isp", + "osc24M", 0x002c, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1", + "osc24M", 0x044, + 8, 5, /* N */ + 4, 2, /* K */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 2, /* post-div */ + 0); + +static const char * const cpu_parents[] = { "osc32k", "osc24M", + "pll-cpu", "pll-cpu" }; +static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents, + 0x050, 16, 2, CLK_IS_CRITICAL); + +static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x050, 0, 2, 0); + +static const char * const ahb1_parents[] = { "osc32k", "osc24M", + "axi", "pll-periph0" }; +static struct ccu_div ahb1_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + + .mux = { + .shift = 12, + .width = 2, + + .variable_prediv = { + .index = 3, + .shift = 6, + .width = 2, + }, + }, + + .common = { + .reg = 0x054, + 
.features = CCU_FEATURE_VARIABLE_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ahb1", + ahb1_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct clk_div_table apb1_div_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1", + 0x054, 8, 2, apb1_div_table, 0); + +static const char * const apb2_parents[] = { "osc32k", "osc24M", + "pll-periph0", "pll-periph0" }; +static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058, + 0, 5, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + 0); + +static const char * const ahb2_parents[] = { "ahb1", "pll-periph0" }; +static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = { + { .index = 1, .div = 2 }, +}; +static struct ccu_mux ahb2_clk = { + .mux = { + .shift = 0, + .width = 1, + .fixed_predivs = ahb2_fixed_predivs, + .n_predivs = ARRAY_SIZE(ahb2_fixed_predivs), + }, + + .common = { + .reg = 0x05c, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ahb2", + ahb2_parents, + &ccu_mux_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1", + 0x060, BIT(5), 0); +static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1", + 0x060, BIT(8), 0); +static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1", + 0x060, BIT(9), 0); +static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1", + 0x060, BIT(10), 0); +static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1", + 0x060, BIT(14), 0); +static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2", + 0x060, BIT(17), 0); +static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1", + 0x060, BIT(19), 0); +static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1", + 0x060, BIT(20), 0); +static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1", + 0x060, BIT(24), 0); +static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1", + 0x060, BIT(26), 0); +static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1", + 0x060, BIT(29), 0); + +static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1", + 0x064, BIT(0), 0); +static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1", + 0x064, BIT(4), 0); +static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1", + 0x064, BIT(8), 0); +static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1", + 0x064, BIT(12), 0); + +static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1", + 0x068, BIT(0), 0); +static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1", + 0x068, BIT(5), 0); + +static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", + 0x06c, BIT(0), 0); +static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", + 0x06c, BIT(1), 0); +static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", + 0x06c, BIT(16), 0); +static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", + 0x06c, BIT(17), 0); +static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", + 0x06c, BIT(18), 0); + +static SUNXI_CCU_GATE(bus_ephy_clk, "bus-ephy", "ahb1", + 0x070, BIT(0), 0); +static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1", + 0x070, BIT(7), 0); + +static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0", + "pll-periph1" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0", + 0x088, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0", + 0x088, 8, 3, 0); + +static 
SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1", + 0x08c, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1", + 0x08c, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2", + 0x090, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2", + 0x090, 8, 3, 0); + +static const char * const ce_parents[] = { "osc24M", "pll-periph0", }; + +static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x09c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", + 0x0cc, BIT(8), 0); +static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M", + 0x0cc, BIT(16), 0); + +static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" }; +static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents, + 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram", + 0x100, BIT(0), 0); +static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram", + 0x100, BIT(1), 0); +static SUNXI_CCU_GATE(dram_ehci_clk, "dram-ehci", "dram", + 0x100, BIT(17), 0); +static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram", + 0x100, BIT(18), 0); + +static const char * const de_parents[] = { "pll-video", "pll-periph0" }; +static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, + 0x104, 0, 4, 24, 2, BIT(31), 0); + +static const char * const tcon_parents[] = { "pll-video" }; +static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents, + 0x118, 0, 4, 24, 3, BIT(31), 0); + +static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", + 0x130, BIT(31), 0); + +static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", + "pll-periph0", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents, + 0x130, 0, 5, 8, 3, BIT(15), 0); + +static const char * const csi1_sclk_parents[] = { "pll-video", "pll-isp" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi1_sclk_clk, "csi-sclk", csi1_sclk_parents, + 0x134, 16, 4, 24, 3, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi-mclk", csi_mclk_parents, + 0x134, 0, 5, 8, 3, BIT(15), 0); + +static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", + 0x13c, 16, 3, BIT(31), 0); + +static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", + 0x140, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", + 0x144, BIT(31), 0); + +static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x", + "pll-ddr" }; +static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, + 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL); + +static const char * const mipi_csi_parents[] = { "pll-video", "pll-periph0", + "pll-isp" }; +static SUNXI_CCU_M_WITH_MUX_GATE(mipi_csi_clk, "mipi-csi", mipi_csi_parents, + 0x16c, 0, 3, 24, 2, BIT(31), 0); + +static struct ccu_common *sun8i_v3s_ccu_clks[] = { + &pll_cpu_clk.common, + &pll_audio_base_clk.common, + &pll_video_clk.common, + &pll_ve_clk.common, + &pll_ddr_clk.common, + &pll_periph0_clk.common, + &pll_isp_clk.common, 
+ &pll_periph1_clk.common, + &cpu_clk.common, + &axi_clk.common, + &ahb1_clk.common, + &apb1_clk.common, + &apb2_clk.common, + &ahb2_clk.common, + &bus_ce_clk.common, + &bus_dma_clk.common, + &bus_mmc0_clk.common, + &bus_mmc1_clk.common, + &bus_mmc2_clk.common, + &bus_dram_clk.common, + &bus_emac_clk.common, + &bus_hstimer_clk.common, + &bus_spi0_clk.common, + &bus_otg_clk.common, + &bus_ehci0_clk.common, + &bus_ohci0_clk.common, + &bus_ve_clk.common, + &bus_tcon0_clk.common, + &bus_csi_clk.common, + &bus_de_clk.common, + &bus_codec_clk.common, + &bus_pio_clk.common, + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_uart0_clk.common, + &bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_ephy_clk.common, + &bus_dbg_clk.common, + &mmc0_clk.common, + &mmc0_sample_clk.common, + &mmc0_output_clk.common, + &mmc1_clk.common, + &mmc1_sample_clk.common, + &mmc1_output_clk.common, + &mmc2_clk.common, + &mmc2_sample_clk.common, + &mmc2_output_clk.common, + &ce_clk.common, + &spi0_clk.common, + &usb_phy0_clk.common, + &usb_ohci0_clk.common, + &dram_clk.common, + &dram_ve_clk.common, + &dram_csi_clk.common, + &dram_ohci_clk.common, + &dram_ehci_clk.common, + &de_clk.common, + &tcon_clk.common, + &csi_misc_clk.common, + &csi0_mclk_clk.common, + &csi1_sclk_clk.common, + &csi1_mclk_clk.common, + &ve_clk.common, + &ac_dig_clk.common, + &avs_clk.common, + &mbus_clk.common, + &mipi_csi_clk.common, +}; + +/* We hardcode the divider to 4 for now */ +static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", + "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", + "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", + "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", + "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", + "pll-periph0", 1, 2, 0); + +static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { + .hws = { + [CLK_PLL_CPU] = &pll_cpu_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO] = &pll_video_clk.common.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_ISP] = &pll_isp_clk.common.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_CPU] = &cpu_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_APB2] = &apb2_clk.common.hw, + [CLK_AHB2] = &ahb2_clk.common.hw, + [CLK_BUS_CE] = &bus_ce_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, + [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, + [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, + [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, + [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, + [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, + [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw, + [CLK_BUS_CSI] = &bus_csi_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + 
[CLK_BUS_CODEC] = &bus_codec_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw, + [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw, + [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, + [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_CE] = &ce_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_DRAM] = &dram_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_EHCI] = &dram_ehci_clk.common.hw, + [CLK_DRAM_OHCI] = &dram_ohci_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_TCON0] = &tcon_clk.common.hw, + [CLK_CSI_MISC] = &csi_misc_clk.common.hw, + [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, + [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw, + [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_AC_DIG] = &ac_dig_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + + [RST_MBUS] = { 0x0fc, BIT(31) }, + + [RST_BUS_CE] = { 0x2c0, BIT(5) }, + [RST_BUS_DMA] = { 0x2c0, BIT(6) }, + [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, + [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, + [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, + [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, + [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, + [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, + [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, + [RST_BUS_OTG] = { 0x2c0, BIT(23) }, + [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, + [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, + + [RST_BUS_VE] = { 0x2c4, BIT(0) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_CSI] = { 0x2c4, BIT(8) }, + [RST_BUS_DE] = { 0x2c4, BIT(12) }, + [RST_BUS_DBG] = { 0x2c4, BIT(31) }, + + [RST_BUS_EPHY] = { 0x2c8, BIT(2) }, + + [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, + + [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, + [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, + [RST_BUS_UART0] = { 0x2d8, BIT(16) }, + [RST_BUS_UART1] = { 0x2d8, BIT(17) }, + [RST_BUS_UART2] = { 0x2d8, BIT(18) }, +}; + +static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = { + .ccu_clks = sun8i_v3s_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_ccu_clks), + + .hw_clks = &sun8i_v3s_hw_clks, + + .resets = sun8i_v3s_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_v3s_ccu_resets), +}; + +static void __init sun8i_v3s_ccu_setup(struct device_node *node) +{ + void __iomem *reg; + u32 val; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + /* Force the PLL-Audio-1x divider to 4 */ + val = readl(reg + SUN8I_V3S_PLL_AUDIO_REG); + val &= ~GENMASK(19, 16); + writel(val | (3 << 16), reg + SUN8I_V3S_PLL_AUDIO_REG); + + sunxi_ccu_probe(node, reg, &sun8i_v3s_ccu_desc); +} +CLK_OF_DECLARE(sun8i_v3s_ccu, "allwinner,sun8i-v3s-ccu", + sun8i_v3s_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h new file mode 
100644 index 000000000000..4a4d36fdad96 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on ccu-sun8i-h3.h, which is: + * Copyright (c) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN8I_H3_H_ +#define _CCU_SUN8I_H3_H_ + +#include <dt-bindings/clock/sun8i-v3s-ccu.h> +#include <dt-bindings/reset/sun8i-v3s-ccu.h> + +#define CLK_PLL_CPU 0 +#define CLK_PLL_AUDIO_BASE 1 +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_AUDIO_2X 3 +#define CLK_PLL_AUDIO_4X 4 +#define CLK_PLL_AUDIO_8X 5 +#define CLK_PLL_VIDEO 6 +#define CLK_PLL_VE 7 +#define CLK_PLL_DDR 8 +#define CLK_PLL_PERIPH0 9 +#define CLK_PLL_PERIPH0_2X 10 +#define CLK_PLL_ISP 11 +#define CLK_PLL_PERIPH1 12 +/* Reserve one number for not implemented and not used PLL_DDR1 */ + +/* The CPU clock is exported */ + +#define CLK_AXI 15 +#define CLK_AHB1 16 +#define CLK_APB1 17 +#define CLK_APB2 18 +#define CLK_AHB2 19 + +/* All the bus gates are exported */ + +/* The first bunch of module clocks are exported */ + +#define CLK_DRAM 58 + +/* All the DRAM gates are exported */ + +/* Some more module clocks are exported */ + +#define CLK_MBUS 72 + +/* And the GPU module clock is exported */ + +#define CLK_NUMBER (CLK_MIPI_CSI + 1) + +#endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c new file mode 100644 index 000000000000..6d116581c86d --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2016 Chen-Yu Tsai. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
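A brief aside, not part of the patch: the driver-private headers above (ccu-sun5i.h, ccu-sun8i-v3s.h) only define the clock indices that the shared dt-bindings headers leave out, and size the clk_hw_onecell_data table as the highest index plus one. A tiny standalone illustration of that convention, with made-up index names:

#include <stdio.h>

#define CLK_FIRST	0
#define CLK_MIDDLE	1
#define CLK_LAST	2
#define CLK_NUMBER	(CLK_LAST + 1)	/* array size = highest index + 1 */

static const char *clk_names[CLK_NUMBER] = {
	[CLK_FIRST]	= "first",
	[CLK_MIDDLE]	= "middle",
	[CLK_LAST]	= "last",
};

int main(void)
{
	/* .num = CLK_NUMBER guarantees a slot for every exported index. */
	for (int i = 0; i < CLK_NUMBER; i++)
		printf("%d: %s\n", i, clk_names[i]);
	return 0;
}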
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_reset.h" + +#include "ccu-sun9i-a80-de.h" + +static SUNXI_CCU_GATE(fe0_clk, "fe0", "fe0-div", + 0x00, BIT(0), 0); +static SUNXI_CCU_GATE(fe1_clk, "fe1", "fe1-div", + 0x00, BIT(1), 0); +static SUNXI_CCU_GATE(fe2_clk, "fe2", "fe2-div", + 0x00, BIT(2), 0); +static SUNXI_CCU_GATE(iep_deu0_clk, "iep-deu0", "de", + 0x00, BIT(4), 0); +static SUNXI_CCU_GATE(iep_deu1_clk, "iep-deu1", "de", + 0x00, BIT(5), 0); +static SUNXI_CCU_GATE(be0_clk, "be0", "be0-div", + 0x00, BIT(8), 0); +static SUNXI_CCU_GATE(be1_clk, "be1", "be1-div", + 0x00, BIT(9), 0); +static SUNXI_CCU_GATE(be2_clk, "be2", "be2-div", + 0x00, BIT(10), 0); +static SUNXI_CCU_GATE(iep_drc0_clk, "iep-drc0", "de", + 0x00, BIT(12), 0); +static SUNXI_CCU_GATE(iep_drc1_clk, "iep-drc1", "de", + 0x00, BIT(13), 0); +static SUNXI_CCU_GATE(merge_clk, "merge", "de", + 0x00, BIT(20), 0); + +static SUNXI_CCU_GATE(dram_fe0_clk, "dram-fe0", "sdram", + 0x04, BIT(0), 0); +static SUNXI_CCU_GATE(dram_fe1_clk, "dram-fe1", "sdram", + 0x04, BIT(1), 0); +static SUNXI_CCU_GATE(dram_fe2_clk, "dram-fe2", "sdram", + 0x04, BIT(2), 0); +static SUNXI_CCU_GATE(dram_deu0_clk, "dram-deu0", "sdram", + 0x04, BIT(4), 0); +static SUNXI_CCU_GATE(dram_deu1_clk, "dram-deu1", "sdram", + 0x04, BIT(5), 0); +static SUNXI_CCU_GATE(dram_be0_clk, "dram-be0", "sdram", + 0x04, BIT(8), 0); +static SUNXI_CCU_GATE(dram_be1_clk, "dram-be1", "sdram", + 0x04, BIT(9), 0); +static SUNXI_CCU_GATE(dram_be2_clk, "dram-be2", "sdram", + 0x04, BIT(10), 0); +static SUNXI_CCU_GATE(dram_drc0_clk, "dram-drc0", "sdram", + 0x04, BIT(12), 0); +static SUNXI_CCU_GATE(dram_drc1_clk, "dram-drc1", "sdram", + 0x04, BIT(13), 0); + +static SUNXI_CCU_GATE(bus_fe0_clk, "bus-fe0", "bus-de", + 0x08, BIT(0), 0); +static SUNXI_CCU_GATE(bus_fe1_clk, "bus-fe1", "bus-de", + 0x08, BIT(1), 0); +static SUNXI_CCU_GATE(bus_fe2_clk, "bus-fe2", "bus-de", + 0x08, BIT(2), 0); +static SUNXI_CCU_GATE(bus_deu0_clk, "bus-deu0", "bus-de", + 0x08, BIT(4), 0); +static SUNXI_CCU_GATE(bus_deu1_clk, "bus-deu1", "bus-de", + 0x08, BIT(5), 0); +static SUNXI_CCU_GATE(bus_be0_clk, "bus-be0", "bus-de", + 0x08, BIT(8), 0); +static SUNXI_CCU_GATE(bus_be1_clk, "bus-be1", "bus-de", + 0x08, BIT(9), 0); +static SUNXI_CCU_GATE(bus_be2_clk, "bus-be2", "bus-de", + 0x08, BIT(10), 0); +static SUNXI_CCU_GATE(bus_drc0_clk, "bus-drc0", "bus-de", + 0x08, BIT(12), 0); +static SUNXI_CCU_GATE(bus_drc1_clk, "bus-drc1", "bus-de", + 0x08, BIT(13), 0); + +static SUNXI_CCU_M(fe0_div_clk, "fe0-div", "de", 0x20, 0, 4, 0); +static SUNXI_CCU_M(fe1_div_clk, "fe1-div", "de", 0x20, 4, 4, 0); +static SUNXI_CCU_M(fe2_div_clk, "fe2-div", "de", 0x20, 8, 4, 0); +static SUNXI_CCU_M(be0_div_clk, "be0-div", "de", 0x20, 16, 4, 0); +static SUNXI_CCU_M(be1_div_clk, "be1-div", "de", 0x20, 20, 4, 0); +static SUNXI_CCU_M(be2_div_clk, "be2-div", "de", 0x20, 24, 4, 0); + +static struct ccu_common *sun9i_a80_de_clks[] = { + &fe0_clk.common, + &fe1_clk.common, + &fe2_clk.common, + &iep_deu0_clk.common, + &iep_deu1_clk.common, + &be0_clk.common, + &be1_clk.common, + &be2_clk.common, + &iep_drc0_clk.common, + &iep_drc1_clk.common, + &merge_clk.common, + + &dram_fe0_clk.common, + &dram_fe1_clk.common, + &dram_fe2_clk.common, + &dram_deu0_clk.common, + &dram_deu1_clk.common, + &dram_be0_clk.common, + &dram_be1_clk.common, + &dram_be2_clk.common, + 
&dram_drc0_clk.common, + &dram_drc1_clk.common, + + &bus_fe0_clk.common, + &bus_fe1_clk.common, + &bus_fe2_clk.common, + &bus_deu0_clk.common, + &bus_deu1_clk.common, + &bus_be0_clk.common, + &bus_be1_clk.common, + &bus_be2_clk.common, + &bus_drc0_clk.common, + &bus_drc1_clk.common, + + &fe0_div_clk.common, + &fe1_div_clk.common, + &fe2_div_clk.common, + &be0_div_clk.common, + &be1_div_clk.common, + &be2_div_clk.common, +}; + +static struct clk_hw_onecell_data sun9i_a80_de_hw_clks = { + .hws = { + [CLK_FE0] = &fe0_clk.common.hw, + [CLK_FE1] = &fe1_clk.common.hw, + [CLK_FE2] = &fe2_clk.common.hw, + [CLK_IEP_DEU0] = &iep_deu0_clk.common.hw, + [CLK_IEP_DEU1] = &iep_deu1_clk.common.hw, + [CLK_BE0] = &be0_clk.common.hw, + [CLK_BE1] = &be1_clk.common.hw, + [CLK_BE2] = &be2_clk.common.hw, + [CLK_IEP_DRC0] = &iep_drc0_clk.common.hw, + [CLK_IEP_DRC1] = &iep_drc1_clk.common.hw, + [CLK_MERGE] = &merge_clk.common.hw, + + [CLK_DRAM_FE0] = &dram_fe0_clk.common.hw, + [CLK_DRAM_FE1] = &dram_fe1_clk.common.hw, + [CLK_DRAM_FE2] = &dram_fe2_clk.common.hw, + [CLK_DRAM_DEU0] = &dram_deu0_clk.common.hw, + [CLK_DRAM_DEU1] = &dram_deu1_clk.common.hw, + [CLK_DRAM_BE0] = &dram_be0_clk.common.hw, + [CLK_DRAM_BE1] = &dram_be1_clk.common.hw, + [CLK_DRAM_BE2] = &dram_be2_clk.common.hw, + [CLK_DRAM_DRC0] = &dram_drc0_clk.common.hw, + [CLK_DRAM_DRC1] = &dram_drc1_clk.common.hw, + + [CLK_BUS_FE0] = &bus_fe0_clk.common.hw, + [CLK_BUS_FE1] = &bus_fe1_clk.common.hw, + [CLK_BUS_FE2] = &bus_fe2_clk.common.hw, + [CLK_BUS_DEU0] = &bus_deu0_clk.common.hw, + [CLK_BUS_DEU1] = &bus_deu1_clk.common.hw, + [CLK_BUS_BE0] = &bus_be0_clk.common.hw, + [CLK_BUS_BE1] = &bus_be1_clk.common.hw, + [CLK_BUS_BE2] = &bus_be2_clk.common.hw, + [CLK_BUS_DRC0] = &bus_drc0_clk.common.hw, + [CLK_BUS_DRC1] = &bus_drc1_clk.common.hw, + + [CLK_FE0_DIV] = &fe0_div_clk.common.hw, + [CLK_FE1_DIV] = &fe1_div_clk.common.hw, + [CLK_FE2_DIV] = &fe2_div_clk.common.hw, + [CLK_BE0_DIV] = &be0_div_clk.common.hw, + [CLK_BE1_DIV] = &be1_div_clk.common.hw, + [CLK_BE2_DIV] = &be2_div_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun9i_a80_de_resets[] = { + [RST_FE0] = { 0x0c, BIT(0) }, + [RST_FE1] = { 0x0c, BIT(1) }, + [RST_FE2] = { 0x0c, BIT(2) }, + [RST_DEU0] = { 0x0c, BIT(4) }, + [RST_DEU1] = { 0x0c, BIT(5) }, + [RST_BE0] = { 0x0c, BIT(8) }, + [RST_BE1] = { 0x0c, BIT(9) }, + [RST_BE2] = { 0x0c, BIT(10) }, + [RST_DRC0] = { 0x0c, BIT(12) }, + [RST_DRC1] = { 0x0c, BIT(13) }, + [RST_MERGE] = { 0x0c, BIT(20) }, +}; + +static const struct sunxi_ccu_desc sun9i_a80_de_clk_desc = { + .ccu_clks = sun9i_a80_de_clks, + .num_ccu_clks = ARRAY_SIZE(sun9i_a80_de_clks), + + .hw_clks = &sun9i_a80_de_hw_clks, + + .resets = sun9i_a80_de_resets, + .num_resets = ARRAY_SIZE(sun9i_a80_de_resets), +}; + +static int sun9i_a80_de_clk_probe(struct platform_device *pdev) +{ + struct resource *res; + struct clk *bus_clk; + struct reset_control *rstc; + void __iomem *reg; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(bus_clk)) { + ret = PTR_ERR(bus_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret); + return ret; + } + + rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) { + ret = PTR_ERR(rstc); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Couldn't get reset control: %d\n", ret); + return ret; + } + + /* The bus clock needs 
to be enabled for us to access the registers */ + ret = clk_prepare_enable(bus_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable bus clk: %d\n", ret); + return ret; + } + + /* The reset control needs to be asserted for the controls to work */ + ret = reset_control_deassert(rstc); + if (ret) { + dev_err(&pdev->dev, + "Couldn't deassert reset control: %d\n", ret); + goto err_disable_clk; + } + + ret = sunxi_ccu_probe(pdev->dev.of_node, reg, + &sun9i_a80_de_clk_desc); + if (ret) + goto err_assert_reset; + + return 0; + +err_assert_reset: + reset_control_assert(rstc); +err_disable_clk: + clk_disable_unprepare(bus_clk); + return ret; +} + +static const struct of_device_id sun9i_a80_de_clk_ids[] = { + { .compatible = "allwinner,sun9i-a80-de-clks" }, + { } +}; + +static struct platform_driver sun9i_a80_de_clk_driver = { + .probe = sun9i_a80_de_clk_probe, + .driver = { + .name = "sun9i-a80-de-clks", + .of_match_table = sun9i_a80_de_clk_ids, + }, +}; +builtin_platform_driver(sun9i_a80_de_clk_driver); diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.h b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.h new file mode 100644 index 000000000000..a4769041e40f --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Chen-Yu Tsai + * + * Chen-Yu Tsai <wens@csie.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN9I_A80_DE_H_ +#define _CCU_SUN9I_A80_DE_H_ + +#include <dt-bindings/clock/sun9i-a80-de.h> +#include <dt-bindings/reset/sun9i-a80-de.h> + +/* Intermediary clock dividers are not exported */ +#define CLK_FE0_DIV 31 +#define CLK_FE1_DIV 32 +#define CLK_FE2_DIV 33 +#define CLK_BE0_DIV 34 +#define CLK_BE1_DIV 35 +#define CLK_BE2_DIV 36 + +#define CLK_NUMBER (CLK_BE2_DIV + 1) + +#endif /* _CCU_SUN9I_A80_DE_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c new file mode 100644 index 000000000000..1d76f24f7df3 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2016 Chen-Yu Tsai. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +#include "ccu_common.h" +#include "ccu_gate.h" +#include "ccu_reset.h" + +#include "ccu-sun9i-a80-usb.h" + +static SUNXI_CCU_GATE(bus_hci0_clk, "bus-hci0", "bus-usb", 0x0, BIT(1), 0); +static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M", 0x0, BIT(2), 0); +static SUNXI_CCU_GATE(bus_hci1_clk, "bus-hci1", "bus-usb", 0x0, BIT(3), 0); +static SUNXI_CCU_GATE(bus_hci2_clk, "bus-hci2", "bus-usb", 0x0, BIT(5), 0); +static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M", 0x0, BIT(6), 0); + +static SUNXI_CCU_GATE(usb0_phy_clk, "usb0-phy", "osc24M", 0x4, BIT(1), 0); +static SUNXI_CCU_GATE(usb1_hsic_clk, "usb1-hsic", "osc24M", 0x4, BIT(2), 0); +static SUNXI_CCU_GATE(usb1_phy_clk, "usb1-phy", "osc24M", 0x4, BIT(3), 0); +static SUNXI_CCU_GATE(usb2_hsic_clk, "usb2-hsic", "osc24M", 0x4, BIT(4), 0); +static SUNXI_CCU_GATE(usb2_phy_clk, "usb2-phy", "osc24M", 0x4, BIT(5), 0); +static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "osc24M", 0x4, BIT(10), 0); + +static struct ccu_common *sun9i_a80_usb_clks[] = { + &bus_hci0_clk.common, + &usb_ohci0_clk.common, + &bus_hci1_clk.common, + &bus_hci2_clk.common, + &usb_ohci2_clk.common, + + &usb0_phy_clk.common, + &usb1_hsic_clk.common, + &usb1_phy_clk.common, + &usb2_hsic_clk.common, + &usb2_phy_clk.common, + &usb_hsic_clk.common, +}; + +static struct clk_hw_onecell_data sun9i_a80_usb_hw_clks = { + .hws = { + [CLK_BUS_HCI0] = &bus_hci0_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_BUS_HCI1] = &bus_hci1_clk.common.hw, + [CLK_BUS_HCI2] = &bus_hci2_clk.common.hw, + [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw, + + [CLK_USB0_PHY] = &usb0_phy_clk.common.hw, + [CLK_USB1_HSIC] = &usb1_hsic_clk.common.hw, + [CLK_USB1_PHY] = &usb1_phy_clk.common.hw, + [CLK_USB2_HSIC] = &usb2_hsic_clk.common.hw, + [CLK_USB2_PHY] = &usb2_phy_clk.common.hw, + [CLK_USB_HSIC] = &usb_hsic_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun9i_a80_usb_resets[] = { + [RST_USB0_HCI] = { 0x0, BIT(17) }, + [RST_USB1_HCI] = { 0x0, BIT(18) }, + [RST_USB2_HCI] = { 0x0, BIT(19) }, + + [RST_USB0_PHY] = { 0x4, BIT(17) }, + [RST_USB1_HSIC] = { 0x4, BIT(18) }, + [RST_USB1_PHY] = { 0x4, BIT(19) }, + [RST_USB2_HSIC] = { 0x4, BIT(20) }, + [RST_USB2_PHY] = { 0x4, BIT(21) }, +}; + +static const struct sunxi_ccu_desc sun9i_a80_usb_clk_desc = { + .ccu_clks = sun9i_a80_usb_clks, + .num_ccu_clks = ARRAY_SIZE(sun9i_a80_usb_clks), + + .hw_clks = &sun9i_a80_usb_hw_clks, + + .resets = sun9i_a80_usb_resets, + .num_resets = ARRAY_SIZE(sun9i_a80_usb_resets), +}; + +static int sun9i_a80_usb_clk_probe(struct platform_device *pdev) +{ + struct resource *res; + struct clk *bus_clk; + void __iomem *reg; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(bus_clk)) { + ret = PTR_ERR(bus_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret); + return ret; + } + + /* The bus clock needs to be enabled for us to access the registers */ + ret = clk_prepare_enable(bus_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable bus clk: %d\n", ret); + return ret; + } + + ret = sunxi_ccu_probe(pdev->dev.of_node, reg, + &sun9i_a80_usb_clk_desc); + if (ret) + goto err_disable_clk; + + return 0; + +err_disable_clk: + clk_disable_unprepare(bus_clk); + return 
ret; +} + +static const struct of_device_id sun9i_a80_usb_clk_ids[] = { + { .compatible = "allwinner,sun9i-a80-usb-clks" }, + { } +}; + +static struct platform_driver sun9i_a80_usb_clk_driver = { + .probe = sun9i_a80_usb_clk_probe, + .driver = { + .name = "sun9i-a80-usb-clks", + .of_match_table = sun9i_a80_usb_clk_ids, + }, +}; +builtin_platform_driver(sun9i_a80_usb_clk_driver); diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.h b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.h new file mode 100644 index 000000000000..a184280ba854 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.h @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Chen-Yu Tsai + * + * Chen-Yu Tsai <wens@csie.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN9I_A80_USB_H_ +#define _CCU_SUN9I_A80_USB_H_ + +#include <dt-bindings/clock/sun9i-a80-usb.h> +#include <dt-bindings/reset/sun9i-a80-usb.h> + +#define CLK_NUMBER (CLK_USB_HSIC + 1) + +#endif /* _CCU_SUN9I_A80_USB_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c new file mode 100644 index 000000000000..e13e313ce4f5 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c @@ -0,0 +1,1223 @@ +/* + * Copyright (c) 2016 Chen-Yu Tsai. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun9i-a80.h" + +#define CCU_SUN9I_LOCK_REG 0x09c + +static struct clk_div_table pll_cpux_p_div_table[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 4 }, + { /* Sentinel */ }, +}; + +/* + * The CPU PLLs are actually NP clocks, but P is /1 or /4, so here we + * use the NM clocks with a divider table for M. 
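A worked illustration of that modelling, not part of the patch: with the field positions from the macros in pll_c0cpux_clk (N in bits [15:8], the P selector in bit 16) and assuming no offset on N, the rate is simply 24 MHz * N divided by the table entry. A standalone sketch:

#include <stdio.h>

static const unsigned int p_div[] = { 1, 4 };	/* table: val 0 -> /1, val 1 -> /4 */

static unsigned long long np_pll_rate(unsigned long long parent, unsigned int reg)
{
	unsigned int n = (reg >> 8) & 0xff;		/* multiplier N */
	unsigned int p = p_div[(reg >> 16) & 0x1];	/* the "M" divider, really P */

	return parent * n / p;
}

int main(void)
{
	printf("%llu Hz\n", np_pll_rate(24000000ULL, 42 << 8));		/* 1.008 GHz */
	printf("%llu Hz\n", np_pll_rate(24000000ULL, (1 << 16) | (42 << 8)));	/* 252 MHz */
	return 0;
}

So the same N setting yields either the full PLL rate or a quarter of it, which matches the /1-or-/4 behaviour described above.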
+ */ +static struct ccu_nm pll_c0cpux_clk = { + .enable = BIT(31), + .lock = BIT(0), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV_TABLE(16, 1, pll_cpux_p_div_table), + .common = { + .reg = 0x000, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-c0cpux", "osc24M", + &ccu_nm_ops, CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nm pll_c1cpux_clk = { + .enable = BIT(31), + .lock = BIT(1), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV_TABLE(16, 1, pll_cpux_p_div_table), + .common = { + .reg = 0x004, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-c1cpux", "osc24M", + &ccu_nm_ops, CLK_SET_RATE_UNGATE), + }, +}; + +/* + * The Audio PLL has d1, d2 dividers in addition to the usual N, M + * factors. Since we only need 2 frequencies from this PLL: 22.5792 MHz + * and 24.576 MHz, ignore them for now. Enforce the default for them, + * which is d1 = 0, d2 = 1. + */ +#define SUN9I_A80_PLL_AUDIO_REG 0x008 + +static struct ccu_nm pll_audio_clk = { + .enable = BIT(31), + .lock = BIT(2), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV_OFFSET(0, 6, 0), + .common = { + .reg = 0x008, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-audio", "osc24M", + &ccu_nm_ops, CLK_SET_RATE_UNGATE), + }, +}; + +/* Some PLLs are input * N / div1 / div2. Model them as NKMP with no K */ +static struct ccu_nkmp pll_periph0_clk = { + .enable = BIT(31), + .lock = BIT(3), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x00c, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-periph0", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_ve_clk = { + .enable = BIT(31), + .lock = BIT(4), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x010, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-ve", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_ddr_clk = { + .enable = BIT(31), + .lock = BIT(5), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x014, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-ddr", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nm pll_video0_clk = { + .enable = BIT(31), + .lock = BIT(6), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .common = { + .reg = 0x018, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-video0", "osc24M", + &ccu_nm_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_video1_clk = { + .enable = BIT(31), + .lock = BIT(7), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(0, 2), /* external divider p */ + .common = { + .reg = 0x01c, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + 
.hw.init = CLK_HW_INIT("pll-video1", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_gpu_clk = { + .enable = BIT(31), + .lock = BIT(8), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x020, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-gpu", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_de_clk = { + .enable = BIT(31), + .lock = BIT(9), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x024, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-de", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_isp_clk = { + .enable = BIT(31), + .lock = BIT(10), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x028, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-isp", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static struct ccu_nkmp pll_periph1_clk = { + .enable = BIT(31), + .lock = BIT(11), + .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), + .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ + .p = _SUNXI_CCU_DIV(18, 1), /* output divider */ + .common = { + .reg = 0x028, + .lock_reg = CCU_SUN9I_LOCK_REG, + .features = CCU_FEATURE_LOCK_REG, + .hw.init = CLK_HW_INIT("pll-periph1", "osc24M", + &ccu_nkmp_ops, + CLK_SET_RATE_UNGATE), + }, +}; + +static const char * const c0cpux_parents[] = { "osc24M", "pll-c0cpux" }; +static SUNXI_CCU_MUX(c0cpux_clk, "c0cpux", c0cpux_parents, + 0x50, 0, 1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); + +static const char * const c1cpux_parents[] = { "osc24M", "pll-c1cpux" }; +static SUNXI_CCU_MUX(c1cpux_clk, "c1cpux", c1cpux_parents, + 0x50, 8, 1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); + +static struct clk_div_table axi_div_table[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 3 }, + { .val = 3, .div = 4 }, + { .val = 4, .div = 4 }, + { .val = 5, .div = 4 }, + { .val = 6, .div = 4 }, + { .val = 7, .div = 4 }, + { /* Sentinel */ }, +}; + +static SUNXI_CCU_M(atb0_clk, "atb0", "c0cpux", 0x054, 8, 2, 0); + +static SUNXI_CCU_DIV_TABLE(axi0_clk, "axi0", "c0cpux", + 0x054, 0, 3, axi_div_table, 0); + +static SUNXI_CCU_M(atb1_clk, "atb1", "c1cpux", 0x058, 8, 2, 0); + +static SUNXI_CCU_DIV_TABLE(axi1_clk, "axi1", "c1cpux", + 0x058, 0, 3, axi_div_table, 0); + +static const char * const gtbus_parents[] = { "osc24M", "pll-periph0", + "pll-periph1", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX(gtbus_clk, "gtbus", gtbus_parents, + 0x05c, 0, 2, 24, 2, CLK_IS_CRITICAL); + +static const char * const ahb_parents[] = { "gtbus", "pll-periph0", + "pll-periph1", "pll-periph1" }; +static struct ccu_div ahb0_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 2), + .common = { + .reg = 0x060, + .hw.init = CLK_HW_INIT_PARENTS("ahb0", + ahb_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_div ahb1_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 2), + .common = { + .reg = 0x064, + 
.hw.init = CLK_HW_INIT_PARENTS("ahb1", + ahb_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_div ahb2_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 2), + .common = { + .reg = 0x068, + .hw.init = CLK_HW_INIT_PARENTS("ahb2", + ahb_parents, + &ccu_div_ops, + 0), + }, +}; + +static const char * const apb_parents[] = { "osc24M", "pll-periph0" }; + +static struct ccu_div apb0_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 1), + .common = { + .reg = 0x070, + .hw.init = CLK_HW_INIT_PARENTS("apb0", + apb_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_div apb1_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 1), + .common = { + .reg = 0x074, + .hw.init = CLK_HW_INIT_PARENTS("apb1", + apb_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct ccu_div cci400_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + .mux = _SUNXI_CCU_MUX(24, 2), + .common = { + .reg = 0x078, + .hw.init = CLK_HW_INIT_PARENTS("cci400", + ahb_parents, + &ccu_div_ops, + CLK_IS_CRITICAL), + }, +}; + +static SUNXI_CCU_M_WITH_MUX_GATE(ats_clk, "ats", apb_parents, + 0x080, 0, 3, 24, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_MUX_GATE(trace_clk, "trace", apb_parents, + 0x084, 0, 3, 24, 2, BIT(31), 0); + +static const char * const out_parents[] = { "osc24M", "osc32k", "osc24M" }; +static const struct ccu_mux_fixed_prediv out_prediv = { + .index = 0, .div = 750 +}; + +static struct ccu_mp out_a_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = { + .shift = 24, + .width = 4, + .fixed_predivs = &out_prediv, + .n_predivs = 1, + }, + .common = { + .reg = 0x180, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-a", + out_parents, + &ccu_mp_ops, + 0), + }, +}; + +static struct ccu_mp out_b_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(8, 5), + .p = _SUNXI_CCU_DIV(20, 2), + .mux = { + .shift = 24, + .width = 4, + .fixed_predivs = &out_prediv, + .n_predivs = 1, + }, + .common = { + .reg = 0x184, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-b", + out_parents, + &ccu_mp_ops, + 0), + }, +}; + +static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0" }; + +static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_0_clk, "nand0-0", mod0_default_parents, + 0x400, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_1_clk, "nand0-1", mod0_default_parents, + 0x404, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_0_clk, "nand1-0", mod0_default_parents, + 0x408, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_1_clk, "nand1-1", mod0_default_parents, + 0x40c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, + 0x410, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0-sample", "mmc0", + 0x410, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0-output", "mmc0", + 0x410, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, + 0x414, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 
0); + +static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1-sample", "mmc1", + 0x414, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1-output", "mmc1", + 0x414, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, + 0x418, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2-sample", "mmc2", + 0x418, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2-output", "mmc2", + 0x418, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents, + 0x41c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc3_sample_clk, "mmc3-sample", "mmc3", + 0x41c, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc3_output_clk, "mmc3-output", "mmc3", + 0x41c, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", mod0_default_parents, + 0x428, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const ss_parents[] = { "osc24M", "pll-periph", + "pll-periph1" }; +static const u8 ss_table[] = { 0, 1, 13 }; +static struct ccu_mp ss_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(0, 4), + .p = _SUNXI_CCU_DIV(16, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 4, ss_table), + .common = { + .reg = 0x42c, + .hw.init = CLK_HW_INIT_PARENTS("ss", + ss_parents, + &ccu_mp_ops, + 0), + }, +}; + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, + 0x430, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, + 0x434, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, + 0x438, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, + 0x43c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_M_WITH_GATE(i2s0_clk, "i2s0", "pll-audio", + 0x440, 0, 4, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_GATE(i2s1_clk, "i2s1", "pll-audio", + 0x444, 0, 4, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio", + 0x44c, 0, 4, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const sdram_parents[] = { "pll-periph0", "pll-ddr" }; +static const u8 sdram_table[] = { 0, 3 }; + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(sdram_clk, "sdram", + sdram_parents, sdram_table, + 0x484, + 8, 4, /* M */ + 12, 4, /* mux */ + 0, /* no gate */ + CLK_IS_CRITICAL); + +static SUNXI_CCU_M_WITH_GATE(de_clk, "de", "pll-de", 0x490, + 0, 4, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(edp_clk, "edp", "osc24M", 0x494, BIT(31), 0); + +static const char * const mp_parents[] = { "pll-video1", "pll-gpu", "pll-de" }; +static const u8 mp_table[] = { 9, 10, 11 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(mp_clk, "mp", mp_parents, mp_table, + 0x498, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const display_parents[] = { "pll-video0", "pll-video1" }; +static const u8 display_table[] = { 8, 9 }; + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(lcd0_clk, "lcd0", + display_parents, display_table, + 0x49c, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_NO_REPARENT | + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(lcd1_clk, "lcd1", + 
display_parents, display_table, + 0x4a0, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_NO_REPARENT | + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(mipi_dsi0_clk, "mipi-dsi0", + display_parents, display_table, + 0x4a8, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_PARENT); + +static const char * const mipi_dsi1_parents[] = { "osc24M", "pll-video1" }; +static const u8 mipi_dsi1_table[] = { 0, 9 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(mipi_dsi1_clk, "mipi-dsi1", + mipi_dsi1_parents, mipi_dsi1_table, + 0x4ac, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(hdmi_clk, "hdmi", + display_parents, display_table, + 0x4b0, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_NO_REPARENT | + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0x4b4, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(mipi_csi_clk, "mipi-csi", "osc24M", 0x4bc, + 0, 4, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(csi_isp_clk, "csi-isp", "pll-isp", 0x4c0, + 0, 4, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x4c0, BIT(16), 0); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi0_mclk_clk, "csi0-mclk", + mipi_dsi1_parents, mipi_dsi1_table, + 0x4c4, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi1_mclk_clk, "csi1-mclk", + mipi_dsi1_parents, mipi_dsi1_table, + 0x4c8, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_PARENT); + +static const char * const fd_parents[] = { "pll-periph0", "pll-isp" }; +static const u8 fd_table[] = { 1, 12 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(fd_clk, "fd", fd_parents, fd_table, + 0x4cc, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); +static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", 0x4d0, + 16, 3, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x4d4, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(gpu_core_clk, "gpu-core", "pll-gpu", 0x4f0, + 0, 3, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_GATE(gpu_memory_clk, "gpu-memory", "pll-gpu", 0x4f4, + 0, 3, BIT(31), CLK_SET_RATE_PARENT); + +static const char * const gpu_axi_parents[] = { "pll-periph0", "pll-gpu" }; +static const u8 gpu_axi_table[] = { 1, 10 }; +static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(gpu_axi_clk, "gpu-axi", + gpu_axi_parents, gpu_axi_table, + 0x4f8, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_GATE(sata_clk, "sata", "pll-periph0", 0x500, + 0, 4, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(ac97_clk, "ac97", "pll-audio", + 0x504, 0, 4, BIT(31), CLK_SET_RATE_PARENT); + +static SUNXI_CCU_M_WITH_MUX_GATE(mipi_hsi_clk, "mipi-hsi", + mod0_default_parents, 0x508, + 0, 4, /* M */ + 24, 4, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const gpadc_parents[] = { "osc24M", "pll-audio", "osc32k" }; +static const u8 gpadc_table[] = { 0, 4, 7 }; +static struct ccu_mp gpadc_clk = { + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(0, 4), + .p = _SUNXI_CCU_DIV(16, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 4, gpadc_table), + .common = { + .reg = 0x50c, + .hw.init = CLK_HW_INIT_PARENTS("gpadc", + gpadc_parents, + &ccu_mp_ops, + 0), + }, +}; + +static const char * const cir_tx_parents[] = { "osc24M", "osc32k" }; +static const u8 cir_tx_table[] = { 0, 7 }; +static struct ccu_mp cir_tx_clk = 
{ + .enable = BIT(31), + .m = _SUNXI_CCU_DIV(0, 4), + .p = _SUNXI_CCU_DIV(16, 2), + .mux = _SUNXI_CCU_MUX_TABLE(24, 4, cir_tx_table), + .common = { + .reg = 0x510, + .hw.init = CLK_HW_INIT_PARENTS("cir-tx", + cir_tx_parents, + &ccu_mp_ops, + 0), + }, +}; + +/* AHB0 bus gates */ +static SUNXI_CCU_GATE(bus_fd_clk, "bus-fd", "ahb0", + 0x580, BIT(0), 0); +static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb0", + 0x580, BIT(1), 0); +static SUNXI_CCU_GATE(bus_gpu_ctrl_clk, "bus-gpu-ctrl", "ahb0", + 0x580, BIT(3), 0); +static SUNXI_CCU_GATE(bus_ss_clk, "bus-ss", "ahb0", + 0x580, BIT(5), 0); +static SUNXI_CCU_GATE(bus_mmc_clk, "bus-mmc", "ahb0", + 0x580, BIT(8), 0); +static SUNXI_CCU_GATE(bus_nand0_clk, "bus-nand0", "ahb0", + 0x580, BIT(12), 0); +static SUNXI_CCU_GATE(bus_nand1_clk, "bus-nand1", "ahb0", + 0x580, BIT(13), 0); +static SUNXI_CCU_GATE(bus_sdram_clk, "bus-sdram", "ahb0", + 0x580, BIT(14), 0); +static SUNXI_CCU_GATE(bus_mipi_hsi_clk, "bus-mipi-hsi", "ahb0", + 0x580, BIT(15), 0); +static SUNXI_CCU_GATE(bus_sata_clk, "bus-sata", "ahb0", + 0x580, BIT(16), 0); +static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb0", + 0x580, BIT(18), 0); +static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb0", + 0x580, BIT(20), 0); +static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb0", + 0x580, BIT(21), 0); +static SUNXI_CCU_GATE(bus_spi2_clk, "bus-spi2", "ahb0", + 0x580, BIT(22), 0); +static SUNXI_CCU_GATE(bus_spi3_clk, "bus-spi3", "ahb0", + 0x580, BIT(23), 0); + +/* AHB1 bus gates */ +static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1", + 0x584, BIT(0), 0); +static SUNXI_CCU_GATE(bus_usb_clk, "bus-usb", "ahb1", + 0x584, BIT(1), 0); +static SUNXI_CCU_GATE(bus_gmac_clk, "bus-gmac", "ahb1", + 0x584, BIT(17), 0); +static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1", + 0x584, BIT(21), 0); +static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1", + 0x584, BIT(22), 0); +static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1", + 0x584, BIT(23), 0); +static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1", + 0x584, BIT(24), 0); + +/* AHB2 bus gates */ +static SUNXI_CCU_GATE(bus_lcd0_clk, "bus-lcd0", "ahb2", + 0x588, BIT(0), 0); +static SUNXI_CCU_GATE(bus_lcd1_clk, "bus-lcd1", "ahb2", + 0x588, BIT(1), 0); +static SUNXI_CCU_GATE(bus_edp_clk, "bus-edp", "ahb2", + 0x588, BIT(2), 0); +static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb2", + 0x588, BIT(4), 0); +static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb2", + 0x588, BIT(5), 0); +static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb2", + 0x588, BIT(7), 0); +static SUNXI_CCU_GATE(bus_mp_clk, "bus-mp", "ahb2", + 0x588, BIT(8), 0); +static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb2", + 0x588, BIT(11), 0); + +/* APB0 bus gates */ +static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb0", + 0x590, BIT(1), 0); +static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb0", + 0x590, BIT(5), 0); +static SUNXI_CCU_GATE(bus_ac97_clk, "bus-ac97", "apb0", + 0x590, BIT(11), 0); +static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb0", + 0x590, BIT(12), 0); +static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb0", + 0x590, BIT(13), 0); +static SUNXI_CCU_GATE(bus_lradc_clk, "bus-lradc", "apb0", + 0x590, BIT(15), 0); +static SUNXI_CCU_GATE(bus_gpadc_clk, "bus-gpadc", "apb0", + 0x590, BIT(17), 0); +static SUNXI_CCU_GATE(bus_twd_clk, "bus-twd", "apb0", + 0x590, BIT(18), 0); +static SUNXI_CCU_GATE(bus_cir_tx_clk, "bus-cir-tx", "apb0", + 0x590, BIT(19), 0); + +/* APB1 bus gates */ +static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb1", + 0x594, BIT(0), 0); +static 
SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb1", + 0x594, BIT(1), 0); +static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb1", + 0x594, BIT(2), 0); +static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb1", + 0x594, BIT(3), 0); +static SUNXI_CCU_GATE(bus_i2c4_clk, "bus-i2c4", "apb1", + 0x594, BIT(4), 0); +static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb1", + 0x594, BIT(16), 0); +static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb1", + 0x594, BIT(17), 0); +static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb1", + 0x594, BIT(18), 0); +static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb1", + 0x594, BIT(19), 0); +static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb1", + 0x594, BIT(20), 0); +static SUNXI_CCU_GATE(bus_uart5_clk, "bus-uart5", "apb1", + 0x594, BIT(21), 0); + +static struct ccu_common *sun9i_a80_ccu_clks[] = { + &pll_c0cpux_clk.common, + &pll_c1cpux_clk.common, + &pll_audio_clk.common, + &pll_periph0_clk.common, + &pll_ve_clk.common, + &pll_ddr_clk.common, + &pll_video0_clk.common, + &pll_video1_clk.common, + &pll_gpu_clk.common, + &pll_de_clk.common, + &pll_isp_clk.common, + &pll_periph1_clk.common, + &c0cpux_clk.common, + &c1cpux_clk.common, + &atb0_clk.common, + &axi0_clk.common, + &atb1_clk.common, + &axi1_clk.common, + &gtbus_clk.common, + &ahb0_clk.common, + &ahb1_clk.common, + &ahb2_clk.common, + &apb0_clk.common, + &apb1_clk.common, + &cci400_clk.common, + &ats_clk.common, + &trace_clk.common, + + &out_a_clk.common, + &out_b_clk.common, + + /* module clocks */ + &nand0_0_clk.common, + &nand0_1_clk.common, + &nand1_0_clk.common, + &nand1_1_clk.common, + &mmc0_clk.common, + &mmc0_sample_clk.common, + &mmc0_output_clk.common, + &mmc1_clk.common, + &mmc1_sample_clk.common, + &mmc1_output_clk.common, + &mmc2_clk.common, + &mmc2_sample_clk.common, + &mmc2_output_clk.common, + &mmc3_clk.common, + &mmc3_sample_clk.common, + &mmc3_output_clk.common, + &ts_clk.common, + &ss_clk.common, + &spi0_clk.common, + &spi1_clk.common, + &spi2_clk.common, + &spi3_clk.common, + &i2s0_clk.common, + &i2s1_clk.common, + &spdif_clk.common, + &sdram_clk.common, + &de_clk.common, + &edp_clk.common, + &mp_clk.common, + &lcd0_clk.common, + &lcd1_clk.common, + &mipi_dsi0_clk.common, + &mipi_dsi1_clk.common, + &hdmi_clk.common, + &hdmi_slow_clk.common, + &mipi_csi_clk.common, + &csi_isp_clk.common, + &csi_misc_clk.common, + &csi0_mclk_clk.common, + &csi1_mclk_clk.common, + &fd_clk.common, + &ve_clk.common, + &avs_clk.common, + &gpu_core_clk.common, + &gpu_memory_clk.common, + &gpu_axi_clk.common, + &sata_clk.common, + &ac97_clk.common, + &mipi_hsi_clk.common, + &gpadc_clk.common, + &cir_tx_clk.common, + + /* AHB0 bus gates */ + &bus_fd_clk.common, + &bus_ve_clk.common, + &bus_gpu_ctrl_clk.common, + &bus_ss_clk.common, + &bus_mmc_clk.common, + &bus_nand0_clk.common, + &bus_nand1_clk.common, + &bus_sdram_clk.common, + &bus_mipi_hsi_clk.common, + &bus_sata_clk.common, + &bus_ts_clk.common, + &bus_spi0_clk.common, + &bus_spi1_clk.common, + &bus_spi2_clk.common, + &bus_spi3_clk.common, + + /* AHB1 bus gates */ + &bus_otg_clk.common, + &bus_usb_clk.common, + &bus_gmac_clk.common, + &bus_msgbox_clk.common, + &bus_spinlock_clk.common, + &bus_hstimer_clk.common, + &bus_dma_clk.common, + + /* AHB2 bus gates */ + &bus_lcd0_clk.common, + &bus_lcd1_clk.common, + &bus_edp_clk.common, + &bus_csi_clk.common, + &bus_hdmi_clk.common, + &bus_de_clk.common, + &bus_mp_clk.common, + &bus_mipi_dsi_clk.common, + + /* APB0 bus gates */ + &bus_spdif_clk.common, + &bus_pio_clk.common, + &bus_ac97_clk.common, + 
&bus_i2s0_clk.common, + &bus_i2s1_clk.common, + &bus_lradc_clk.common, + &bus_gpadc_clk.common, + &bus_twd_clk.common, + &bus_cir_tx_clk.common, + + /* APB1 bus gates */ + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_i2c2_clk.common, + &bus_i2c3_clk.common, + &bus_i2c4_clk.common, + &bus_uart0_clk.common, + &bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_uart3_clk.common, + &bus_uart4_clk.common, + &bus_uart5_clk.common, +}; + +static struct clk_hw_onecell_data sun9i_a80_hw_clks = { + .hws = { + [CLK_PLL_C0CPUX] = &pll_c0cpux_clk.common.hw, + [CLK_PLL_C1CPUX] = &pll_c1cpux_clk.common.hw, + [CLK_PLL_AUDIO] = &pll_audio_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw, + [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw, + [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, + [CLK_PLL_DE] = &pll_de_clk.common.hw, + [CLK_PLL_ISP] = &pll_isp_clk.common.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_C0CPUX] = &c0cpux_clk.common.hw, + [CLK_C1CPUX] = &c1cpux_clk.common.hw, + [CLK_ATB0] = &atb0_clk.common.hw, + [CLK_AXI0] = &axi0_clk.common.hw, + [CLK_ATB1] = &atb1_clk.common.hw, + [CLK_AXI1] = &axi1_clk.common.hw, + [CLK_GTBUS] = &gtbus_clk.common.hw, + [CLK_AHB0] = &ahb0_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_AHB2] = &ahb2_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_CCI400] = &cci400_clk.common.hw, + [CLK_ATS] = &ats_clk.common.hw, + [CLK_TRACE] = &trace_clk.common.hw, + + [CLK_OUT_A] = &out_a_clk.common.hw, + [CLK_OUT_B] = &out_b_clk.common.hw, + + [CLK_NAND0_0] = &nand0_0_clk.common.hw, + [CLK_NAND0_1] = &nand0_1_clk.common.hw, + [CLK_NAND1_0] = &nand1_0_clk.common.hw, + [CLK_NAND1_1] = &nand1_1_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw, + [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, + [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, + [CLK_MMC3] = &mmc3_clk.common.hw, + [CLK_MMC3_SAMPLE] = &mmc3_sample_clk.common.hw, + [CLK_MMC3_OUTPUT] = &mmc3_output_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_SS] = &ss_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + [CLK_SPI2] = &spi2_clk.common.hw, + [CLK_SPI3] = &spi3_clk.common.hw, + [CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_I2S1] = &i2s1_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_SDRAM] = &sdram_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_EDP] = &edp_clk.common.hw, + [CLK_MP] = &mp_clk.common.hw, + [CLK_LCD0] = &lcd0_clk.common.hw, + [CLK_LCD1] = &lcd1_clk.common.hw, + [CLK_MIPI_DSI0] = &mipi_dsi0_clk.common.hw, + [CLK_MIPI_DSI1] = &mipi_dsi1_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw, + [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, + [CLK_CSI_ISP] = &csi_isp_clk.common.hw, + [CLK_CSI_MISC] = &csi_misc_clk.common.hw, + [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw, + [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw, + [CLK_FD] = &fd_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_GPU_CORE] = &gpu_core_clk.common.hw, + [CLK_GPU_MEMORY] = &gpu_memory_clk.common.hw, + [CLK_GPU_AXI] = 
&gpu_axi_clk.common.hw, + [CLK_SATA] = &sata_clk.common.hw, + [CLK_AC97] = &ac97_clk.common.hw, + [CLK_MIPI_HSI] = &mipi_hsi_clk.common.hw, + [CLK_GPADC] = &gpadc_clk.common.hw, + [CLK_CIR_TX] = &cir_tx_clk.common.hw, + + [CLK_BUS_FD] = &bus_fd_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, + [CLK_BUS_GPU_CTRL] = &bus_gpu_ctrl_clk.common.hw, + [CLK_BUS_SS] = &bus_ss_clk.common.hw, + [CLK_BUS_MMC] = &bus_mmc_clk.common.hw, + [CLK_BUS_NAND0] = &bus_nand0_clk.common.hw, + [CLK_BUS_NAND1] = &bus_nand1_clk.common.hw, + [CLK_BUS_SDRAM] = &bus_sdram_clk.common.hw, + [CLK_BUS_MIPI_HSI] = &bus_mipi_hsi_clk.common.hw, + [CLK_BUS_SATA] = &bus_sata_clk.common.hw, + [CLK_BUS_TS] = &bus_ts_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw, + [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw, + [CLK_BUS_SPI3] = &bus_spi3_clk.common.hw, + + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_USB] = &bus_usb_clk.common.hw, + [CLK_BUS_GMAC] = &bus_gmac_clk.common.hw, + [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw, + [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + + [CLK_BUS_LCD0] = &bus_lcd0_clk.common.hw, + [CLK_BUS_LCD1] = &bus_lcd1_clk.common.hw, + [CLK_BUS_EDP] = &bus_edp_clk.common.hw, + [CLK_BUS_CSI] = &bus_csi_clk.common.hw, + [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + [CLK_BUS_MP] = &bus_mp_clk.common.hw, + [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw, + + [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_AC97] = &bus_ac97_clk.common.hw, + [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, + [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw, + [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw, + [CLK_BUS_GPADC] = &bus_gpadc_clk.common.hw, + [CLK_BUS_TWD] = &bus_twd_clk.common.hw, + [CLK_BUS_CIR_TX] = &bus_cir_tx_clk.common.hw, + + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw, + [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw, + [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, + [CLK_BUS_UART4] = &bus_uart4_clk.common.hw, + [CLK_BUS_UART5] = &bus_uart5_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun9i_a80_ccu_resets[] = { + /* AHB0 reset controls */ + [RST_BUS_FD] = { 0x5a0, BIT(0) }, + [RST_BUS_VE] = { 0x5a0, BIT(1) }, + [RST_BUS_GPU_CTRL] = { 0x5a0, BIT(3) }, + [RST_BUS_SS] = { 0x5a0, BIT(5) }, + [RST_BUS_MMC] = { 0x5a0, BIT(8) }, + [RST_BUS_NAND0] = { 0x5a0, BIT(12) }, + [RST_BUS_NAND1] = { 0x5a0, BIT(13) }, + [RST_BUS_SDRAM] = { 0x5a0, BIT(14) }, + [RST_BUS_SATA] = { 0x5a0, BIT(16) }, + [RST_BUS_TS] = { 0x5a0, BIT(18) }, + [RST_BUS_SPI0] = { 0x5a0, BIT(20) }, + [RST_BUS_SPI1] = { 0x5a0, BIT(21) }, + [RST_BUS_SPI2] = { 0x5a0, BIT(22) }, + [RST_BUS_SPI3] = { 0x5a0, BIT(23) }, + + /* AHB1 reset controls */ + [RST_BUS_OTG] = { 0x5a4, BIT(0) }, + [RST_BUS_OTG_PHY] = { 0x5a4, BIT(1) }, + [RST_BUS_MIPI_HSI] = { 0x5a4, BIT(9) }, + [RST_BUS_GMAC] = { 0x5a4, BIT(17) }, + [RST_BUS_MSGBOX] = { 0x5a4, BIT(21) }, + [RST_BUS_SPINLOCK] = { 0x5a4, BIT(22) }, + [RST_BUS_HSTIMER] = { 0x5a4, BIT(23) }, + [RST_BUS_DMA] = { 0x5a4, BIT(24) }, + + /* AHB2 reset controls */ + [RST_BUS_LCD0] = { 0x5a8, BIT(0) }, + [RST_BUS_LCD1] = { 
0x5a8, BIT(1) }, + [RST_BUS_EDP] = { 0x5a8, BIT(2) }, + [RST_BUS_LVDS] = { 0x5a8, BIT(3) }, + [RST_BUS_CSI] = { 0x5a8, BIT(4) }, + [RST_BUS_HDMI0] = { 0x5a8, BIT(5) }, + [RST_BUS_HDMI1] = { 0x5a8, BIT(6) }, + [RST_BUS_DE] = { 0x5a8, BIT(7) }, + [RST_BUS_MP] = { 0x5a8, BIT(8) }, + [RST_BUS_GPU] = { 0x5a8, BIT(9) }, + [RST_BUS_MIPI_DSI] = { 0x5a8, BIT(11) }, + + /* APB0 reset controls */ + [RST_BUS_SPDIF] = { 0x5b0, BIT(1) }, + [RST_BUS_AC97] = { 0x5b0, BIT(11) }, + [RST_BUS_I2S0] = { 0x5b0, BIT(12) }, + [RST_BUS_I2S1] = { 0x5b0, BIT(13) }, + [RST_BUS_LRADC] = { 0x5b0, BIT(15) }, + [RST_BUS_GPADC] = { 0x5b0, BIT(17) }, + [RST_BUS_CIR_TX] = { 0x5b0, BIT(19) }, + + /* APB1 reset controls */ + [RST_BUS_I2C0] = { 0x5b4, BIT(0) }, + [RST_BUS_I2C1] = { 0x5b4, BIT(1) }, + [RST_BUS_I2C2] = { 0x5b4, BIT(2) }, + [RST_BUS_I2C3] = { 0x5b4, BIT(3) }, + [RST_BUS_I2C4] = { 0x5b4, BIT(4) }, + [RST_BUS_UART0] = { 0x5b4, BIT(16) }, + [RST_BUS_UART1] = { 0x5b4, BIT(17) }, + [RST_BUS_UART2] = { 0x5b4, BIT(18) }, + [RST_BUS_UART3] = { 0x5b4, BIT(19) }, + [RST_BUS_UART4] = { 0x5b4, BIT(20) }, + [RST_BUS_UART5] = { 0x5b4, BIT(21) }, +}; + +static const struct sunxi_ccu_desc sun9i_a80_ccu_desc = { + .ccu_clks = sun9i_a80_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun9i_a80_ccu_clks), + + .hw_clks = &sun9i_a80_hw_clks, + + .resets = sun9i_a80_ccu_resets, + .num_resets = ARRAY_SIZE(sun9i_a80_ccu_resets), +}; + +static int sun9i_a80_ccu_probe(struct platform_device *pdev) +{ + struct resource *res; + void __iomem *reg; + u32 val; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + /* Enforce d1 = 0, d2 = 0 for Audio PLL */ + val = readl(reg + SUN9I_A80_PLL_AUDIO_REG); + val &= ~(BIT(16) | BIT(18)); + writel(val, reg + SUN9I_A80_PLL_AUDIO_REG); + + return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun9i_a80_ccu_desc); +} + +static const struct of_device_id sun9i_a80_ccu_ids[] = { + { .compatible = "allwinner,sun9i-a80-ccu" }, + { } +}; + +static struct platform_driver sun9i_a80_ccu_driver = { + .probe = sun9i_a80_ccu_probe, + .driver = { + .name = "sun9i-a80-ccu", + .of_match_table = sun9i_a80_ccu_ids, + }, +}; +builtin_platform_driver(sun9i_a80_ccu_driver); diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.h b/drivers/clk/sunxi-ng/ccu-sun9i-a80.h new file mode 100644 index 000000000000..315662341c70 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.h @@ -0,0 +1,57 @@ +/* + * Copyright 2016 Chen-Yu Tsai + * + * Chen-Yu Tsai <wens@csie.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CCU_SUN9I_A80_H_ +#define _CCU_SUN9I_A80_H_ + +#include <dt-bindings/clock/sun9i-a80-ccu.h> +#include <dt-bindings/reset/sun9i-a80-ccu.h> + +#define CLK_PLL_C0CPUX 0 +#define CLK_PLL_C1CPUX 1 + +/* pll-audio and pll-periph0 are exported to the PRCM block */ + +#define CLK_PLL_VE 4 +#define CLK_PLL_DDR 5 +#define CLK_PLL_VIDEO0 6 +#define CLK_PLL_VIDEO1 7 +#define CLK_PLL_GPU 8 +#define CLK_PLL_DE 9 +#define CLK_PLL_ISP 10 +#define CLK_PLL_PERIPH1 11 + +/* The CPUX clocks are exported */ + +#define CLK_ATB0 14 +#define CLK_AXI0 15 +#define CLK_ATB1 16 +#define CLK_AXI1 17 +#define CLK_GTBUS 18 +#define CLK_AHB0 19 +#define CLK_AHB1 20 +#define CLK_AHB2 21 +#define CLK_APB0 22 +#define CLK_APB1 23 +#define CLK_CCI400 24 +#define CLK_ATS 25 +#define CLK_TRACE 26 + +/* module clocks and bus gates exported */ + +#define CLK_NUMBER (CLK_BUS_UART5 + 1) + +#endif /* _CCU_SUN9I_A80_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index 51d4bac97ab3..8a47bafd7890 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -25,13 +25,18 @@ static DEFINE_SPINLOCK(ccu_lock); void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock) { + void __iomem *addr; u32 reg; if (!lock) return; - WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg, - reg & lock, 100, 70000)); + if (common->features & CCU_FEATURE_LOCK_REG) + addr = common->base + common->lock_reg; + else + addr = common->base + common->reg; + + WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000)); } int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, @@ -70,6 +75,11 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, goto err_clk_unreg; reset = kzalloc(sizeof(*reset), GFP_KERNEL); + if (!reset) { + ret = -ENOMEM; + goto err_alloc_reset; + } + reset->rcdev.of_node = node; reset->rcdev.ops = &ccu_reset_ops; reset->rcdev.owner = THIS_MODULE; @@ -85,6 +95,16 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, return 0; err_of_clk_unreg: + kfree(reset); +err_alloc_reset: + of_clk_del_provider(node); err_clk_unreg: + while (--i >= 0) { + struct clk_hw *hw = desc->hw_clks->hws[i]; + + if (!hw) + continue; + clk_hw_unregister(hw); + } return ret; } diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h index b3d9abfbd721..73d81dc58fc5 100644 --- a/drivers/clk/sunxi-ng/ccu_common.h +++ b/drivers/clk/sunxi-ng/ccu_common.h @@ -21,6 +21,8 @@ #define CCU_FEATURE_VARIABLE_PREDIV BIT(1) #define CCU_FEATURE_FIXED_PREDIV BIT(2) #define CCU_FEATURE_FIXED_POSTDIV BIT(3) +#define CCU_FEATURE_ALL_PREDIV BIT(4) +#define CCU_FEATURE_LOCK_REG BIT(5) struct device_node; @@ -56,6 +58,8 @@ struct device_node; struct ccu_common { void __iomem *base; u16 reg; + u16 lock_reg; + u32 prediv; unsigned long features; spinlock_t *lock; diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c index 8659b4cb6c20..4057e6021aa9 100644 --- a/drivers/clk/sunxi-ng/ccu_div.c +++ b/drivers/clk/sunxi-ng/ccu_div.c @@ -77,6 +77,18 @@ static int ccu_div_determine_rate(struct clk_hw *hw, { struct ccu_div *cd = hw_to_ccu_div(hw); + if (clk_hw_get_num_parents(hw) == 1) { + req->rate = divider_round_rate(hw, req->rate, + &req->best_parent_rate, + cd->div.table, + cd->div.width, + cd->div.flags); + + req->best_parent_hw = clk_hw_get_parent(hw); + + return 0; + } + return ccu_mux_helper_determine_rate(&cd->common, &cd->mux, req, ccu_div_round_rate, cd); } diff --git 
a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h index 06540f7cf41c..08d074451204 100644 --- a/drivers/clk/sunxi-ng/ccu_div.h +++ b/drivers/clk/sunxi-ng/ccu_div.h @@ -41,6 +41,7 @@ struct ccu_div_internal { u8 width; u32 max; + u32 offset; u32 flags; @@ -58,20 +59,27 @@ struct ccu_div_internal { #define _SUNXI_CCU_DIV_TABLE(_shift, _width, _table) \ _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, 0) -#define _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, _max, _flags) \ +#define _SUNXI_CCU_DIV_OFFSET_MAX_FLAGS(_shift, _width, _off, _max, _flags) \ { \ .shift = _shift, \ .width = _width, \ .flags = _flags, \ .max = _max, \ + .offset = _off, \ } +#define _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, _max, _flags) \ + _SUNXI_CCU_DIV_OFFSET_MAX_FLAGS(_shift, _width, 1, _max, _flags) + #define _SUNXI_CCU_DIV_FLAGS(_shift, _width, _flags) \ _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, 0, _flags) #define _SUNXI_CCU_DIV_MAX(_shift, _width, _max) \ _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, _max, 0) +#define _SUNXI_CCU_DIV_OFFSET(_shift, _width, _offset) \ + _SUNXI_CCU_DIV_OFFSET_MAX_FLAGS(_shift, _width, _offset, 0, 0) + #define _SUNXI_CCU_DIV(_shift, _width) \ _SUNXI_CCU_DIV_FLAGS(_shift, _width, 0) diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index ebb1b31568a5..22c2ca7a2a22 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -89,11 +89,14 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, m = reg >> cmp->m.shift; m &= (1 << cmp->m.width) - 1; + m += cmp->m.offset; + if (!m) + m++; p = reg >> cmp->p.shift; p &= (1 << cmp->p.width) - 1; - return (parent_rate >> p) / (m + 1); + return (parent_rate >> p) / m; } static int ccu_mp_determine_rate(struct clk_hw *hw, @@ -124,9 +127,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate, reg = readl(cmp->common.base + cmp->common.reg); reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift); reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift); + reg |= (m - cmp->m.offset) << cmp->m.shift; + reg |= ilog2(p) << cmp->p.shift; - writel(reg | (ilog2(p) << cmp->p.shift) | ((m - 1) << cmp->m.shift), - cmp->common.base + cmp->common.reg); + writel(reg, cmp->common.base + cmp->common.reg); spin_unlock_irqrestore(cmp->common.lock, flags); diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c index 678b6cb49f01..8724c01171b1 100644 --- a/drivers/clk/sunxi-ng/ccu_mult.c +++ b/drivers/clk/sunxi-ng/ccu_mult.c @@ -40,8 +40,13 @@ static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux, struct ccu_mult *cm = data; struct _ccu_mult _cm; - _cm.min = 1; - _cm.max = 1 << cm->mult.width; + _cm.min = cm->mult.min; + + if (cm->mult.max) + _cm.max = cm->mult.max; + else + _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1; + ccu_mult_find_best(parent_rate, rate, &_cm); return parent_rate * _cm.mult; @@ -75,6 +80,9 @@ static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw, unsigned long val; u32 reg; + if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac)) + return ccu_frac_helper_read_rate(&cm->common, &cm->frac); + reg = readl(cm->common.base + cm->common.reg); val = reg >> cm->mult.shift; val &= (1 << cm->mult.width) - 1; @@ -82,7 +90,7 @@ static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw, ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1, &parent_rate); - return parent_rate * (val + 1); + return parent_rate * (val + cm->mult.offset); } static int ccu_mult_determine_rate(struct clk_hw *hw, @@ 
-102,20 +110,30 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags; u32 reg; + if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) + return ccu_frac_helper_set_rate(&cm->common, &cm->frac, rate); + else + ccu_frac_helper_disable(&cm->common, &cm->frac); + ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1, &parent_rate); _cm.min = cm->mult.min; - _cm.max = 1 << cm->mult.width; + + if (cm->mult.max) + _cm.max = cm->mult.max; + else + _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1; + ccu_mult_find_best(parent_rate, rate, &_cm); spin_lock_irqsave(cm->common.lock, flags); reg = readl(cm->common.base + cm->common.reg); reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift); + reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift); - writel(reg | ((_cm.mult - 1) << cm->mult.shift), - cm->common.base + cm->common.reg); + writel(reg, cm->common.base + cm->common.reg); spin_unlock_irqrestore(cm->common.lock, flags); diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h index c1a2134bdc71..524acddfcb2e 100644 --- a/drivers/clk/sunxi-ng/ccu_mult.h +++ b/drivers/clk/sunxi-ng/ccu_mult.h @@ -2,27 +2,39 @@ #define _CCU_MULT_H_ #include "ccu_common.h" +#include "ccu_frac.h" #include "ccu_mux.h" struct ccu_mult_internal { + u8 offset; u8 shift; u8 width; u8 min; + u8 max; }; -#define _SUNXI_CCU_MULT_MIN(_shift, _width, _min) \ - { \ - .shift = _shift, \ - .width = _width, \ - .min = _min, \ +#define _SUNXI_CCU_MULT_OFFSET_MIN_MAX(_shift, _width, _offset, _min, _max) \ + { \ + .min = _min, \ + .max = _max, \ + .offset = _offset, \ + .shift = _shift, \ + .width = _width, \ } +#define _SUNXI_CCU_MULT_MIN(_shift, _width, _min) \ + _SUNXI_CCU_MULT_OFFSET_MIN_MAX(_shift, _width, 1, _min, 0) + +#define _SUNXI_CCU_MULT_OFFSET(_shift, _width, _offset) \ + _SUNXI_CCU_MULT_OFFSET_MIN_MAX(_shift, _width, _offset, 1, 0) + #define _SUNXI_CCU_MULT(_shift, _width) \ - _SUNXI_CCU_MULT_MIN(_shift, _width, 1) + _SUNXI_CCU_MULT_OFFSET_MIN_MAX(_shift, _width, 1, 1, 0) struct ccu_mult { u32 enable; + struct ccu_frac_internal frac; struct ccu_mult_internal mult; struct ccu_mux_internal mux; struct ccu_common common; diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c index a43ad52a957d..c6bb1f523232 100644 --- a/drivers/clk/sunxi-ng/ccu_mux.c +++ b/drivers/clk/sunxi-ng/ccu_mux.c @@ -25,9 +25,15 @@ void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common, int i; if (!((common->features & CCU_FEATURE_FIXED_PREDIV) || - (common->features & CCU_FEATURE_VARIABLE_PREDIV))) + (common->features & CCU_FEATURE_VARIABLE_PREDIV) || + (common->features & CCU_FEATURE_ALL_PREDIV))) return; + if (common->features & CCU_FEATURE_ALL_PREDIV) { + *parent_rate = *parent_rate / common->prediv; + return; + } + reg = readl(common->base + common->reg); if (parent_index < 0) { parent_index = reg >> cm->shift; @@ -64,19 +70,46 @@ int ccu_mux_helper_determine_rate(struct ccu_common *common, struct clk_hw *best_parent, *hw = &common->hw; unsigned int i; + if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) { + unsigned long adj_parent_rate; + + best_parent = clk_hw_get_parent(hw); + best_parent_rate = clk_hw_get_rate(best_parent); + + adj_parent_rate = best_parent_rate; + ccu_mux_helper_adjust_parent_for_prediv(common, cm, -1, + &adj_parent_rate); + + best_rate = round(cm, adj_parent_rate, req->rate, data); + + goto out; + } + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { - unsigned long tmp_rate, 
parent_rate; + unsigned long tmp_rate, parent_rate, adj_parent_rate; struct clk_hw *parent; parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; - parent_rate = clk_hw_get_rate(parent); + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { + struct clk_rate_request parent_req = *req; + int ret = __clk_determine_rate(parent, &parent_req); + + if (ret) + continue; + + parent_rate = parent_req.rate; + } else { + parent_rate = clk_hw_get_rate(parent); + } + + adj_parent_rate = parent_rate; ccu_mux_helper_adjust_parent_for_prediv(common, cm, i, - &parent_rate); + &adj_parent_rate); - tmp_rate = round(cm, clk_hw_get_rate(parent), req->rate, data); + tmp_rate = round(cm, adj_parent_rate, req->rate, data); if (tmp_rate == req->rate) { best_parent = parent; best_parent_rate = parent_rate; diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c index eaf0fdf78d2b..b9e9b8a9d1b4 100644 --- a/drivers/clk/sunxi-ng/ccu_nk.c +++ b/drivers/clk/sunxi-ng/ccu_nk.c @@ -76,12 +76,17 @@ static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw, n = reg >> nk->n.shift; n &= (1 << nk->n.width) - 1; + n += nk->n.offset; + if (!n) + n++; k = reg >> nk->k.shift; k &= (1 << nk->k.width) - 1; + k += nk->k.offset; + if (!k) + k++; - rate = parent_rate * (n + 1) * (k + 1); - + rate = parent_rate * n * k; if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nk->fixed_post_div; @@ -98,9 +103,9 @@ static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate, rate *= nk->fixed_post_div; _nk.min_n = nk->n.min; - _nk.max_n = 1 << nk->n.width; + _nk.max_n = nk->n.max ?: 1 << nk->n.width; _nk.min_k = nk->k.min; - _nk.max_k = 1 << nk->k.width; + _nk.max_k = nk->k.max ?: 1 << nk->k.width; ccu_nk_find_best(*parent_rate, rate, &_nk); rate = *parent_rate * _nk.n * _nk.k; @@ -123,9 +128,9 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate, rate = rate * nk->fixed_post_div; _nk.min_n = nk->n.min; - _nk.max_n = 1 << nk->n.width; + _nk.max_n = nk->n.max ?: 1 << nk->n.width; _nk.min_k = nk->k.min; - _nk.max_k = 1 << nk->k.width; + _nk.max_k = nk->k.max ?: 1 << nk->k.width; ccu_nk_find_best(parent_rate, rate, &_nk); @@ -135,8 +140,9 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate, reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift); reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift); - writel(reg | ((_nk.k - 1) << nk->k.shift) | ((_nk.n - 1) << nk->n.shift), - nk->common.base + nk->common.reg); + reg |= (_nk.k - nk->k.offset) << nk->k.shift; + reg |= (_nk.n - nk->n.offset) << nk->n.shift; + writel(reg, nk->common.base + nk->common.reg); spin_unlock_irqrestore(nk->common.lock, flags); diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c index 9b840a47a94d..71f81e95a061 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.c +++ b/drivers/clk/sunxi-ng/ccu_nkm.c @@ -82,14 +82,23 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw, n = reg >> nkm->n.shift; n &= (1 << nkm->n.width) - 1; + n += nkm->n.offset; + if (!n) + n++; k = reg >> nkm->k.shift; k &= (1 << nkm->k.width) - 1; + k += nkm->k.offset; + if (!k) + k++; m = reg >> nkm->m.shift; m &= (1 << nkm->m.width) - 1; + m += nkm->m.offset; + if (!m) + m++; - return parent_rate * (n + 1) * (k + 1) / (m + 1); + return parent_rate * n * k / m; } static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux, @@ -101,9 +110,9 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux, struct _ccu_nkm _nkm; _nkm.min_n = nkm->n.min; - _nkm.max_n = 1 << 
nkm->n.width; + _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; _nkm.min_k = nkm->k.min; - _nkm.max_k = 1 << nkm->k.width; + _nkm.max_k = nkm->k.max ?: 1 << nkm->k.width; _nkm.min_m = 1; _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width; @@ -130,9 +139,9 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate, u32 reg; _nkm.min_n = nkm->n.min; - _nkm.max_n = 1 << nkm->n.width; + _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; _nkm.min_k = nkm->k.min; - _nkm.max_k = 1 << nkm->k.width; + _nkm.max_k = nkm->k.max ?: 1 << nkm->k.width; _nkm.min_m = 1; _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width; @@ -145,10 +154,9 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate, reg &= ~GENMASK(nkm->k.width + nkm->k.shift - 1, nkm->k.shift); reg &= ~GENMASK(nkm->m.width + nkm->m.shift - 1, nkm->m.shift); - reg |= (_nkm.n - 1) << nkm->n.shift; - reg |= (_nkm.k - 1) << nkm->k.shift; - reg |= (_nkm.m - 1) << nkm->m.shift; - + reg |= (_nkm.n - nkm->n.offset) << nkm->n.shift; + reg |= (_nkm.k - nkm->k.offset) << nkm->k.shift; + reg |= (_nkm.m - nkm->m.offset) << nkm->m.shift; writel(reg, nkm->common.base + nkm->common.reg); spin_unlock_irqrestore(nkm->common.lock, flags); diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index 684c42da3ebb..a2b40a000157 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -88,17 +88,26 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw, n = reg >> nkmp->n.shift; n &= (1 << nkmp->n.width) - 1; + n += nkmp->n.offset; + if (!n) + n++; k = reg >> nkmp->k.shift; k &= (1 << nkmp->k.width) - 1; + k += nkmp->k.offset; + if (!k) + k++; m = reg >> nkmp->m.shift; m &= (1 << nkmp->m.width) - 1; + m += nkmp->m.offset; + if (!m) + m++; p = reg >> nkmp->p.shift; p &= (1 << nkmp->p.width) - 1; - return (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1); + return (parent_rate * n * k >> p) / m; } static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, @@ -108,9 +117,9 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, struct _ccu_nkmp _nkmp; _nkmp.min_n = nkmp->n.min; - _nkmp.max_n = 1 << nkmp->n.width; + _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; _nkmp.min_k = nkmp->k.min; - _nkmp.max_k = 1 << nkmp->k.width; + _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width; _nkmp.min_m = 1; _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width; _nkmp.min_p = 1; @@ -130,9 +139,9 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, u32 reg; _nkmp.min_n = 1; - _nkmp.max_n = 1 << nkmp->n.width; + _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; _nkmp.min_k = 1; - _nkmp.max_k = 1 << nkmp->k.width; + _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width; _nkmp.min_m = 1; _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width; _nkmp.min_p = 1; @@ -148,9 +157,9 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, reg &= ~GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); reg &= ~GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); - reg |= (_nkmp.n - 1) << nkmp->n.shift; - reg |= (_nkmp.k - 1) << nkmp->k.shift; - reg |= (_nkmp.m - 1) << nkmp->m.shift; + reg |= (_nkmp.n - nkmp->n.offset) << nkmp->n.shift; + reg |= (_nkmp.k - nkmp->k.offset) << nkmp->k.shift; + reg |= (_nkmp.m - nkmp->m.offset) << nkmp->m.shift; reg |= ilog2(_nkmp.p) << nkmp->p.shift; writel(reg, nkmp->common.base + nkmp->common.reg); diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index c9f3b6c982f0..af71b1909cd9 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ 
b/drivers/clk/sunxi-ng/ccu_nm.c @@ -80,11 +80,17 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw, n = reg >> nm->n.shift; n &= (1 << nm->n.width) - 1; + n += nm->n.offset; + if (!n) + n++; m = reg >> nm->m.shift; m &= (1 << nm->m.width) - 1; + m += nm->m.offset; + if (!m) + m++; - return parent_rate * (n + 1) / (m + 1); + return parent_rate * n / m; } static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, @@ -94,7 +100,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, struct _ccu_nm _nm; _nm.min_n = nm->n.min; - _nm.max_n = 1 << nm->n.width; + _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; _nm.max_m = nm->m.max ?: 1 << nm->m.width; @@ -117,7 +123,7 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate, ccu_frac_helper_disable(&nm->common, &nm->frac); _nm.min_n = 1; - _nm.max_n = 1 << nm->n.width; + _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; _nm.max_m = nm->m.max ?: 1 << nm->m.width; @@ -129,8 +135,9 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate, reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift); reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift); - writel(reg | ((_nm.m - 1) << nm->m.shift) | ((_nm.n - 1) << nm->n.shift), - nm->common.base + nm->common.reg); + reg |= (_nm.n - nm->n.offset) << nm->n.shift; + reg |= (_nm.m - nm->m.offset) << nm->m.shift; + writel(reg, nm->common.base + nm->common.reg); spin_unlock_irqrestore(nm->common.lock, flags); diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig index 1ba30d1e14f2..7ddacae5d0b1 100644 --- a/drivers/clk/tegra/Kconfig +++ b/drivers/clk/tegra/Kconfig @@ -1,3 +1,7 @@ config TEGRA_CLK_EMC def_bool y depends on TEGRA124_EMC + +config CLK_TEGRA_BPMP + def_bool y + depends on TEGRA_BPMP diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index 33fd0938d79e..4be8af28ee61 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o obj-y += cvb.o obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o +obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c new file mode 100644 index 000000000000..638ace64033b --- /dev/null +++ b/drivers/clk/tegra/clk-bpmp.c @@ -0,0 +1,620 @@ +/* + * Copyright (C) 2016 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/seq_buf.h> +#include <linux/slab.h> + +#include <soc/tegra/bpmp.h> +#include <soc/tegra/bpmp-abi.h> + +#define TEGRA_BPMP_DUMP_CLOCK_INFO 0 + +#define TEGRA_BPMP_CLK_HAS_MUX BIT(0) +#define TEGRA_BPMP_CLK_HAS_SET_RATE BIT(1) +#define TEGRA_BPMP_CLK_IS_ROOT BIT(2) + +struct tegra_bpmp_clk_info { + unsigned int id; + char name[MRQ_CLK_NAME_MAXLEN]; + unsigned int parents[MRQ_CLK_MAX_PARENTS]; + unsigned int num_parents; + unsigned long flags; +}; + +struct tegra_bpmp_clk { + struct clk_hw hw; + + struct tegra_bpmp *bpmp; + unsigned int id; + + unsigned int num_parents; + unsigned int *parents; +}; + +static inline struct tegra_bpmp_clk *to_tegra_bpmp_clk(struct clk_hw *hw) +{ + return container_of(hw, struct tegra_bpmp_clk, hw); +} + +struct tegra_bpmp_clk_message { + unsigned int cmd; + unsigned int id; + + struct { + const void *data; + size_t size; + } tx; + + struct { + void *data; + size_t size; + } rx; +}; + +static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp, + const struct tegra_bpmp_clk_message *clk) +{ + struct mrq_clk_request request; + struct tegra_bpmp_message msg; + void *req = &request; + + memset(&request, 0, sizeof(request)); + request.cmd_and_id = (clk->cmd << 24) | clk->id; + + /* + * The mrq_clk_request structure has an anonymous union at offset 4 + * that contains all possible sub-command structures. Copy the data + * to that union. Ideally we'd be able to refer to it by name, but + * doing so would require changing the ABI header and increase the + * maintenance burden. + */ + memcpy(req + 4, clk->tx.data, clk->tx.size); + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_CLK; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = clk->rx.data; + msg.rx.size = clk->rx.size; + + return tegra_bpmp_transfer(bpmp, &msg); +} + +static int tegra_bpmp_clk_prepare(struct clk_hw *hw) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct tegra_bpmp_clk_message msg; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_ENABLE; + msg.id = clk->id; + + return tegra_bpmp_clk_transfer(clk->bpmp, &msg); +} + +static void tegra_bpmp_clk_unprepare(struct clk_hw *hw) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct tegra_bpmp_clk_message msg; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_DISABLE; + msg.id = clk->id; + + err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) + dev_err(clk->bpmp->dev, "failed to disable clock %s: %d\n", + clk_hw_get_name(hw), err); +} + +static int tegra_bpmp_clk_is_prepared(struct clk_hw *hw) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_is_enabled_response response; + struct tegra_bpmp_clk_message msg; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_IS_ENABLED; + msg.id = clk->id; + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) + return err; + + return response.state; +} + +static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_get_rate_response response; + struct cmd_clk_get_rate_request request; + struct tegra_bpmp_clk_message msg; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_GET_RATE; + msg.id = clk->id; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = 
tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) + return err; + + return response.rate; +} + +static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_round_rate_response response; + struct cmd_clk_round_rate_request request; + struct tegra_bpmp_clk_message msg; + int err; + + memset(&request, 0, sizeof(request)); + request.rate = rate; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_ROUND_RATE; + msg.id = clk->id; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) + return err; + + return response.rate; +} + +static int tegra_bpmp_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_set_parent_response response; + struct cmd_clk_set_parent_request request; + struct tegra_bpmp_clk_message msg; + int err; + + memset(&request, 0, sizeof(request)); + request.parent_id = clk->parents[index]; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_SET_PARENT; + msg.id = clk->id; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) + return err; + + /* XXX check parent ID in response */ + + return 0; +} + +static u8 tegra_bpmp_clk_get_parent(struct clk_hw *hw) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_get_parent_response response; + struct tegra_bpmp_clk_message msg; + unsigned int i; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_GET_PARENT; + msg.id = clk->id; + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); + if (err < 0) { + dev_err(clk->bpmp->dev, "failed to get parent for %s: %d\n", + clk_hw_get_name(hw), err); + return U8_MAX; + } + + for (i = 0; i < clk->num_parents; i++) + if (clk->parents[i] == response.parent_id) + return i; + + return U8_MAX; +} + +static int tegra_bpmp_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); + struct cmd_clk_set_rate_response response; + struct cmd_clk_set_rate_request request; + struct tegra_bpmp_clk_message msg; + + memset(&request, 0, sizeof(request)); + request.rate = rate; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_SET_RATE; + msg.id = clk->id; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + return tegra_bpmp_clk_transfer(clk->bpmp, &msg); +} + +static const struct clk_ops tegra_bpmp_clk_gate_ops = { + .prepare = tegra_bpmp_clk_prepare, + .unprepare = tegra_bpmp_clk_unprepare, + .is_prepared = tegra_bpmp_clk_is_prepared, + .recalc_rate = tegra_bpmp_clk_recalc_rate, +}; + +static const struct clk_ops tegra_bpmp_clk_mux_ops = { + .prepare = tegra_bpmp_clk_prepare, + .unprepare = tegra_bpmp_clk_unprepare, + .is_prepared = tegra_bpmp_clk_is_prepared, + .recalc_rate = tegra_bpmp_clk_recalc_rate, + .set_parent = tegra_bpmp_clk_set_parent, + .get_parent = tegra_bpmp_clk_get_parent, +}; + +static const struct clk_ops tegra_bpmp_clk_rate_ops = { + .prepare = tegra_bpmp_clk_prepare, + .unprepare = tegra_bpmp_clk_unprepare, + .is_prepared = tegra_bpmp_clk_is_prepared, + .recalc_rate = tegra_bpmp_clk_recalc_rate, + 
.round_rate = tegra_bpmp_clk_round_rate, + .set_rate = tegra_bpmp_clk_set_rate, +}; + +static const struct clk_ops tegra_bpmp_clk_mux_rate_ops = { + .prepare = tegra_bpmp_clk_prepare, + .unprepare = tegra_bpmp_clk_unprepare, + .is_prepared = tegra_bpmp_clk_is_prepared, + .recalc_rate = tegra_bpmp_clk_recalc_rate, + .round_rate = tegra_bpmp_clk_round_rate, + .set_parent = tegra_bpmp_clk_set_parent, + .get_parent = tegra_bpmp_clk_get_parent, + .set_rate = tegra_bpmp_clk_set_rate, +}; + +static int tegra_bpmp_clk_get_max_id(struct tegra_bpmp *bpmp) +{ + struct cmd_clk_get_max_clk_id_response response; + struct tegra_bpmp_clk_message msg; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_GET_MAX_CLK_ID; + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(bpmp, &msg); + if (err < 0) + return err; + + if (response.max_id > INT_MAX) + return -E2BIG; + + return response.max_id; +} + +static int tegra_bpmp_clk_get_info(struct tegra_bpmp *bpmp, unsigned int id, + struct tegra_bpmp_clk_info *info) +{ + struct cmd_clk_get_all_info_response response; + struct tegra_bpmp_clk_message msg; + unsigned int i; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = CMD_CLK_GET_ALL_INFO; + msg.id = id; + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_clk_transfer(bpmp, &msg); + if (err < 0) + return err; + + strlcpy(info->name, response.name, MRQ_CLK_NAME_MAXLEN); + info->num_parents = response.num_parents; + + for (i = 0; i < info->num_parents; i++) + info->parents[i] = response.parents[i]; + + info->flags = response.flags; + + return 0; +} + +static void tegra_bpmp_clk_info_dump(struct tegra_bpmp *bpmp, + const char *level, + const struct tegra_bpmp_clk_info *info) +{ + const char *prefix = ""; + struct seq_buf buf; + unsigned int i; + char flags[64]; + + seq_buf_init(&buf, flags, sizeof(flags)); + + if (info->flags) + seq_buf_printf(&buf, "("); + + if (info->flags & TEGRA_BPMP_CLK_HAS_MUX) { + seq_buf_printf(&buf, "%smux", prefix); + prefix = ", "; + } + + if ((info->flags & TEGRA_BPMP_CLK_HAS_SET_RATE) == 0) { + seq_buf_printf(&buf, "%sfixed", prefix); + prefix = ", "; + } + + if (info->flags & TEGRA_BPMP_CLK_IS_ROOT) { + seq_buf_printf(&buf, "%sroot", prefix); + prefix = ", "; + } + + if (info->flags) + seq_buf_printf(&buf, ")"); + + dev_printk(level, bpmp->dev, "%03u: %s\n", info->id, info->name); + dev_printk(level, bpmp->dev, " flags: %lx %s\n", info->flags, flags); + dev_printk(level, bpmp->dev, " parents: %u\n", info->num_parents); + + for (i = 0; i < info->num_parents; i++) + dev_printk(level, bpmp->dev, " %03u\n", info->parents[i]); +} + +static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp, + struct tegra_bpmp_clk_info **clocksp) +{ + struct tegra_bpmp_clk_info *clocks; + unsigned int max_id, id, count = 0; + unsigned int holes = 0; + int err; + + err = tegra_bpmp_clk_get_max_id(bpmp); + if (err < 0) + return err; + + max_id = err; + + dev_dbg(bpmp->dev, "maximum clock ID: %u\n", max_id); + + clocks = kcalloc(max_id + 1, sizeof(*clocks), GFP_KERNEL); + if (!clocks) + return -ENOMEM; + + for (id = 0; id <= max_id; id++) { + struct tegra_bpmp_clk_info *info = &clocks[count]; + + err = tegra_bpmp_clk_get_info(bpmp, id, info); + if (err < 0) { + dev_err(bpmp->dev, "failed to query clock %u: %d\n", + id, err); + continue; + } + + if (info->num_parents >= U8_MAX) { + dev_err(bpmp->dev, + "clock %u has too many parents (%u, max: %u)\n", + id, info->num_parents, U8_MAX); + continue; + } + + /* clock not 
exposed by BPMP */ + if (info->name[0] == '\0') { + holes++; + continue; + } + + info->id = id; + count++; + + if (TEGRA_BPMP_DUMP_CLOCK_INFO) + tegra_bpmp_clk_info_dump(bpmp, KERN_DEBUG, info); + } + + dev_dbg(bpmp->dev, "holes: %u\n", holes); + *clocksp = clocks; + + return count; +} + +static const struct tegra_bpmp_clk_info * +tegra_bpmp_clk_find(const struct tegra_bpmp_clk_info *clocks, + unsigned int num_clocks, unsigned int id) +{ + unsigned int i; + + for (i = 0; i < num_clocks; i++) + if (clocks[i].id == id) + return &clocks[i]; + + return NULL; +} + +static struct tegra_bpmp_clk * +tegra_bpmp_clk_register(struct tegra_bpmp *bpmp, + const struct tegra_bpmp_clk_info *info, + const struct tegra_bpmp_clk_info *clocks, + unsigned int num_clocks) +{ + struct tegra_bpmp_clk *clk; + struct clk_init_data init; + const char **parents; + unsigned int i; + int err; + + clk = devm_kzalloc(bpmp->dev, sizeof(*clk), GFP_KERNEL); + if (!clk) + return ERR_PTR(-ENOMEM); + + clk->id = info->id; + clk->bpmp = bpmp; + + clk->parents = devm_kcalloc(bpmp->dev, info->num_parents, + sizeof(*clk->parents), GFP_KERNEL); + if (!clk->parents) + return ERR_PTR(-ENOMEM); + + clk->num_parents = info->num_parents; + + /* hardware clock initialization */ + memset(&init, 0, sizeof(init)); + init.name = info->name; + clk->hw.init = &init; + + if (info->flags & TEGRA_BPMP_CLK_HAS_MUX) { + if (info->flags & TEGRA_BPMP_CLK_HAS_SET_RATE) + init.ops = &tegra_bpmp_clk_mux_rate_ops; + else + init.ops = &tegra_bpmp_clk_mux_ops; + } else { + if (info->flags & TEGRA_BPMP_CLK_HAS_SET_RATE) + init.ops = &tegra_bpmp_clk_rate_ops; + else + init.ops = &tegra_bpmp_clk_gate_ops; + } + + init.num_parents = info->num_parents; + + parents = kcalloc(info->num_parents, sizeof(*parents), GFP_KERNEL); + if (!parents) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < info->num_parents; i++) { + const struct tegra_bpmp_clk_info *parent; + + /* keep a private copy of the ID to parent index map */ + clk->parents[i] = info->parents[i]; + + parent = tegra_bpmp_clk_find(clocks, num_clocks, + info->parents[i]); + if (!parent) { + dev_err(bpmp->dev, "no parent %u found for %u\n", + info->parents[i], info->id); + continue; + } + + parents[i] = parent->name; + } + + init.parent_names = parents; + + err = devm_clk_hw_register(bpmp->dev, &clk->hw); + + kfree(parents); + + if (err < 0) + return ERR_PTR(err); + + return clk; +} + +static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp, + struct tegra_bpmp_clk_info *infos, + unsigned int count) +{ + struct tegra_bpmp_clk *clk; + unsigned int i; + + bpmp->num_clocks = count; + + bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(clk), GFP_KERNEL); + if (!bpmp->clocks) + return -ENOMEM; + + for (i = 0; i < count; i++) { + struct tegra_bpmp_clk_info *info = &infos[i]; + + clk = tegra_bpmp_clk_register(bpmp, info, infos, count); + if (IS_ERR(clk)) { + dev_err(bpmp->dev, + "failed to register clock %u (%s): %ld\n", + info->id, info->name, PTR_ERR(clk)); + continue; + } + + bpmp->clocks[i] = clk; + } + + return 0; +} + +static void tegra_bpmp_unregister_clocks(struct tegra_bpmp *bpmp) +{ + unsigned int i; + + for (i = 0; i < bpmp->num_clocks; i++) + clk_hw_unregister(&bpmp->clocks[i]->hw); +} + +static struct clk_hw *tegra_bpmp_clk_of_xlate(struct of_phandle_args *clkspec, + void *data) +{ + unsigned int id = clkspec->args[0], i; + struct tegra_bpmp *bpmp = data; + + for (i = 0; i < bpmp->num_clocks; i++) + if (bpmp->clocks[i]->id == id) + return &bpmp->clocks[i]->hw; + + return NULL; +} + +int 
tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp) +{ + struct tegra_bpmp_clk_info *clocks; + unsigned int count; + int err; + + err = tegra_bpmp_probe_clocks(bpmp, &clocks); + if (err < 0) + return err; + + count = err; + + dev_dbg(bpmp->dev, "%u clocks probed\n", count); + + err = tegra_bpmp_register_clocks(bpmp, clocks, count); + if (err < 0) + goto free; + + err = of_clk_add_hw_provider(bpmp->dev->of_node, + tegra_bpmp_clk_of_xlate, + bpmp); + if (err < 0) { + tegra_bpmp_unregister_clocks(bpmp); + goto free; + } + +free: + kfree(clocks); + return err; +} diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index b4e5de16e561..6bb87784a0d6 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -140,6 +140,35 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div) return true; } +static int _div_round_up(const struct clk_div_table *table, + unsigned long parent_rate, unsigned long rate) +{ + const struct clk_div_table *clkt; + int up = INT_MAX; + int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate); + + for (clkt = table; clkt->div; clkt++) { + if (clkt->div == div) + return clkt->div; + else if (clkt->div < div) + continue; + + if ((clkt->div - div) < (up - div)) + up = clkt->div; + } + + return up; +} + +static int _div_round(const struct clk_div_table *table, + unsigned long parent_rate, unsigned long rate) +{ + if (!table) + return DIV_ROUND_UP(parent_rate, rate); + + return _div_round_up(table, parent_rate, rate); +} + static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { @@ -155,7 +184,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; - bestdiv = DIV_ROUND_UP(parent_rate, rate); + bestdiv = _div_round(divider->table, parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; return bestdiv; diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index 0007218ce6a0..2cf386347f0c 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -90,11 +90,8 @@ static int uniphier_clk_probe(struct platform_device *pdev) dev_dbg(dev, "register %s (index=%d)\n", p->name, p->idx); hw = uniphier_clk_register(dev, regmap, p); - if (IS_ERR(hw)) { - dev_err(dev, "failed to register %s (error %ld)\n", - p->name, PTR_ERR(hw)); - return PTR_ERR(hw); - } + if (WARN(IS_ERR(hw), "failed to register %s", p->name)) + continue; if (p->idx >= 0) hw_data->hws[p->idx] = hw; diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c index 9bff26e0cbb0..ec11f55594ad 100644 --- a/drivers/clk/uniphier/clk-uniphier-cpugear.c +++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c @@ -14,7 +14,6 @@ */ #include <linux/clk-provider.h> -#include <linux/delay.h> #include <linux/device.h> #include <linux/regmap.h> diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index d049316c1c0f..c8027d909429 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -29,6 +29,15 @@ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15) +#define UNIPHIER_SLD3_SYS_CLK_NAND(idx) \ + UNIPHIER_CLK_GATE("nand", (idx), NULL, 0x2104, 2) + +#define UNIPHIER_LD11_SYS_CLK_NAND(idx) \ + UNIPHIER_CLK_GATE("nand", (idx), NULL, 0x210c, 0) + +#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \ + UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2) + #define UNIPHIER_SLD3_SYS_CLK_STDMAC(idx) \ UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x2104, 10) @@ -48,6 +57,7 @@ const struct uniphier_clk_data uniphier_sld3_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_SLD3_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), UNIPHIER_SLD3_SYS_CLK_STDMAC(8), @@ -61,6 +71,7 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_SLD3_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ @@ -74,6 +85,7 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_SLD3_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */ @@ -89,6 +101,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_SLD3_SYS_CLK_SD, UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12), UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */ @@ -101,6 +114,7 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */ 
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_PRO5_SYS_CLK_SD, UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC */ UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */ @@ -113,6 +127,7 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), + UNIPHIER_SLD3_SYS_CLK_NAND(2), UNIPHIER_PRO5_SYS_CLK_SD, UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, RLE */ /* GIO is always clock-enabled: no function for 0x2104 bit6 */ @@ -131,6 +146,9 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40), + UNIPHIER_LD11_SYS_CLK_NAND(2), + UNIPHIER_LD11_SYS_CLK_EMMC(4), + /* Index 5 reserved for eMMC PHY */ UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */ UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25), /* CPU gears */ @@ -156,6 +174,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40), + UNIPHIER_LD11_SYS_CLK_NAND(2), + UNIPHIER_LD11_SYS_CLK_EMMC(4), + /* Index 5 reserved for eMMC PHY */ UNIPHIER_LD20_SYS_CLK_SD, UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC */ /* GIO is always clock-enabled: no function for 0x210c bit5 */ diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c index a07c31e6f26d..2257d12ba988 100644 --- a/drivers/clk/ux500/abx500-clk.c +++ b/drivers/clk/ux500/abx500-clk.c @@ -10,20 +10,26 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/mfd/abx500/ab8500-sysctrl.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> -#include <linux/mfd/dbx500-prcmu.h> +#include <dt-bindings/clock/ste-ab8500.h> #include "clk.h" +#define AB8500_NUM_CLKS 6 + +static struct clk *ab8500_clks[AB8500_NUM_CLKS]; +static struct clk_onecell_data ab8500_clk_data; + /* Clock definitions for ab8500 */ static int ab8500_reg_clks(struct device *dev) { int ret; struct clk *clk; - + struct device_node *np = dev->of_node; const char *intclk_parents[] = {"ab8500_sysclk", "ulpclk"}; u16 intclk_reg_sel[] = {0 , AB8500_SYSULPCLKCTRL1}; u8 intclk_reg_mask[] = {0 , AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK}; @@ -32,55 +38,52 @@ static int ab8500_reg_clks(struct device *dev) (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT) }; - dev_info(dev, "register clocks for ab850x\n"); - /* Enable SWAT */ ret = ab8500_sysctrl_set(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE); if (ret) return ret; - /* ab8500_sysclk */ - clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0); - clk_register_clkdev(clk, "sysclk", "ab8500-usb.0"); - clk_register_clkdev(clk, "sysclk", "ab-iddet.0"); - clk_register_clkdev(clk, "sysclk", "snd-soc-mop500.0"); - clk_register_clkdev(clk, "sysclk", "shrm_bus"); - /* ab8500_sysclk2 */ clk = clk_reg_sysctrl_gate(dev , "ab8500_sysclk2", "ab8500_sysclk", AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ, AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ, 0, 0); - clk_register_clkdev(clk, "sysclk", "0-0070"); + ab8500_clks[AB8500_SYSCLK_BUF2] = clk; /* ab8500_sysclk3 */ clk = 
clk_reg_sysctrl_gate(dev , "ab8500_sysclk3", "ab8500_sysclk", AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ, AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ, 0, 0); - clk_register_clkdev(clk, "sysclk", "cg1960_core.0"); + ab8500_clks[AB8500_SYSCLK_BUF3] = clk; /* ab8500_sysclk4 */ clk = clk_reg_sysctrl_gate(dev , "ab8500_sysclk4", "ab8500_sysclk", AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ, AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ, 0, 0); + ab8500_clks[AB8500_SYSCLK_BUF4] = clk; /* ab_ulpclk */ clk = clk_reg_sysctrl_gate_fixed_rate(dev, "ulpclk", NULL, AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_ULPCLKREQ, AB8500_SYSULPCLKCTRL1_ULPCLKREQ, 38400000, 9000, 0); - clk_register_clkdev(clk, "ulpclk", "snd-soc-mop500.0"); + ab8500_clks[AB8500_SYSCLK_ULP] = clk; /* ab8500_intclk */ clk = clk_reg_sysctrl_set_parent(dev , "intclk", intclk_parents, 2, intclk_reg_sel, intclk_reg_mask, intclk_reg_bits, 0); - clk_register_clkdev(clk, "intclk", "snd-soc-mop500.0"); - clk_register_clkdev(clk, NULL, "ab8500-pwm.1"); + ab8500_clks[AB8500_SYSCLK_INT] = clk; /* ab8500_audioclk */ clk = clk_reg_sysctrl_gate(dev , "audioclk", "intclk", AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_AUDIOCLKENA, AB8500_SYSULPCLKCTRL1_AUDIOCLKENA, 0, 0); - clk_register_clkdev(clk, "audioclk", "ab8500-codec.0"); + ab8500_clks[AB8500_SYSCLK_AUDIO] = clk; + + ab8500_clk_data.clks = ab8500_clks; + ab8500_clk_data.clk_num = ARRAY_SIZE(ab8500_clks); + of_clk_add_provider(np, of_clk_src_onecell_get, &ab8500_clk_data); + + dev_info(dev, "registered clocks for ab850x\n"); return 0; } @@ -116,9 +119,15 @@ static int abx500_clk_probe(struct platform_device *pdev) return ret; } +static const struct of_device_id abx500_clk_match[] = { + { .compatible = "stericsson,ab8500-clk", }, + {} +}; + static struct platform_driver abx500_clk_driver = { .driver = { .name = "abx500-clk", + .of_match_table = abx500_clk_match, }, .probe = abx500_clk_probe, }; @@ -127,7 +136,6 @@ static int __init abx500_clk_init(void) { return platform_driver_register(&abx500_clk_driver); } - arch_initcall(abx500_clk_init); MODULE_AUTHOR("Ulf Hansson <ulf.hansson@linaro.org"); diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index e960d686d9db..d5888591e1a9 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -206,6 +206,9 @@ static void u8500_clk_init(struct device_node *np) clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0); prcmu_clk[PRCMU_TIMCLK] = clk; + clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0); + prcmu_clk[PRCMU_SYSCLK] = clk; + clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK, 100000000, CLK_SET_RATE_GATE); prcmu_clk[PRCMU_SDMMCCLK] = clk; diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile index 04781389d0fb..1367afb03858 100644 --- a/drivers/clk/x86/Makefile +++ b/drivers/clk/x86/Makefile @@ -1,2 +1,3 @@ clk-x86-lpss-objs := clk-lpt.o obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o +obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c new file mode 100644 index 000000000000..2b60577703ef --- /dev/null +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -0,0 +1,371 @@ +/* + * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea <irina.tirdea@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General 
Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/err.h> +#include <linux/platform_data/x86/clk-pmc-atom.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define PLT_CLK_NAME_BASE "pmc_plt_clk" + +#define PMC_CLK_CTL_OFFSET 0x60 +#define PMC_CLK_CTL_SIZE 4 +#define PMC_CLK_NUM 6 +#define PMC_CLK_CTL_GATED_ON_D3 0x0 +#define PMC_CLK_CTL_FORCE_ON 0x1 +#define PMC_CLK_CTL_FORCE_OFF 0x2 +#define PMC_CLK_CTL_RESERVED 0x3 +#define PMC_MASK_CLK_CTL GENMASK(1, 0) +#define PMC_MASK_CLK_FREQ BIT(2) +#define PMC_CLK_FREQ_XTAL (0 << 2) /* 25 MHz */ +#define PMC_CLK_FREQ_PLL (1 << 2) /* 19.2 MHz */ + +struct clk_plt_fixed { + struct clk_hw *clk; + struct clk_lookup *lookup; +}; + +struct clk_plt { + struct clk_hw hw; + void __iomem *reg; + struct clk_lookup *lookup; + /* protect access to PMC registers */ + spinlock_t lock; +}; + +#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw) + +struct clk_plt_data { + struct clk_plt_fixed **parents; + u8 nparents; + struct clk_plt *clks[PMC_CLK_NUM]; +}; + +/* Return an index in parent table */ +static inline int plt_reg_to_parent(int reg) +{ + switch (reg & PMC_MASK_CLK_FREQ) { + default: + case PMC_CLK_FREQ_XTAL: + return 0; + case PMC_CLK_FREQ_PLL: + return 1; + } +} + +/* Return clk index of parent */ +static inline int plt_parent_to_reg(int index) +{ + switch (index) { + default: + case 0: + return PMC_CLK_FREQ_XTAL; + case 1: + return PMC_CLK_FREQ_PLL; + } +} + +/* Abstract status in simpler enabled/disabled value */ +static inline int plt_reg_to_enabled(int reg) +{ + switch (reg & PMC_MASK_CLK_CTL) { + case PMC_CLK_CTL_GATED_ON_D3: + case PMC_CLK_CTL_FORCE_ON: + return 1; /* enabled */ + case PMC_CLK_CTL_FORCE_OFF: + case PMC_CLK_CTL_RESERVED: + default: + return 0; /* disabled */ + } +} + +static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val) +{ + u32 tmp; + unsigned long flags; + + spin_lock_irqsave(&clk->lock, flags); + + tmp = readl(clk->reg); + tmp = (tmp & ~mask) | (val & mask); + writel(tmp, clk->reg); + + spin_unlock_irqrestore(&clk->lock, flags); +} + +static int plt_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_plt *clk = to_clk_plt(hw); + + plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index)); + + return 0; +} + +static u8 plt_clk_get_parent(struct clk_hw *hw) +{ + struct clk_plt *clk = to_clk_plt(hw); + u32 value; + + value = readl(clk->reg); + + return plt_reg_to_parent(value); +} + +static int plt_clk_enable(struct clk_hw *hw) +{ + struct clk_plt *clk = to_clk_plt(hw); + + plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON); + + return 0; +} + +static void plt_clk_disable(struct clk_hw *hw) +{ + struct clk_plt *clk = to_clk_plt(hw); + + plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF); +} + +static int plt_clk_is_enabled(struct clk_hw *hw) +{ + struct clk_plt *clk = to_clk_plt(hw); + u32 value; + + value = readl(clk->reg); + + return plt_reg_to_enabled(value); +} + +static const struct clk_ops plt_clk_ops = { + .enable = plt_clk_enable, + .disable = plt_clk_disable, + .is_enabled = plt_clk_is_enabled, + .get_parent = plt_clk_get_parent, + .set_parent = plt_clk_set_parent, + 
.determine_rate = __clk_mux_determine_rate, +}; + +static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, + void __iomem *base, + const char **parent_names, + int num_parents) +{ + struct clk_plt *pclk; + struct clk_init_data init; + int ret; + + pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL); + if (!pclk) + return ERR_PTR(-ENOMEM); + + init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id); + init.ops = &plt_clk_ops; + init.flags = 0; + init.parent_names = parent_names; + init.num_parents = num_parents; + + pclk->hw.init = &init; + pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; + spin_lock_init(&pclk->lock); + + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); + if (ret) { + pclk = ERR_PTR(ret); + goto err_free_init; + } + + pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL); + if (!pclk->lookup) { + pclk = ERR_PTR(-ENOMEM); + goto err_free_init; + } + +err_free_init: + kfree(init.name); + return pclk; +} + +static void plt_clk_unregister(struct clk_plt *pclk) +{ + clkdev_drop(pclk->lookup); +} + +static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev, + const char *name, + const char *parent_name, + unsigned long fixed_rate) +{ + struct clk_plt_fixed *pclk; + + pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL); + if (!pclk) + return ERR_PTR(-ENOMEM); + + pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name, + 0, fixed_rate); + if (IS_ERR(pclk->clk)) + return ERR_CAST(pclk->clk); + + pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL); + if (!pclk->lookup) { + clk_hw_unregister_fixed_rate(pclk->clk); + return ERR_PTR(-ENOMEM); + } + + return pclk; +} + +static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk) +{ + clkdev_drop(pclk->lookup); + clk_hw_unregister_fixed_rate(pclk->clk); +} + +static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data, + unsigned int i) +{ + while (i--) + plt_clk_unregister_fixed_rate(data->parents[i]); +} + +static void plt_clk_free_parent_names_loop(const char **parent_names, + unsigned int i) +{ + while (i--) + kfree_const(parent_names[i]); + kfree(parent_names); +} + +static void plt_clk_unregister_loop(struct clk_plt_data *data, + unsigned int i) +{ + while (i--) + plt_clk_unregister(data->clks[i]); +} + +static const char **plt_clk_register_parents(struct platform_device *pdev, + struct clk_plt_data *data, + const struct pmc_clk *clks) +{ + const char **parent_names; + unsigned int i; + int err; + int nparents = 0; + + data->nparents = 0; + while (clks[nparents].name) + nparents++; + + data->parents = devm_kcalloc(&pdev->dev, nparents, + sizeof(*data->parents), GFP_KERNEL); + if (!data->parents) + return ERR_PTR(-ENOMEM); + + parent_names = kcalloc(nparents, sizeof(*parent_names), + GFP_KERNEL); + if (!parent_names) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nparents; i++) { + data->parents[i] = + plt_clk_register_fixed_rate(pdev, clks[i].name, + clks[i].parent_name, + clks[i].freq); + if (IS_ERR(data->parents[i])) { + err = PTR_ERR(data->parents[i]); + goto err_unreg; + } + parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL); + } + + data->nparents = nparents; + return parent_names; + +err_unreg: + plt_clk_unregister_fixed_rate_loop(data, i); + plt_clk_free_parent_names_loop(parent_names, i); + return ERR_PTR(err); +} + +static void plt_clk_unregister_parents(struct clk_plt_data *data) +{ + plt_clk_unregister_fixed_rate_loop(data, data->nparents); +} + +static int plt_clk_probe(struct 
platform_device *pdev) +{ + const struct pmc_clk_data *pmc_data; + const char **parent_names; + struct clk_plt_data *data; + unsigned int i; + int err; + + pmc_data = dev_get_platdata(&pdev->dev); + if (!pmc_data || !pmc_data->clks) + return -EINVAL; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks); + if (IS_ERR(parent_names)) + return PTR_ERR(parent_names); + + for (i = 0; i < PMC_CLK_NUM; i++) { + data->clks[i] = plt_clk_register(pdev, i, pmc_data->base, + parent_names, data->nparents); + if (IS_ERR(data->clks[i])) { + err = PTR_ERR(data->clks[i]); + goto err_unreg_clk_plt; + } + } + + plt_clk_free_parent_names_loop(parent_names, data->nparents); + + platform_set_drvdata(pdev, data); + return 0; + +err_unreg_clk_plt: + plt_clk_unregister_loop(data, i); + plt_clk_unregister_parents(data); + plt_clk_free_parent_names_loop(parent_names, data->nparents); + return err; +} + +static int plt_clk_remove(struct platform_device *pdev) +{ + struct clk_plt_data *data; + + data = platform_get_drvdata(pdev); + + plt_clk_unregister_loop(data, PMC_CLK_NUM); + plt_clk_unregister_parents(data); + return 0; +} + +static struct platform_driver plt_clk_driver = { + .driver = { + .name = "clk-pmc-atom", + }, + .probe = plt_clk_probe, + .remove = plt_clk_remove, +}; +builtin_platform_driver(plt_clk_driver); diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 707d62956e9b..2f7c668643fe 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -610,9 +610,12 @@ static int __init top_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &top_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("top clk init over, nr:%d\n", TOP_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &top_hw_onecell_data); + if (ret) { + pr_err("failed to register top clk provider: %d\n", ret); + return ret; + } return 0; } @@ -776,9 +779,12 @@ static int __init lsp0_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp0_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("lsp0-clk init over:%d\n", LSP0_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &lsp0_hw_onecell_data); + if (ret) { + pr_err("failed to register lsp0 clk provider: %d\n", ret); + return ret; + } return 0; } @@ -881,9 +887,142 @@ static int __init lsp1_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp1_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("lsp1-clk init over, nr:%d\n", LSP1_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &lsp1_hw_onecell_data); + if (ret) { + pr_err("failed to register lsp1 clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +PNAME(audio_wclk_common_p) = { + "audio_99m", + "audio_24m", +}; + +PNAME(audio_timer_p) = { + "audio_24m", + "audio_32k", +}; + +static struct zx_clk_mux audio_mux_clk[] = { + MUX(0, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1), + MUX(0, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1), + MUX(0, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1), + MUX(0, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1), + MUX(0, "i2c0_wclk_mux", audio_wclk_common_p, AUDIO_I2C0_CLK, 0, 1), + MUX(0, "spdif0_wclk_mux", audio_wclk_common_p, 
AUDIO_SPDIF0_CLK, 0, 1), + MUX(0, "spdif1_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF1_CLK, 0, 1), + MUX(0, "timer_wclk_mux", audio_timer_p, AUDIO_TIMER_CLK, 0, 1), +}; + +static struct clk_zx_audio_divider audio_adiv_clk[] = { + AUDIO_DIV(0, "i2s0_wclk_div", "i2s0_wclk_mux", AUDIO_I2S0_DIV_CFG1), + AUDIO_DIV(0, "i2s1_wclk_div", "i2s1_wclk_mux", AUDIO_I2S1_DIV_CFG1), + AUDIO_DIV(0, "i2s2_wclk_div", "i2s2_wclk_mux", AUDIO_I2S2_DIV_CFG1), + AUDIO_DIV(0, "i2s3_wclk_div", "i2s3_wclk_mux", AUDIO_I2S3_DIV_CFG1), + AUDIO_DIV(0, "spdif0_wclk_div", "spdif0_wclk_mux", AUDIO_SPDIF0_DIV_CFG1), + AUDIO_DIV(0, "spdif1_wclk_div", "spdif1_wclk_mux", AUDIO_SPDIF1_DIV_CFG1), +}; + +static struct zx_clk_div audio_div_clk[] = { + DIV_T(0, "tdm_wclk_div", "audio_16m384", AUDIO_TDM_CLK, 8, 4, 0, common_div_table), +}; + +static struct zx_clk_gate audio_gate_clk[] = { + GATE(AUDIO_I2S0_WCLK, "i2s0_wclk", "i2s0_wclk_div", AUDIO_I2S0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S1_WCLK, "i2s1_wclk", "i2s1_wclk_div", AUDIO_I2S1_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S2_WCLK, "i2s2_wclk", "i2s2_wclk_div", AUDIO_I2S2_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S3_WCLK, "i2s3_wclk", "i2s3_wclk_div", AUDIO_I2S3_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S0_PCLK, "i2s0_pclk", "clk49m5", AUDIO_I2S0_CLK, 8, 0, 0), + GATE(AUDIO_I2S1_PCLK, "i2s1_pclk", "clk49m5", AUDIO_I2S1_CLK, 8, 0, 0), + GATE(AUDIO_I2S2_PCLK, "i2s2_pclk", "clk49m5", AUDIO_I2S2_CLK, 8, 0, 0), + GATE(AUDIO_I2S3_PCLK, "i2s3_pclk", "clk49m5", AUDIO_I2S3_CLK, 8, 0, 0), + GATE(AUDIO_I2C0_WCLK, "i2c0_wclk", "i2c0_wclk_mux", AUDIO_I2C0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_SPDIF0_WCLK, "spdif0_wclk", "spdif0_wclk_div", AUDIO_SPDIF0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_SPDIF1_WCLK, "spdif1_wclk", "spdif1_wclk_div", AUDIO_SPDIF1_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_TDM_WCLK, "tdm_wclk", "tdm_wclk_div", AUDIO_TDM_CLK, 17, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_TS_PCLK, "tempsensor_pclk", "clk49m5", AUDIO_TS_CLK, 1, 0, 0), +}; + +static struct clk_hw_onecell_data audio_hw_onecell_data = { + .num = AUDIO_NR_CLKS, + .hws = { + [AUDIO_NR_CLKS - 1] = NULL, + }, +}; + +static int __init audio_clocks_init(struct device_node *np) +{ + void __iomem *reg_base; + int i, ret; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: Unable to map audio clk base\n", __func__); + return -ENXIO; + } + + for (i = 0; i < ARRAY_SIZE(audio_mux_clk); i++) { + if (audio_mux_clk[i].id) + audio_hw_onecell_data.hws[audio_mux_clk[i].id] = + &audio_mux_clk[i].mux.hw; + + audio_mux_clk[i].mux.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_mux_clk[i].mux.hw.init->name); + } + } + + for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) { + if (audio_adiv_clk[i].id) + audio_hw_onecell_data.hws[audio_adiv_clk[i].id] = + &audio_adiv_clk[i].hw; + + audio_adiv_clk[i].reg_base += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_adiv_clk[i].hw.init->name); + } + } + + for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) { + if (audio_div_clk[i].id) + audio_hw_onecell_data.hws[audio_div_clk[i].id] = + &audio_div_clk[i].div.hw; + + audio_div_clk[i].div.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_div_clk[i].div.hw.init->name); + } + } + + for (i = 0; i < 
ARRAY_SIZE(audio_gate_clk); i++) { + if (audio_gate_clk[i].id) + audio_hw_onecell_data.hws[audio_gate_clk[i].id] = + &audio_gate_clk[i].gate.hw; + + audio_gate_clk[i].gate.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_gate_clk[i].gate.hw.init->name); + } + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &audio_hw_onecell_data); + if (ret) { + pr_err("failed to register audio clk provider: %d\n", ret); + return ret; + } return 0; } @@ -892,6 +1031,7 @@ static const struct of_device_id zx_clkc_match_table[] = { { .compatible = "zte,zx296718-topcrm", .data = &top_clocks_init }, { .compatible = "zte,zx296718-lsp0crm", .data = &lsp0_clocks_init }, { .compatible = "zte,zx296718-lsp1crm", .data = &lsp1_clocks_init }, + { .compatible = "zte,zx296718-audiocrm", .data = &audio_clocks_init }, { } }; diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c index c4c1251bc1e7..878d879b23ff 100644 --- a/drivers/clk/zte/clk.c +++ b/drivers/clk/zte/clk.c @@ -9,6 +9,7 @@ #include <linux/clk-provider.h> #include <linux/err.h> +#include <linux/gcd.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/slab.h> @@ -310,3 +311,129 @@ struct clk *clk_register_zx_audio(const char *name, return clk; } + +#define CLK_AUDIO_DIV_FRAC BIT(0) +#define CLK_AUDIO_DIV_INT BIT(1) +#define CLK_AUDIO_DIV_UNCOMMON BIT(1) + +#define CLK_AUDIO_DIV_FRAC_NSHIFT 16 +#define CLK_AUDIO_DIV_INT_FRAC_RE BIT(16) +#define CLK_AUDIO_DIV_INT_FRAC_MAX (0xffff) +#define CLK_AUDIO_DIV_INT_FRAC_MIN (0x2) +#define CLK_AUDIO_DIV_INT_INT_SHIFT 24 +#define CLK_AUDIO_DIV_INT_INT_WIDTH 4 + +struct zx_clk_audio_div_table { + unsigned long rate; + unsigned int int_reg; + unsigned int frac_reg; +}; + +#define to_clk_zx_audio_div(_hw) container_of(_hw, struct clk_zx_audio_divider, hw) + +static unsigned long audio_calc_rate(struct clk_zx_audio_divider *audio_div, + u32 reg_frac, u32 reg_int, + unsigned long parent_rate) +{ + unsigned long rate, m, n; + + m = reg_frac & 0xffff; + n = (reg_frac >> 16) & 0xffff; + + m = (reg_int & 0xffff) * n + m; + rate = (parent_rate * n) / m; + + return rate; +} + +static void audio_calc_reg(struct clk_zx_audio_divider *audio_div, + struct zx_clk_audio_div_table *div_table, + unsigned long rate, unsigned long parent_rate) +{ + unsigned int reg_int, reg_frac; + unsigned long m, n, div; + + reg_int = parent_rate / rate; + + if (reg_int > CLK_AUDIO_DIV_INT_FRAC_MAX) + reg_int = CLK_AUDIO_DIV_INT_FRAC_MAX; + else if (reg_int < CLK_AUDIO_DIV_INT_FRAC_MIN) + reg_int = 0; + m = parent_rate - rate * reg_int; + n = rate; + + div = gcd(m, n); + m = m / div; + n = n / div; + + if ((m >> 16) || (n >> 16)) { + if (m > n) { + n = n * 0xffff / m; + m = 0xffff; + } else { + m = m * 0xffff / n; + n = 0xffff; + } + } + reg_frac = m | (n << 16); + + div_table->rate = parent_rate * n / (reg_int * n + m); + div_table->int_reg = reg_int; + div_table->frac_reg = reg_frac; +} + +static unsigned long zx_audio_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw); + u32 reg_frac, reg_int; + + reg_frac = readl_relaxed(zx_audio_div->reg_base); + reg_int = readl_relaxed(zx_audio_div->reg_base + 0x4); + + return audio_calc_rate(zx_audio_div, reg_frac, reg_int, parent_rate); +} + +static long zx_audio_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_zx_audio_divider *zx_audio_div = 
to_clk_zx_audio_div(hw); + struct zx_clk_audio_div_table divt; + + audio_calc_reg(zx_audio_div, &divt, rate, *prate); + + return audio_calc_rate(zx_audio_div, divt.frac_reg, divt.int_reg, *prate); +} + +static int zx_audio_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw); + struct zx_clk_audio_div_table divt; + unsigned int val; + + audio_calc_reg(zx_audio_div, &divt, rate, parent_rate); + if (divt.rate != rate) + pr_debug("the real rate is:%ld", divt.rate); + + writel_relaxed(divt.frac_reg, zx_audio_div->reg_base); + + val = readl_relaxed(zx_audio_div->reg_base + 0x4); + val &= ~0xffff; + val |= divt.int_reg | CLK_AUDIO_DIV_INT_FRAC_RE; + writel_relaxed(val, zx_audio_div->reg_base + 0x4); + + mdelay(1); + + val = readl_relaxed(zx_audio_div->reg_base + 0x4); + val &= ~CLK_AUDIO_DIV_INT_FRAC_RE; + writel_relaxed(val, zx_audio_div->reg_base + 0x4); + + return 0; +} + +const struct clk_ops zx_audio_div_ops = { + .recalc_rate = zx_audio_div_recalc_rate, + .round_rate = zx_audio_div_round_rate, + .set_rate = zx_audio_div_set_rate, +}; diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h index 0df3474b2cf3..84a55a3e2bd4 100644 --- a/drivers/clk/zte/clk.h +++ b/drivers/clk/zte/clk.h @@ -153,6 +153,25 @@ struct zx_clk_div { .id = _id, \ } +struct clk_zx_audio_divider { + struct clk_hw hw; + void __iomem *reg_base; + unsigned int rate_count; + spinlock_t *lock; + u16 id; +}; + +#define AUDIO_DIV(_id, _name, _parent, _reg) \ +{ \ + .reg_base = (void __iomem *) _reg, \ + .lock = &clk_lock, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &zx_audio_div_ops, \ + 0), \ + .id = _id, \ +} + struct clk *clk_register_zx_pll(const char *name, const char *parent_name, unsigned long flags, void __iomem *reg_base, const struct zx_pll_config *lookup_table, int count, spinlock_t *lock); @@ -167,4 +186,6 @@ struct clk *clk_register_zx_audio(const char *name, unsigned long flags, void __iomem *reg_base); extern const struct clk_ops zx_pll_ops; +extern const struct clk_ops zx_audio_div_ops; + #endif diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 93aa1364376a..7a8a4117f123 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -24,6 +24,7 @@ #include <linux/of_address.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/acpi.h> diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 9cae38eebec2..1c24de215c14 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -19,6 +19,7 @@ #include <linux/clockchips.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <clocksource/pxa.h> diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c index 10318cc99c0e..e9f50d289362 100644 --- a/drivers/clocksource/timer-digicolor.c +++ b/drivers/clocksource/timer-digicolor.c @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 631bd2c86c5e..47e24b5384b3 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ 
-18,7 +18,6 @@ #include <linux/export.h> #include <linux/kernel_stat.h> -#include <linux/sched.h> #include <linux/slab.h> #include "cpufreq_governor.h" diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index f5717ca070cc..0236ec2cd654 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -20,6 +20,7 @@ #include <linux/atomic.h> #include <linux/irq_work.h> #include <linux/cpufreq.h> +#include <linux/sched/cpufreq.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/mutex.h> diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4a017e895296..3937acf7e026 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -16,6 +16,7 @@ #include <linux/percpu-defs.h> #include <linux/slab.h> #include <linux/tick.h> +#include <linux/sched/cpufreq.h> #include "cpufreq_ondemand.h" diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb0f7fb71685..b1fbaa30ae04 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -19,7 +19,7 @@ #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/cpufreq.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/cpufreq.h> @@ -39,11 +39,6 @@ #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 -#define ATOM_RATIOS 0x66a -#define ATOM_VIDS 0x66b -#define ATOM_TURBO_RATIOS 0x66c -#define ATOM_TURBO_VIDS 0x66d - #ifdef CONFIG_ACPI #include <acpi/processor.h> #include <acpi/cppc_acpi.h> @@ -364,37 +359,25 @@ static bool driver_registered __read_mostly; static bool acpi_ppc; #endif -static struct perf_limits performance_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 100, - .min_perf = int_ext_tofp(1), - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; +static struct perf_limits performance_limits; +static struct perf_limits powersave_limits; +static struct perf_limits *limits; -static struct perf_limits powersave_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 0, - .min_perf = 0, - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; +static void intel_pstate_init_limits(struct perf_limits *limits) +{ + memset(limits, 0, sizeof(*limits)); + limits->max_perf_pct = 100; + limits->max_perf = int_ext_tofp(1); + limits->max_policy_pct = 100; + limits->max_sysfs_pct = 100; +} -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE -static struct perf_limits *limits = &performance_limits; -#else -static struct perf_limits *limits = &powersave_limits; -#endif +static void intel_pstate_set_performance_limits(struct perf_limits *limits) +{ + intel_pstate_init_limits(limits); + limits->min_perf_pct = 100; + limits->min_perf = int_ext_tofp(1); +} static DEFINE_MUTEX(intel_pstate_driver_lock); static DEFINE_MUTEX(intel_pstate_limits_lock); @@ -1367,7 +1350,7 @@ static int atom_get_min_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -1375,7 +1358,7 @@ static int atom_get_max_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 0x7F; } @@ -1383,7 +1366,7 @@ static int atom_get_turbo_pstate(void) { u64 value; - 
rdmsrl(ATOM_TURBO_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -1445,7 +1428,7 @@ static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(ATOM_VIDS, value); + rdmsrl(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -1453,7 +1436,7 @@ static void atom_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(ATOM_TURBO_VIDS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -2084,20 +2067,6 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) synchronize_sched(); } -static void intel_pstate_set_performance_limits(struct perf_limits *limits) -{ - limits->no_turbo = 0; - limits->turbo_disabled = 0; - limits->max_perf_pct = 100; - limits->max_perf = int_ext_tofp(1); - limits->min_perf_pct = 100; - limits->min_perf = int_ext_tofp(1); - limits->max_policy_pct = 100; - limits->max_sysfs_pct = 100; - limits->min_policy_pct = 0; - limits->min_sysfs_pct = 0; -} - static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, struct perf_limits *limits) { @@ -2466,6 +2435,11 @@ static int intel_pstate_register_driver(void) { int ret; + intel_pstate_init_limits(&powersave_limits); + intel_pstate_set_performance_limits(&performance_limits); + limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ? + &performance_limits : &powersave_limits; + ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { intel_pstate_driver_cleanup(); diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index a6fefac8afe4..bfec1bcd3835 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -23,10 +23,6 @@ #include <linux/slab.h> #include <linux/smp.h> -#if !defined(CONFIG_ARM) -#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */ -#endif - /** * struct cpu_data * @pclk: the parent clock of cpu diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index b73feeb666f9..35ddb6da93aa 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -234,7 +234,7 @@ static unsigned int us2e_freq_get(unsigned int cpu) cpumask_t cpus_allowed; unsigned long clock_tick, estar; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; @@ -252,7 +252,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 9bb42ba50efa..a8d86a449ca1 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -82,7 +82,7 @@ static unsigned int us3_freq_get(unsigned int cpu) unsigned long reg; unsigned int ret; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); reg = read_safari_cfg(); @@ -99,7 +99,7 @@ static int 
us3_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long new_bits, new_freq, reg; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 62810ff3b00f..548b90be7685 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/notifier.h> #include <linux/pm_qos.h> #include <linux/cpu.h> diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ab264d393233..e53fb861beb0 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -11,6 +11,7 @@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/idle.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/tick.h> diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 8d6d25c38c02..b2330fd69e34 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -18,6 +18,8 @@ #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> #include <linux/math64.h> #include <linux/cpu.h> @@ -287,7 +289,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) unsigned int interactivity_req; unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; - int resume_latency = dev_pm_qos_read_value(device); + int resume_latency = dev_pm_qos_raw_read_value(device); if (data->needs_update) { menu_update(drv, dev); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 2cac445b02fd..0b49dbc423e2 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -62,19 +62,32 @@ config CRYPTO_DEV_GEODE will be called geode-aes. config ZCRYPT - tristate "Support for PCI-attached cryptographic adapters" + tristate "Support for s390 cryptographic adapters" depends on S390 select HW_RANDOM help - Select this option if you want to use a PCI-attached cryptographic - adapter like: - + PCI Cryptographic Accelerator (PCICA) - + PCI Cryptographic Coprocessor (PCICC) + Select this option if you want to enable support for + s390 cryptographic adapters like: + PCI-X Cryptographic Coprocessor (PCIXCC) - + Crypto Express2 Coprocessor (CEX2C) - + Crypto Express2 Accelerator (CEX2A) - + Crypto Express3 Coprocessor (CEX3C) - + Crypto Express3 Accelerator (CEX3A) + + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC) + + Crypto Express 2,3,4 or 5 Accelerator (CEXxA) + + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP) + +config PKEY + tristate "Kernel API for protected key handling" + depends on S390 + depends on ZCRYPT + help + With this option enabled the pkey kernel module provides an API + for creation and handling of protected keys. Other parts of the + kernel or userspace applications may use these functions. + + Select this option if you want to enable the kernel and userspace + API for protected key handling. + + Please note that creation of protected keys from secure keys + requires at least one CEX card in coprocessor mode + available at runtime. 
config CRYPTO_SHA1_S390 tristate "SHA1 digest algorithm" @@ -124,6 +137,7 @@ config CRYPTO_AES_S390 depends on S390 select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER + select PKEY help This is the s390 hardware accelerated implementation of the AES cipher algorithms (FIPS-197). diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 579f8263c479..fef39f9f41ee 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -269,7 +269,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) /* * If the corresponding bit is set, then it means the state * handle was initialized by us, and thus it needs to be - * deintialized as well + * deinitialized as well */ if ((1 << sh_idx) & state_handle_mask) { /* diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index b5b153317376..21472e427f6f 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -120,7 +120,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index b75c77254fdb..8d9829ff2a78 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -13,6 +13,7 @@ #include <linux/pagemap.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/magic.h> #include <linux/mount.h> #include <linux/pfn_t.h> #include <linux/hash.h> diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 551a271353d2..dea04871b50d 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1228,7 +1228,7 @@ static int __init devfreq_init(void) subsys_initcall(devfreq_init); /* - * The followings are helper functions for devfreq user device drivers with + * The following are helper functions for devfreq user device drivers with * OPP framework. 
*/ diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 718f832a5c71..0007b792827b 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -325,6 +325,9 @@ static const struct file_operations dma_buf_fops = { .llseek = dma_buf_llseek, .poll = dma_buf_poll, .unlocked_ioctl = dma_buf_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = dma_buf_ioctl, +#endif }; /* diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index d1f1f456f5c4..d195d617076d 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -22,6 +22,7 @@ #include <linux/export.h> #include <linux/atomic.h> #include <linux/dma-fence.h> +#include <linux/sched/signal.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index c9297605058c..54d581d407aa 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -16,6 +16,7 @@ #include <linux/freezer.h> #include <linux/init.h> #include <linux/kthread.h> +#include <linux/sched/task.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/random.h> diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c index 3e882aa107e8..eaa355e7d9e4 100644 --- a/drivers/extcon/extcon-rt8973a.c +++ b/drivers/extcon/extcon-rt8973a.c @@ -537,7 +537,7 @@ static void rt8973a_init_dev_type(struct rt8973a_muic_info *info) regmap_update_bits(info->regmap, reg, mask, val); } - /* Check whether RT8973A is auto swithcing mode or not */ + /* Check whether RT8973A is auto switching mode or not */ ret = regmap_read(info->regmap, RT8973A_REG_CONTROL1, &data); if (ret) { dev_err(info->dev, diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index aee149bdf4c0..a301fcf46e88 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work) */ if (r->todo == ISO_RES_REALLOC && !success && !client->in_shutdown && - idr_find(&client->resource_idr, r->resource.handle)) { - idr_remove(&client->resource_idr, r->resource.handle); + idr_remove(&client->resource_idr, r->resource.handle)) { client_put(client); free = true; } diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index f9e3aee6a211..7c2eed76011e 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -1068,7 +1068,7 @@ static void fw_device_init(struct work_struct *work) /* * Transition the device to running state. If it got pulled - * out from under us while we did the intialization work, we + * out from under us while we did the initialization work, we * have to shut down the device again here. Normally, though, * fw_node_event will be responsible for shutting it down when * necessary. We have to use the atomic cmpxchg here to avoid @@ -1231,7 +1231,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) break; /* - * Do minimal intialization of the device here, the + * Do minimal initialization of the device here, the * rest will happen in fw_device_init(). 
* * Attention: A lot of things, even fw_device_get(), diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c index 29d58feaf675..6523ce962865 100644 --- a/drivers/firmware/psci_checker.c +++ b/drivers/firmware/psci_checker.c @@ -20,6 +20,7 @@ #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/module.h> #include <linux/preempt.h> #include <linux/psci.h> diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 4ff02d310868..84e4c9a58a0c 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -19,6 +19,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/semaphore.h> +#include <linux/sched/clock.h> #include <soc/tegra/bpmp.h> #include <soc/tegra/bpmp-abi.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index c02db01f6583..0218cea6be4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) struct amdgpu_bo_list *list; mutex_lock(&fpriv->bo_list_lock); - list = idr_find(&fpriv->bo_list_handles, id); + list = idr_remove(&fpriv->bo_list_handles, id); if (list) { + /* Another user may have a reference to this list still */ mutex_lock(&list->lock); - idr_remove(&fpriv->bo_list_handles, id); mutex_unlock(&list->lock); amdgpu_bo_list_free(list); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 400c66ba4c6b..cf0500671353 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) struct amdgpu_ctx *ctx; mutex_lock(&mgr->lock); - ctx = idr_find(&mgr->ctx_handles, id); - if (ctx) { - idr_remove(&mgr->ctx_handles, id); + ctx = idr_remove(&mgr->ctx_handles, id); + if (ctx) kref_put(&ctx->refcount, amdgpu_ctx_do_release); - mutex_unlock(&mgr->lock); - return 0; - } mutex_unlock(&mgr->lock); - return -EINVAL; + return ctx ? 
0 : -EINVAL; } static int amdgpu_ctx_query(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 6a3470f84998..d1ce83d73a87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -23,7 +23,7 @@ #include <linux/mm_types.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/mman.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index d83de985e88c..6acc4313363e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -23,6 +23,8 @@ #include <linux/printk.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "cik_regs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index fa32c32fa1c2..a9b9882a9a77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -23,6 +23,8 @@ #include <linux/printk.h> #include <linux/slab.h> +#include <linux/mm_types.h> + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "vi_structs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ef7c8de7060e..84d1ffd1eef9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -23,6 +23,7 @@ #include <linux/mutex.h> #include <linux/log2.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/amd-iommu.h> #include <linux/notifier.h> @@ -262,7 +263,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, * and because the mmu_notifier_unregister function also drop * mm_count we need to take an extra count here. */ - atomic_inc(&p->mm->mm_count); + mmgrab(p->mm); mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm); mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed); } diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 4a4d3797a6d3..181a2c3c6362 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -188,7 +188,7 @@ #define HW_ASSISTED_I2C_STATUS_FAILURE 2 #define HW_ASSISTED_I2C_STATUS_SUCCESS 1 -#pragma pack(1) // BIOS data must use byte aligment +#pragma pack(1) // BIOS data must use byte alignment // Define offset to location of ROM header. 
#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L @@ -4361,7 +4361,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT // GPIO use to control PCIE_VDDC in certain SLT board #define PCIE_VDDC_CONTROL_GPIO_PINID 56 -//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable +//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC switching feature is enable #define PP_AC_DC_SWITCH_GPIO_PINID 60 //from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable #define VDDC_VRHOT_GPIO_PINID 61 @@ -9180,7 +9180,7 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 /*********************************************************************************/ -#pragma pack() // BIOS data must use byte aligment +#pragma pack() // BIOS data must use byte alignment #pragma pack(1) @@ -9211,7 +9211,7 @@ typedef struct _ATOM_SERVICE_INFO -#pragma pack() // BIOS data must use byte aligment +#pragma pack() // BIOS data must use byte alignment // // AMD ACPI Table diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 26129972f686..80ed65985af8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -89,7 +89,7 @@ enum phm_platform_caps { PHM_PlatformCaps_EnableSideportControl, /* indicates Sideport can be controlled */ PHM_PlatformCaps_VideoPlaybackEEUNotification, /* indicates EEU notification of video start/stop is required */ PHM_PlatformCaps_TurnOffPll_ASPML1, /* PCIE Turn Off PLL in ASPM L1 */ - PHM_PlatformCaps_EnableHTLinkControl, /* indicates HT Link can be controlled by ACPI or CLMC overrided/automated mode. */ + PHM_PlatformCaps_EnableHTLinkControl, /* indicates HT Link can be controlled by ACPI or CLMC overridden/automated mode. */ PHM_PlatformCaps_PerformanceStateOnly, /* indicates only performance power state to be used on current system. */ PHM_PlatformCaps_ExclusiveModeAlwaysHigh, /* In Exclusive (3D) mode always stay in High state. 
*/ PHM_PlatformCaps_DisableMGClockGating, /* to disable Medium Grain Clock Gating or not */ diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 1bf83ed113b3..16f96563cd2b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -24,6 +24,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <drm/drmP.h> #include "gpu_scheduler.h" diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h index cc04539c0ff3..1d9c4e75d303 100644 --- a/drivers/gpu/drm/ast/ast_dram_tables.h +++ b/drivers/gpu/drm/ast/ast_dram_tables.h @@ -141,4 +141,66 @@ static const struct ast_dramstruct ast2100_dram_table_data[] = { { 0xffff, 0xffffffff }, }; +/* + * AST2500 DRAM settings modules + */ +#define REGTBL_NUM 17 +#define REGIDX_010 0 +#define REGIDX_014 1 +#define REGIDX_018 2 +#define REGIDX_020 3 +#define REGIDX_024 4 +#define REGIDX_02C 5 +#define REGIDX_030 6 +#define REGIDX_214 7 +#define REGIDX_2E0 8 +#define REGIDX_2E4 9 +#define REGIDX_2E8 10 +#define REGIDX_2EC 11 +#define REGIDX_2F0 12 +#define REGIDX_2F4 13 +#define REGIDX_2F8 14 +#define REGIDX_RFC 15 +#define REGIDX_PLL 16 + +static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { + 0x64604D38, /* 0x010 */ + 0x29690599, /* 0x014 */ + 0x00000300, /* 0x018 */ + 0x00000000, /* 0x020 */ + 0x00000000, /* 0x024 */ + 0x02181E70, /* 0x02C */ + 0x00000040, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x02001300, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001B, /* 0x2E8 */ + 0x35B8C105, /* 0x2EC */ + 0x08090408, /* 0x2F0 */ + 0x9B000800, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x9971452F, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { + 0x63604E37, /* 0x010 */ + 0xE97AFA99, /* 0x014 */ + 0x00019000, /* 0x018 */ + 0x08000000, /* 0x020 */ + 0x00000400, /* 0x024 */ + 0x00000410, /* 0x02C */ + 0x00000101, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x03002900, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001C, /* 0x2E8 */ + 0x35B8C106, /* 0x2EC */ + 0x08080607, /* 0x2F0 */ + 0x9B000900, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x99714545, /* tRFC */ + 0x000071C1 /* PLL */ +}; + #endif diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 5a8fa1c85229..8880f0b62e9c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -65,6 +65,7 @@ enum ast_chip { AST2150, AST2300, AST2400, + AST2500, AST1180, }; @@ -81,6 +82,7 @@ enum ast_tx_chip { #define AST_DRAM_1Gx32 3 #define AST_DRAM_2Gx16 6 #define AST_DRAM_4Gx16 7 +#define AST_DRAM_8Gx16 8 struct ast_fbdev; @@ -114,7 +116,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; - bool DisableP2A; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; @@ -301,8 +307,8 @@ struct ast_vbios_dclk_info { }; struct ast_vbios_mode_info { - struct ast_vbios_stdtable *std_table; - struct ast_vbios_enhtable *enh_table; + const struct ast_vbios_stdtable *std_table; + const struct ast_vbios_enhtable *enh_table; }; extern int ast_mode_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 993909430736..262c2c0e43b4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -32,8 +32,6 @@ #include 
<drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> -#include "ast_dram_tables.h" - void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, uint8_t index, uint8_t mask, uint8_t val) @@ -62,30 +60,99 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); } else { - if (dev->pdev->revision >= 0x30) { + if (dev->pdev->revision >= 0x40) { + ast->chip = AST2500; + DRM_INFO("AST 2500 detected\n"); + } else if (dev->pdev->revision >= 0x30) { ast->chip = AST2400; DRM_INFO("AST 2400 detected\n"); } else if (dev->pdev->revision >= 0x20) { ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,26 +177,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. 
We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Check P2A Access */ - ast->DisableP2A = true; - data = ast_read32(ast, 0xf004); - if (data != 0xFFFFFFFF) - ast->DisableP2A = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -146,17 +193,15 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->DisableP2A == false) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; - } + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + if (ast->chip == AST2500 && + scu_rev == 0x100) /* ast2510 */ + ast->support_wide_screen = true; } break; } @@ -220,85 +265,121 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - if (ast->DisableP2A) - { - ast->dram_bus_width = 16; - ast->dram_type = AST_DRAM_1Gx16; - ast->mclk = 396; - } - else - { + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: ast_write32(ast, 0xf004, 0x1e6e0000); ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) - ast->dram_bus_width = 16; + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + if (ast->chip == AST2500) + ast->mclk = 800; else - ast->dram_bus_width = 32; - - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } + ast->mclk = 396; + return 0; + } - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 
0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; + if (ast->chip == AST2500) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_1Gx16; break; - case 2: + default: case 1: - div = 0x2; + ast->dram_type = AST_DRAM_2Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_4Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_8Gx16; + break; + } + } else if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; break; default: - div = 0x1; + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; break; } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000)); return 0; } @@ -437,17 +518,19 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast_detect_chip(dev, &need_post); + if (need_post) + ast_post_gpu(dev); + if (ast->chip != AST1180) { ret = ast_get_dram_info(dev); if (ret) goto out_free; ast->vram_size = ast_get_vram_info(dev); - DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); + DRM_INFO("dram MCLK=%u Mhz type=%d bus_width=%d size=%08x\n", + ast->mclk, ast->dram_type, + ast->dram_bus_width, ast->vram_size); } - if (need_post) - ast_post_gpu(dev); - ret = ast_mm_init(ast); if (ret) goto out_free; @@ -465,6 +548,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast->chip == AST2200 || ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500 || ast->chip == AST1180) { dev->mode_config.max_width = 1920; dev->mode_config.max_height = 2048; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 606cb40f6c7c..47b78e52691c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -81,9 +81,9 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo struct ast_private *ast = crtc->dev->dev_private; const struct drm_framebuffer *fb = crtc->primary->fb; u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate; + const struct ast_vbios_enhtable *best = NULL; u32 hborder, vborder; bool check_sync; - struct ast_vbios_enhtable *best = NULL; switch (fb->format->cpp[0] * 8) { case 8: @@ -147,7 +147,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo refresh_rate = drm_mode_vrefresh(mode); check_sync = vbios_mode->enh_table->flags & WideScreenMode; do { - struct ast_vbios_enhtable *loop = vbios_mode->enh_table; + const struct ast_vbios_enhtable *loop = vbios_mode->enh_table; while (loop->refresh_rate != 0xff) { if 
((check_sync) && @@ -227,7 +227,7 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = crtc->dev->dev_private; - struct ast_vbios_stdtable *stdtable; + const struct ast_vbios_stdtable *stdtable; u32 i; u8 jreg; @@ -273,7 +273,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod { struct ast_private *ast = crtc->dev->dev_private; u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp; + u16 temp, precache = 0; + + if ((ast->chip == AST2500) && + (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) + precache = 40; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); @@ -299,12 +303,12 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f)); - temp = (mode->crtc_hsync_start >> 3) - 1; + temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; if (temp & 0x100) jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp); - temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -365,6 +369,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80)); + if (precache) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); } @@ -384,14 +393,18 @@ static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mo struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = dev->dev_private; - struct ast_vbios_dclk_info *clk_info; + const struct ast_vbios_dclk_info *clk_info; - clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; + if (ast->chip == AST2500) + clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; + else + clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f, - (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4)); + (clk_info->param3 & 0xc0) | + ((clk_info->param3 & 0x3) << 4)); } static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -425,7 +438,8 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); /* Set Threshold */ - if (ast->chip == AST2300 || ast->chip == AST2400) { + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); } else if (ast->chip == AST2100 || @@ -800,7 +814,9 @@ static int ast_mode_valid(struct drm_connector *connector, if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) return MODE_OK; - if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip 
== AST1180)) { + if ((ast->chip == AST2100) || (ast->chip == AST2200) || + (ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500) || (ast->chip == AST1180)) { if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) return MODE_OK; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 5331ee1df086..f7d421359d56 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -31,7 +31,8 @@ #include "ast_dram_tables.h" -static void ast_init_dram_2300(struct drm_device *dev); +static void ast_post_chip_2300(struct drm_device *dev); +static void ast_post_chip_2500(struct drm_device *dev); void ast_enable_vga(struct drm_device *dev) { @@ -58,13 +59,9 @@ bool ast_is_vga_enabled(struct drm_device *dev) /* TODO 1180 */ } else { ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); - if (ch) { - ast_open_key(ast); - ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); - return ch & 0x04; - } + return !!(ch & 0x01); } - return 0; + return false; } static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; @@ -79,10 +76,11 @@ ast_set_def_ext_reg(struct drm_device *dev) const u8 *ext_reg_info; /* reset scratch */ - for (i = 0x81; i <= 0x8f; i++) + for (i = 0x81; i <= 0x9f; i++) ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00); - if (ast->chip == AST2300 || ast->chip == AST2400) { + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { if (dev->pdev->revision >= 0x20) ext_reg_info = extreginfo_ast2300; else @@ -106,7 +104,8 @@ ast_set_def_ext_reg(struct drm_device *dev) /* Enable RAMDAC for A1 */ reg = 0x04; - if (ast->chip == AST2300 || ast->chip == AST2400) + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) reg |= 0x20; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); } @@ -375,21 +374,20 @@ void ast_post_gpu(struct drm_device *dev) pci_write_config_dword(ast->dev->pdev, 0x04, reg); ast_enable_vga(dev); - ast_enable_mmio(dev); ast_open_key(ast); + ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->DisableP2A == false) - { - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); + if (ast->config_mode == ast_use_p2a) { + if (ast->chip == AST2500) + ast_post_chip_2500(dev); + else if (ast->chip == AST2300 || ast->chip == AST2400) + ast_post_chip_2300(dev); else ast_init_dram_reg(dev); ast_init_3rdtx(dev); - } - else - { + } else { if (ast->tx_chip_type != AST_TX_NONE) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } @@ -448,85 +446,70 @@ static const u32 pattern[8] = { 0x7C61D253 }; -static int mmc_test_burst(struct ast_private *ast, u32 datagen) +static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3)); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); timeout = 0; do { data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; - if (data & 0x2000) { - return 0; - } + if (data & 0x2000) + return false; if (++timeout > TIMEOUT) { ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 0; + return false; } } while (!data); - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 1; + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return true; } -static int mmc_test_burst2(struct ast_private *ast, u32 datagen) +static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | 
(datagen << 3)); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); timeout = 0; do { data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; if (++timeout > TIMEOUT) { ast_moutdwm(ast, 0x1e6e0070, 0x0); - return -1; + return 0xffffffff; } } while (!data); data = ast_mindwm(ast, 0x1e6e0078); data = (data | (data >> 16)) & 0xffff; - ast_moutdwm(ast, 0x1e6e0070, 0x0); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); return data; } -static int mmc_test_single(struct ast_private *ast, u32 datagen) + +static bool mmc_test_burst(struct ast_private *ast, u32 datagen) { - u32 data, timeout; + return mmc_test(ast, datagen, 0xc1); +} - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x3000; - if (data & 0x2000) - return 0; - if (++timeout > TIMEOUT) { - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return 0; - } - } while (!data); - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return 1; +static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x41); } -static int mmc_test_single2(struct ast_private *ast, u32 datagen) +static bool mmc_test_single(struct ast_private *ast, u32 datagen) { - u32 data, timeout; + return mmc_test(ast, datagen, 0xc5); +} - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; - if (++timeout > TIMEOUT) { - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return -1; - } - } while (!data); - data = ast_mindwm(ast, 0x1e6e0078); - data = (data | (data >> 16)) & 0xffff; - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return data; +static u32 mmc_test_single2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x05); +} + +static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0x85); } static int cbr_test(struct ast_private *ast) @@ -604,16 +587,16 @@ static u32 cbr_scan2(struct ast_private *ast) return data2; } -static u32 cbr_test3(struct ast_private *ast) +static bool cbr_test3(struct ast_private *ast) { if (!mmc_test_burst(ast, 0)) - return 0; + return false; if (!mmc_test_single(ast, 0)) - return 0; - return 1; + return false; + return true; } -static u32 cbr_scan3(struct ast_private *ast) +static bool cbr_scan3(struct ast_private *ast) { u32 patcnt, loop; @@ -624,9 +607,9 @@ static u32 cbr_scan3(struct ast_private *ast) break; } if (loop == 2) - return 0; + return false; } - return 1; + return true; } static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param) @@ -1612,7 +1595,7 @@ ddr2_init_start: } -static void ast_init_dram_2300(struct drm_device *dev) +static void ast_post_chip_2300(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; struct ast2300_dram_param param; @@ -1638,12 +1621,44 @@ static void ast_init_dram_2300(struct drm_device *dev) temp |= 0x73; ast_write32(ast, 0x12008, temp); + param.dram_freq = 396; param.dram_type = AST_DDR3; + temp = ast_mindwm(ast, 0x1e6e2070); if (temp & 0x01000000) param.dram_type = AST_DDR2; - param.dram_chipid = ast->dram_type; - param.dram_freq = ast->mclk; - param.vram_size = ast->vram_size; + switch (temp & 0x18000000) { + case 0: + param.dram_chipid = AST_DRAM_512Mx16; + break; + default: + case 0x08000000: + param.dram_chipid = AST_DRAM_1Gx16; + break; + case 0x10000000: + param.dram_chipid = AST_DRAM_2Gx16; + break; + case 0x18000000: + param.dram_chipid = AST_DRAM_4Gx16; + break; + 
} + switch (temp & 0x0c) { + default: + case 0x00: + param.vram_size = AST_VIDMEM_SIZE_8M; + break; + + case 0x04: + param.vram_size = AST_VIDMEM_SIZE_16M; + break; + + case 0x08: + param.vram_size = AST_VIDMEM_SIZE_32M; + break; + + case 0x0c: + param.vram_size = AST_VIDMEM_SIZE_64M; + break; + } if (param.dram_type == AST_DDR3) { get_ddr3_info(ast, &param); @@ -1663,3 +1678,404 @@ static void ast_init_dram_2300(struct drm_device *dev) } while ((reg & 0x40) == 0); } +static bool cbr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static bool ddr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_burst(ast, 1)) + return false; + if (!mmc_test_burst(ast, 2)) + return false; + if (!mmc_test_burst(ast, 3)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static void ddr_init_common_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); +} + +static void ddr_phy_init_2500(struct ast_private *ast) +{ + u32 data, pass, timecnt; + + pass = 0; + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + while (!pass) { + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; + if (!data) + break; + } + if (timecnt != TIMEOUT) { + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; + if (!data) + pass = 1; + } + if (!pass) { + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + udelay(10); /* delay 10 us */ + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + } + } + + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); +} + +/* + * Check DRAM Size + * 1Gb : 0x80000000 ~ 0x87FFFFFF + * 2Gb : 0x80000000 ~ 0x8FFFFFFF + * 4Gb : 0x80000000 ~ 0x9FFFFFFF + * 8Gb : 0x80000000 ~ 0xBFFFFFFF + */ +static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) +{ + u32 reg_04, reg_14; + + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; + + ast_moutdwm(ast, 0xA0100000, 0x41424344); + ast_moutdwm(ast, 0x90100000, 0x35363738); + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); + + /* Check 8Gbit */ + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { + reg_04 |= 0x03; + reg_14 |= (tRFC >> 24) & 0xFF; + /* Check 4Gbit */ + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { + reg_04 |= 0x02; + reg_14 |= (tRFC >> 16) & 0xFF; + /* Check 2Gbit */ + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { + reg_04 |= 0x01; + reg_14 |= (tRFC >> 8) & 0xFF; + }
else { + reg_14 |= tRFC & 0xFF; + } + ast_moutdwm(ast, 0x1E6E0004, reg_04); + ast_moutdwm(ast, 0x1E6E0014, reg_14); +} + +static void enable_cache_2500(struct ast_private *ast) +{ + u32 reg_04, data; + + reg_04 = ast_mindwm(ast, 0x1E6E0004); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); + + do + data = ast_mindwm(ast, 0x1E6E0004); + while (!(data & 0x80000)); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); +} + +static void set_mpll_2500(struct ast_private *ast) +{ + u32 addr, data, param; + + /* Reset MMC */ + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { + ast_moutdwm(ast, addr, 0x0); + addr += 4; + } + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; + if (data) { + /* CLKIN = 25MHz */ + param = 0x930023E0; + ast_moutdwm(ast, 0x1E6E2160, 0x00011320); + } else { + /* CLKIN = 24MHz */ + param = 0x93002400; + } + ast_moutdwm(ast, 0x1E6E2020, param); + udelay(100); +} + +static void reset_mmc_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E78505C, 0x00000004); + ast_moutdwm(ast, 0x1E785044, 0x00000001); + ast_moutdwm(ast, 0x1E785048, 0x00004755); + ast_moutdwm(ast, 0x1E78504C, 0x00000013); + mdelay(100); + ast_moutdwm(ast, 0x1E785054, 0x00000077); + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); +} + +static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x00020091); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + u32 data, data2, pass, retrycnt; + u32 ddr_vref, phy_vref; + u32 min_ddr_vref = 0, min_phy_vref = 0; + u32 max_ddr_vref = 0, max_phy_vref = 0; + + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + 
ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); + + /* Train PHY Vref first */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + max_phy_vref = 0x0; + pass = 0; + ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + data = ast_mindwm(ast, 0x1E6E03D0); + data2 = data >> 8; + data = data & 0xff; + if (data > data2) + data = data2; + if (max_phy_vref < data) { + max_phy_vref = data; + min_phy_vref = phy_vref; + } + } else if (pass > 0) + break; + } + } + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); + + /* Train DDR Vref next */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + min_ddr_vref = 0xFF; + max_ddr_vref = 0x0; + pass = 0; + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + if (min_ddr_vref > ddr_vref) + min_ddr_vref = ddr_vref; + if (max_ddr_vref < ddr_vref) + max_ddr_vref = ddr_vref; + } else if (pass != 0) + break; + } + } + + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static bool ast_dram_init_2500(struct ast_private *ast) +{ + u32 data; + u32 max_tries = 5; + + do { + if (max_tries-- == 0) + return false; + set_mpll_2500(ast); + reset_mmc_2500(ast); + ddr_init_common_2500(ast); + + data = ast_mindwm(ast, 0x1E6E2070); + if (data & 0x01000000) + 
ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); + else + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); + } while (!ddr_test_2500(ast)); + + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); + + /* Patch code */ + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); + + return true; +} + +void ast_post_chip_2500(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & 0x80) == 0) {/* vga only */ + /* Clear bus lock condition */ + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); + ast_moutdwm(ast, 0x1e600084, 0x00010000); + ast_moutdwm(ast, 0x1e600088, 0x00000000); + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + while (ast_read32(ast, 0x12000) != 0x1) + ; + + ast_write32(ast, 0x10000, 0xfc600309); + while (ast_read32(ast, 0x10000) != 0x1) + ; + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + /* Reset USB port to patch USB unknown device issue */ + ast_moutdwm(ast, 0x1e6e2090, 0x20000000); + temp = ast_mindwm(ast, 0x1e6e2094); + temp |= 0x00004000; + ast_moutdwm(ast, 0x1e6e2094, temp); + temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x00800000) { + ast_moutdwm(ast, 0x1e6e207c, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1e6e2070, 0x00800000); + } + + if (!ast_dram_init_2500(ast)) + DRM_ERROR("DRAM init failed !\n"); + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 3608d5aa7451..5f4c2e833a65 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -47,6 +47,7 @@ #define SyncPN (PVSync | NHSync) #define SyncNP (NVSync | PHSync) #define SyncNN (NVSync | NHSync) +#define AST2500PreCatchCRT 0x00004000 /* DCLK Index */ #define VCLK25_175 0x00 @@ -78,37 +79,67 @@ #define VCLK97_75 0x19 #define VCLK118_25 0x1A -static struct ast_vbios_dclk_info dclk_table[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ +static const struct ast_vbios_dclk_info dclk_table[] = { + {0x2C, 0xE7, 0x03}, /* 00: 
VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ + {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ + {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ }; -static struct ast_vbios_stdtable vbios_stdtable[] = { +static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { + {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ + {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ + {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_stdtable vbios_stdtable[] = { /* MD_2_3_400 */ { 0x67, @@ -181,21 +212,21 @@ static struct ast_vbios_stdtable vbios_stdtable[] = { }, }; -static struct ast_vbios_enhtable res_640x480[] = { +static const struct ast_vbios_enhtable res_640x480[] = { { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E }, { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E }, { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */ (SyncNN | Charx8Dot) , 75, 3, 0x2E }, - { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ (SyncNN | Charx8Dot) , 85, 4, 0x2E }, - { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E }, }; -static struct ast_vbios_enhtable res_800x600[] = { - {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ +static const struct ast_vbios_enhtable res_800x600[] = { + {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ (SyncPP | Charx8Dot), 56, 1, 0x30 }, {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */ (SyncPP | 
Charx8Dot), 60, 2, 0x30 }, @@ -210,7 +241,7 @@ static struct ast_vbios_enhtable res_800x600[] = { }; -static struct ast_vbios_enhtable res_1024x768[] = { +static const struct ast_vbios_enhtable res_1024x768[] = { {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */ (SyncNN | Charx8Dot), 60, 1, 0x31 }, {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */ @@ -223,7 +254,7 @@ static struct ast_vbios_enhtable res_1024x768[] = { (SyncPP | Charx8Dot), 0xFF, 4, 0x31 }, }; -static struct ast_vbios_enhtable res_1280x1024[] = { +static const struct ast_vbios_enhtable res_1280x1024[] = { {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */ (SyncPP | Charx8Dot), 60, 1, 0x32 }, {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */ @@ -234,7 +265,7 @@ static struct ast_vbios_enhtable res_1280x1024[] = { (SyncPP | Charx8Dot), 0xFF, 3, 0x32 }, }; -static struct ast_vbios_enhtable res_1600x1200[] = { +static const struct ast_vbios_enhtable res_1600x1200[] = { {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */ (SyncPP | Charx8Dot), 60, 1, 0x33 }, {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */ @@ -242,34 +273,39 @@ static struct ast_vbios_enhtable res_1600x1200[] = { }; /* 16:9 */ -static struct ast_vbios_enhtable res_1360x768[] = { - {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ +static const struct ast_vbios_enhtable res_1360x768[] = { + {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 }, - {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */ - (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 }, + {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x39 }, }; -static struct ast_vbios_enhtable res_1600x900[] = { - {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A }, - {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ +static const struct ast_vbios_enhtable res_1600x900[] = { + {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x3A }, + {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A }, - {2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A }, }; -static struct ast_vbios_enhtable res_1920x1080[] = { +static const struct ast_vbios_enhtable res_1920x1080[] = { {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x38 }, {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x38 }, }; /* 16:10 */ -static struct ast_vbios_enhtable res_1280x800[] = { - {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | 
WideScreenMode | NewModeInfo), 60, 1, 0x35 }, +static const struct ast_vbios_enhtable res_1280x800[] = { + {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x35 }, {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 }, {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ @@ -277,29 +313,33 @@ static struct ast_vbios_enhtable res_1280x800[] = { }; -static struct ast_vbios_enhtable res_1440x900[] = { +static const struct ast_vbios_enhtable res_1440x900[] = { {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 }, + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x36 }, {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 }, {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 }, }; -static struct ast_vbios_enhtable res_1680x1050[] = { - {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 }, +static const struct ast_vbios_enhtable res_1680x1050[] = { + {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x37 }, {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 }, {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 }, }; -static struct ast_vbios_enhtable res_1920x1200[] = { - {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 }, - {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 }, +static const struct ast_vbios_enhtable res_1920x1200[] = { + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 60, 1, 0x34 }, + {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), 0xFF, 1, 0x34 }, }; #endif diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 32d43f86a8f2..96bb6badb818 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -34,6 +34,8 @@ */ #include <linux/export.h> +#include <linux/sched/signal.h> + #include <drm/drmP.h> #include "drm_legacy.h" #include "drm_internal.h" diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 93381454bdf7..dc4419ada12c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -220,8 +220,8 @@ drm_connector_detect(struct drm_connector *connector, bool force) * - drm_mode_validate_basic() performs basic sanity checks * - drm_mode_validate_size() filters out modes larger than @maxX and @maxY * (if specified) - * - drm_mode_validate_flag() checks the modes 
againt basic connector - * capabilites (interlace_allowed,doublescan_allowed,stereo_allowed) + * - drm_mode_validate_flag() checks the modes against basic connector + * capabilities (interlace_allowed,doublescan_allowed,stereo_allowed) * - the optional &drm_connector_helper_funcs.mode_valid helper can perform * driver and/or hardware specific checks * diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index e78f1406885d..fd56f92f3469 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -16,6 +16,8 @@ #include <linux/spinlock.h> #include <linux/shmem_fs.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include "etnaviv_drv.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index d037adcda6f2..29bb8011dbc4 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -141,7 +141,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (!obj->base.filp) return -ENODEV; - ret = obj->base.filp->f_op->mmap(obj->base.filp, vma); + ret = call_mmap(obj->base.filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index b42c81b42487..7032c542a9b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -60,7 +60,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine) * this is sufficient as the null state generator makes the final batch * with two passes to build command and state separately. At this point * the size of both are known and it compacts them by relocating the state - * right after the commands taking care of aligment so we should sufficient + * right after the commands taking care of alignment so we should sufficient * space below them for adding new commands. 
*/ #define OUT_BATCH(batch, i, val) \ diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index f31deeb72703..e7c3c0318ff6 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -24,6 +24,9 @@ #include <linux/prefetch.h> #include <linux/dma-fence-array.h> +#include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 6a8fa085b74e..22b46398831e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -31,6 +31,7 @@ #include <linux/mmu_notifier.h> #include <linux/mempolicy.h> #include <linux/swap.h> +#include <linux/sched/mm.h> struct i915_mm_struct { struct mm_struct *mm; @@ -334,7 +335,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) mm->i915 = to_i915(obj->base.dev); mm->mm = current->mm; - atomic_inc(&current->mm->mm_count); + mmgrab(current->mm); mm->mn = NULL; @@ -507,7 +508,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) flags |= FOLL_WRITE; ret = -EFAULT; - if (atomic_inc_not_zero(&mm->mm_users)) { + if (mmget_not_zero(mm)) { down_read(&mm->mmap_sem); while (pinned < npages) { ret = get_user_pages_remote diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index fcfa423d08bd..7044e9a6abf7 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -23,6 +23,7 @@ */ #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b9cde116dab3..344f238b283f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> #include <linux/hdmi.h> +#include <linux/sched/clock.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include <drm/drm_crtc.h> diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index d5ce829b3199..45cf363d25ad 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h @@ -266,7 +266,7 @@ do { \ do { \ if (MGA_VERBOSE) { \ DRM_INFO("BEGIN_DMA(%d)\n", (n)); \ - DRM_INFO(" space=0x%x req=0x%Zx\n", \ + DRM_INFO(" space=0x%x req=0x%zx\n", \ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \ } \ prim = dev_priv->prim.start; \ @@ -313,7 +313,7 @@ do { \ #define DMA_WRITE(offset, val) \ do { \ if (MGA_VERBOSE) \ - DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ + DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04zx\n", \ (u32)(val), write + (offset) * sizeof(u32)); \ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ } while (0) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index a2bb855a2851..ac5800c72cb4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -18,7 +18,7 @@ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/of_device.h> diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index ab89eed9ddd9..4b86e8b45009 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -181,7
+181,7 @@ #define HW_ASSISTED_I2C_STATUS_FAILURE 2 #define HW_ASSISTED_I2C_STATUS_SUCCESS 1 -#pragma pack(1) /* BIOS data must use byte aligment */ +#pragma pack(1) /* BIOS data must use byte alignment */ /* Define offset to location of ROM header. */ @@ -3883,7 +3883,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT }ATOM_GPIO_PIN_ASSIGNMENT; //ucGPIO_ID pre-define id for multiple usage -//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable +//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC switching feature is enable #define PP_AC_DC_SWITCH_GPIO_PINID 60 //from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable #define VDDC_VRHOT_GPIO_PINID 61 @@ -7909,7 +7909,7 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 /*********************************************************************************/ -#pragma pack() // BIOS data must use byte aligment +#pragma pack() // BIOS data must use byte alignment // // AMD ACPI Table diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ad31b3eb408f..0e4eb845cbb0 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -24,6 +24,7 @@ config ROCKCHIP_ANALOGIX_DP config ROCKCHIP_CDN_DP tristate "Rockchip cdn DP" depends on DRM_ROCKCHIP + depends on EXTCON select SND_SOC_HDMI_CODEC if SND_SOC help This selects support for Rockchip SoC specific extensions diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 9ab67a670885..fd79a70b8552 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -111,7 +111,7 @@ static int cdn_dp_clk_enable(struct cdn_dp_device *dp) ret = pm_runtime_get_sync(dp->dev); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); - goto err_pclk; + goto err_pm_runtime_get; } reset_control_assert(dp->core_rst); @@ -133,6 +133,8 @@ static int cdn_dp_clk_enable(struct cdn_dp_device *dp) return 0; err_set_rate: + pm_runtime_put(dp->dev); +err_pm_runtime_get: clk_disable_unprepare(dp->core_clk); err_core_clk: clk_disable_unprepare(dp->pclk); diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index f154fb1929bd..913f4318cdc0 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c @@ -33,7 +33,7 @@ #include <linux/atomic.h> #include <linux/errno.h> #include <linux/wait.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/module.h> #define TTM_WRITE_LOCK_PENDING (1 << 0) diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index ab3016982466..1eef98c3331d 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -26,6 +26,7 @@ #include <linux/pm_runtime.h> #include <linux/device.h> #include <linux/io.h> +#include <linux/sched/signal.h> #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 7ccbb03e98de..a1f42d125e6e 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -288,7 +288,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, if (!obj->filp) return -ENODEV; - ret = obj->filp->f_op->mmap(obj->filp, vma); + ret = call_mmap(obj->filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 30f989a0cafc..491866865c33 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -176,7 +176,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) #endif ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, - callbacks, names); + callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); goto err_vqs; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 541a5887dd6c..d08f26973d0b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -199,9 +199,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, DRM_MASTER | DRM_AUTH), + /* + * The permissions of the below ioctl are overridden in + * vmw_generic_ioctl(). We require either + * DRM_MASTER or capable(CAP_SYS_ADMIN). + */ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_SHADER, vmw_shader_define_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), @@ -1123,6 +1128,10 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, return (long) vmw_execbuf_ioctl(dev, arg, file_priv, _IOC_SIZE(cmd)); + } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { + if (!drm_is_current_master(file_priv) && + !capable(CAP_SYS_ADMIN)) + return -EACCES; } if (unlikely(ioctl->cmd != cmd)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1e59a486bba8..59ff4197173a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -41,9 +41,9 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20160210" +#define VMWGFX_DRIVER_DATE "20170221" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 11 +#define VMWGFX_DRIVER_MINOR 12 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index 1d08ba381098..d646ac931663 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -159,7 +159,7 @@ static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format, void __iomem *rsz = zplane->rsz; u32 src_chroma_w = src_w; u32 src_chroma_h = src_h; - u32 fmt; + int fmt; /* Set up source and destination resolution */ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1)); @@ -203,7 +203,7 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane, u32 src_x, src_y, src_w, src_h; u32 dst_x, dst_y, dst_w, dst_h; uint32_t format; - u32 fmt; + int fmt; int num_planes; int i; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 0f5b2dd24507..92f1452dad57 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -41,7 +41,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/poll.h> diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index acfb522a432a..c6c9c51c806f 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,7 +30,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/uaccess.h> diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c 
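The scattered one-line include changes in this area (vgaarb, hid-debug, and the hid-roccat/hidraw/hiddev, cmt_speech and i2c bus drivers further down) all stem from the linux/sched.h header split: signal-related helpers now come from <linux/sched/signal.h>, mm helpers from <linux/sched/mm.h>, sched_clock() from <linux/sched/clock.h>, and so on. A hedged sketch of the only adjustment such drivers need, with a made-up function name:

#include <linux/sched/signal.h>	/* signal_pending() lives here after the split */

static int check_for_signal_example(void)
{
	if (signal_pending(current))
		return -ERESTARTSYS;

	return 0;
}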
index 0dd1167b2c9b..9c113f62472d 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -487,7 +487,7 @@ static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize, int offset, const char *device_name) { /* * the fixup that need to be done: - * - change Usage Maximum in the Comsumer Control + * - change Usage Maximum in the Consumer Control * (report ID 3) to a reasonable value */ if (*rsize >= offset + 31 && diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 76d06cf87b2a..fb77dec720a4 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -25,7 +25,7 @@ #include <linux/cdev.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/hid-roccat.h> #include <linux/module.h> diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index f0e2757cb909..ec530454e6f6 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -33,7 +33,7 @@ #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/hidraw.h> diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 700145b15088..774bd701dae0 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -27,6 +27,7 @@ #include <linux/poll.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 7175e6bedf21..727f968ac1cb 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -31,7 +31,7 @@ #include <linux/slab.h> #include <linux/fs.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ioctl.h> #include <linux/uaccess.h> #include <linux/pm_qos.h> diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index f7f6b9144b07..da6b59ba5940 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -34,6 +34,8 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/cpu.h> +#include <linux/sched/task_stack.h> + #include <asm/hyperv.h> #include <asm/hypervisor.h> #include <asm/mshyperv.h> diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 6dca2fd3d303..6d1208b2b6d2 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -861,7 +861,7 @@ static ssize_t fan1_pulses_store(struct device *dev, * (i.e. closed or open-loop). * * Following documentation about hwmon's sysfs interface, a pwm1_enable node - * should accept followings: + * should accept the following: * * 0 : no fan speed control (i.e. fan at full speed) * 1 : manual fan speed control enabled (use pwm[1-*]) (open-loop) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 0cdc8443deab..8adc0f1d7ad0 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -128,6 +128,7 @@ config I2C_I801 DNV (SOC) Broxton (SOC) Lewisburg (PCH) + Gemini Lake (SOC) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -886,6 +887,16 @@ config I2C_ST This driver can also be built as module. If so, the module will be called i2c-st. +config I2C_STM32F4 + tristate "STMicroelectronics STM32F4 I2C support" + depends on ARCH_STM32 || COMPILE_TEST + help + Enable this option to add support for STM32 I2C controller embedded + in STM32F4 SoCs. 
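Worth noting before the remaining bus-driver hunks: many of them (at91, bfin-twi, designware, eg20t, emev2, imx, nforce2, robotfuzz-osif, sh_mobile, st, xgene-slimpro) only add const to their i2c_algorithm tables, and omap does the same for its dev_pm_ops. Since struct i2c_adapter already holds the algorithm through a const pointer, the table can move to read-only data; a minimal sketch with a hypothetical driver prefix:

#include <linux/i2c.h>

static int foo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
static u32 foo_i2c_func(struct i2c_adapter *adap);

/* const: the I2C core only reads this table, so it can be placed in .rodata */
static const struct i2c_algorithm foo_i2c_algo = {
	.master_xfer	= foo_i2c_xfer,
	.functionality	= foo_i2c_func,
};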
+ + This driver can also be built as module. If so, the module + will be called i2c-stm32f4. + config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 @@ -919,6 +930,17 @@ config I2C_TEGRA If you say yes to this option, support will be included for the I2C controller embedded in NVIDIA Tegra SOCs +config I2C_TEGRA_BPMP + tristate "NVIDIA Tegra BPMP I2C controller" + depends on TEGRA_BPMP + help + If you say yes to this option, support will be included for the I2C + controller embedded in NVIDIA Tegra SoCs accessed via the BPMP. + + This I2C driver is a 'virtual' I2C driver. The real driver is part + of the BPMP firmware, and this driver merely communicates with that + real driver. + config I2C_UNIPHIER tristate "UniPhier FIFO-less I2C controller" depends on ARCH_UNIPHIER || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 1c1bac87a9db..30b60855fbcd 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -85,9 +85,11 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o obj-$(CONFIG_I2C_ST) += i2c-st.o +obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o +obj-$(CONFIG_I2C_TEGRA_BPMP) += i2c-tegra-bpmp.o obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 0b86c6173e07..fabbb9e49161 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -820,7 +820,7 @@ static u32 at91_twi_func(struct i2c_adapter *adapter) | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } -static struct i2c_algorithm at91_twi_algorithm = { +static const struct i2c_algorithm at91_twi_algorithm = { .master_xfer = at91_twi_xfer, .functionality = at91_twi_func, }; @@ -1180,6 +1180,7 @@ static int at91_twi_suspend_noirq(struct device *dev) static int at91_twi_resume_noirq(struct device *dev) { + struct at91_twi_dev *twi_dev = dev_get_drvdata(dev); int ret; if (!pm_runtime_status_suspended(dev)) { @@ -1191,6 +1192,8 @@ static int at91_twi_resume_noirq(struct device *dev) pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); + at91_init_twi_bus(twi_dev); + return 0; } diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index c3436f627028..cd07a69e2e93 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -195,7 +195,9 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data) } if (val & BCM2835_I2C_S_DONE) { - if (i2c_dev->curr_msg->flags & I2C_M_RD) { + if (!i2c_dev->curr_msg) { + dev_err(i2c_dev->dev, "Got unexpected interrupt (from firmware?)\n"); + } else if (i2c_dev->curr_msg->flags & I2C_M_RD) { bcm2835_drain_rxfifo(i2c_dev); val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S); } diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 29d00c4f7824..9fe942b8c610 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -563,7 +563,7 @@ static u32 bfin_twi_functionality(struct i2c_adapter *adap) I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK; } -static struct i2c_algorithm bfin_twi_algorithm = { +static const struct i2c_algorithm bfin_twi_algorithm = { .master_xfer = bfin_twi_master_xfer, .smbus_xfer = bfin_twi_smbus_xfer, .functionality 
= bfin_twi_functionality, diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index 9b36a7b3befd..eb76b76f4754 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -154,8 +154,10 @@ static int ec_i2c_parse_response(const u8 *buf, struct i2c_msg i2c_msgs[], resp = (const struct ec_response_i2c_passthru *)buf; if (resp->i2c_status & EC_I2C_STATUS_TIMEOUT) return -ETIMEDOUT; + else if (resp->i2c_status & EC_I2C_STATUS_NAK) + return -ENXIO; else if (resp->i2c_status & EC_I2C_STATUS_ERROR) - return -EREMOTEIO; + return -EIO; /* Other side could send us back fewer messages, but not more */ if (resp->num_msgs > *num) @@ -222,10 +224,8 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], } result = ec_i2c_parse_response(msg->data, i2c_msgs, &num); - if (result < 0) { - dev_err(dev, "Error parsing EC i2c message %d\n", result); + if (result < 0) goto exit; - } /* Indicate success by saying how many messages were sent */ result = num; diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index e9db857c6226..7a3faa551cf8 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -820,7 +820,7 @@ static u32 i2c_dw_func(struct i2c_adapter *adap) return dev->functionality; } -static struct i2c_algorithm i2c_dw_algo = { +static const struct i2c_algorithm i2c_dw_algo = { .master_xfer = i2c_dw_xfer, .functionality = i2c_dw_func, }; diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 5ce71ce7b6c4..bdeab0174fec 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -715,7 +715,7 @@ static u32 pch_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } -static struct i2c_algorithm pch_algorithm = { +static const struct i2c_algorithm pch_algorithm = { .master_xfer = pch_i2c_xfer, .functionality = pch_i2c_func }; diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 96bb4e749012..312912708854 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -347,7 +347,7 @@ static int em_i2c_unreg_slave(struct i2c_client *slave) return 0; } -static struct i2c_algorithm em_i2c_algo = { +static const struct i2c_algorithm em_i2c_algo = { .master_xfer = em_i2c_xfer, .functionality = em_i2c_func, .reg_slave = em_i2c_reg_slave, diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index bea607149972..cbd93ce0661f 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -130,12 +130,32 @@ /* I2C_TRANS_STATUS register bits */ #define HSI2C_MASTER_BUSY (1u << 17) #define HSI2C_SLAVE_BUSY (1u << 16) + +/* I2C_TRANS_STATUS register bits for Exynos5 variant */ #define HSI2C_TIMEOUT_AUTO (1u << 4) #define HSI2C_NO_DEV (1u << 3) #define HSI2C_NO_DEV_ACK (1u << 2) #define HSI2C_TRANS_ABORT (1u << 1) #define HSI2C_TRANS_DONE (1u << 0) +/* I2C_TRANS_STATUS register bits for Exynos7 variant */ +#define HSI2C_MASTER_ST_MASK 0xf +#define HSI2C_MASTER_ST_IDLE 0x0 +#define HSI2C_MASTER_ST_START 0x1 +#define HSI2C_MASTER_ST_RESTART 0x2 +#define HSI2C_MASTER_ST_STOP 0x3 +#define HSI2C_MASTER_ST_MASTER_ID 0x4 +#define HSI2C_MASTER_ST_ADDR0 0x5 +#define HSI2C_MASTER_ST_ADDR1 0x6 +#define HSI2C_MASTER_ST_ADDR2 0x7 +#define HSI2C_MASTER_ST_ADDR_SR 0x8 +#define HSI2C_MASTER_ST_READ 0x9 +#define HSI2C_MASTER_ST_WRITE 0xa +#define 
HSI2C_MASTER_ST_NO_ACK 0xb +#define HSI2C_MASTER_ST_LOSE 0xc +#define HSI2C_MASTER_ST_WAIT 0xd +#define HSI2C_MASTER_ST_WAIT_CMD 0xe + /* I2C_ADDR register bits */ #define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0) #define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10) @@ -437,6 +457,7 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) int_status = readl(i2c->regs + HSI2C_INT_STATUS); writel(int_status, i2c->regs + HSI2C_INT_STATUS); + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); /* handle interrupt related to the transfer status */ if (i2c->variant->hw == HSI2C_EXYNOS7) { @@ -460,8 +481,12 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) i2c->state = -ETIMEDOUT; goto stop; } + + if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { + i2c->state = -EAGAIN; + goto stop; + } } else if (int_status & HSI2C_INT_I2C) { - trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if (trans_status & HSI2C_NO_DEV_ACK) { dev_dbg(i2c->dev, "No ACK from device\n"); i2c->state = -ENXIO; @@ -502,8 +527,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) fifo_level = HSI2C_TX_FIFO_LVL(fifo_status); len = i2c->variant->fifo_depth - fifo_level; - if (len > (i2c->msg->len - i2c->msg_ptr)) + if (len > (i2c->msg->len - i2c->msg_ptr)) { + u32 int_en = readl(i2c->regs + HSI2C_INT_ENABLE); + + int_en &= ~HSI2C_INT_TX_ALMOSTEMPTY_EN; + writel(int_en, i2c->regs + HSI2C_INT_ENABLE); len = i2c->msg->len - i2c->msg_ptr; + } while (len > 0) { byte = i2c->msg->buf[i2c->msg_ptr++]; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e242db43774b..6484fa6dbb84 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -65,6 +65,7 @@ * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes * Kaby Lake PCH-H (PCH) 0xa2a3 32 hard yes yes yes + * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -213,6 +214,7 @@ #define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292 #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 +#define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 @@ -1012,6 +1014,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 412b91d255ad..961c5f42d956 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -37,6 +37,8 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/sched/signal.h> + #include <asm/irq.h> #include <linux/io.h> #include <linux/i2c.h> diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 3310f2e0dbd3..e86801a63120 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ 
b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -538,7 +538,7 @@ static u32 lpi2c_imx_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_READ_BLOCK_DATA; } -static struct i2c_algorithm lpi2c_imx_algo = { +static const struct i2c_algorithm lpi2c_imx_algo = { .master_xfer = lpi2c_imx_xfer, .functionality = lpi2c_imx_func, }; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 47fc1f1acff7..95ed17183e73 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1037,7 +1037,7 @@ static u32 i2c_imx_func(struct i2c_adapter *adapter) | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } -static struct i2c_algorithm i2c_imx_algo = { +static const struct i2c_algorithm i2c_imx_algo = { .master_xfer = i2c_imx_xfer, .functionality = i2c_imx_func, }; diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 565a49a0c564..96caf378b1dc 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index b4dec0841bc2..a50bd6891e27 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -977,11 +977,32 @@ mv64xxx_i2c_remove(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM +static int mv64xxx_i2c_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pdev); + + mv64xxx_i2c_hw_init(drv_data); + + return 0; +} + +static const struct dev_pm_ops mv64xxx_i2c_pm = { + .resume = mv64xxx_i2c_resume, +}; + +#define mv64xxx_i2c_pm_ops (&mv64xxx_i2c_pm) +#else +#define mv64xxx_i2c_pm_ops NULL +#endif + static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, .remove = mv64xxx_i2c_remove, .driver = { .name = MV64XXX_I2C_CTLR_NAME, + .pm = mv64xxx_i2c_pm_ops, .of_match_table = mv64xxx_i2c_of_match_table, }, }; diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index 374b35e7e450..3241bb9d6c18 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -296,7 +296,7 @@ static u32 nforce2_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_BLOCK_DATA : 0); } -static struct i2c_algorithm smbus_algorithm = { +static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = nforce2_access, .functionality = nforce2_func, }; diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index e160f838c254..aa3c8f4771c1 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h @@ -6,7 +6,6 @@ #include <linux/i2c-smbus.h> #include <linux/io.h> #include <linux/kernel.h> -#include <linux/pci.h> /* Controller command patterns */ #define SW_TWSI_V BIT_ULL(63) /* Valid bit */ @@ -118,9 +117,6 @@ struct octeon_i2c { void (*hlc_int_disable)(struct octeon_i2c *); atomic_t int_enable_cnt; atomic_t hlc_int_enable_cnt; -#if IS_ENABLED(CONFIG_I2C_THUNDERX) - struct msix_entry i2c_msix; -#endif struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; }; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index c7da0c42baee..1ebb5e947e0b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1504,7 +1504,7 @@ static int omap_i2c_runtime_resume(struct device *dev) 
return 0; } -static struct dev_pm_ops omap_i2c_pm_ops = { +static const struct dev_pm_ops omap_i2c_pm_ops = { SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, omap_i2c_runtime_resume, NULL) }; diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index 6263ea82d6ac..8f11d347b3ec 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -80,6 +80,7 @@ #define ICIER_TEIE 0x40 #define ICIER_RIE 0x20 #define ICIER_NAKIE 0x10 +#define ICIER_SPIE 0x08 #define ICSR2_NACKF 0x10 @@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data) return IRQ_NONE; } - if (riic->is_last || riic->err) + if (riic->is_last || riic->err) { + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); - - writeb(0, riic->base + RIIC_ICIER); - complete(&riic->msg_done); + } return IRQ_HANDLED; } @@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data) if (riic->bytes_left == 1) { /* STOP must come before we set ACKBT! */ - if (riic->is_last) + if (riic->is_last) { + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); + } riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3); - writeb(0, riic->base + RIIC_ICIER); - complete(&riic->msg_done); } else { riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3); } @@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t riic_stop_isr(int irq, void *data) +{ + struct riic_dev *riic = data; + + /* read back registers to confirm writes have fully propagated */ + writeb(0, riic->base + RIIC_ICSR2); + readb(riic->base + RIIC_ICSR2); + writeb(0, riic->base + RIIC_ICIER); + readb(riic->base + RIIC_ICIER); + + complete(&riic->msg_done); + + return IRQ_HANDLED; +} + static u32 riic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; @@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = { { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" }, { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" }, { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" }, + { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" }, { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" }, }; diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c index 89d8b41b6668..9c0f52b7ff7e 100644 --- a/drivers/i2c/busses/i2c-robotfuzz-osif.c +++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c @@ -117,7 +117,7 @@ static u32 osif_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm osif_algorithm = { +static const struct i2c_algorithm osif_algorithm = { .master_xfer = osif_xfer, .functionality = osif_func, }; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 3d9ebe6e5716..3d7559348745 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -781,7 +781,7 @@ static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } -static struct i2c_algorithm sh_mobile_i2c_algorithm = { +static const struct i2c_algorithm sh_mobile_i2c_algorithm = { .functionality = sh_mobile_i2c_func, .master_xfer = sh_mobile_i2c_xfer, }; diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 1371547ce1a3..1eb9fa82dcfd 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -776,7 +776,7 @@ static u32 
st_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm st_i2c_algo = { +static const struct i2c_algorithm st_i2c_algo = { .master_xfer = st_i2c_xfer, .functionality = st_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c new file mode 100644 index 000000000000..f9dd7e86b861 --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -0,0 +1,897 @@ +/* + * Driver for STMicroelectronics STM32 I2C controller + * + * This I2C controller is described in the STM32F429/439 Soc reference manual. + * Please see below a link to the documentation: + * http://www.st.com/resource/en/reference_manual/DM00031020.pdf + * + * Copyright (C) M'boumba Cedric Madianga 2016 + * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> + * + * This driver is based on i2c-st.c + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* STM32F4 I2C offset registers */ +#define STM32F4_I2C_CR1 0x00 +#define STM32F4_I2C_CR2 0x04 +#define STM32F4_I2C_DR 0x10 +#define STM32F4_I2C_SR1 0x14 +#define STM32F4_I2C_SR2 0x18 +#define STM32F4_I2C_CCR 0x1C +#define STM32F4_I2C_TRISE 0x20 +#define STM32F4_I2C_FLTR 0x24 + +/* STM32F4 I2C control 1*/ +#define STM32F4_I2C_CR1_POS BIT(11) +#define STM32F4_I2C_CR1_ACK BIT(10) +#define STM32F4_I2C_CR1_STOP BIT(9) +#define STM32F4_I2C_CR1_START BIT(8) +#define STM32F4_I2C_CR1_PE BIT(0) + +/* STM32F4 I2C control 2 */ +#define STM32F4_I2C_CR2_FREQ_MASK GENMASK(5, 0) +#define STM32F4_I2C_CR2_FREQ(n) ((n) & STM32F4_I2C_CR2_FREQ_MASK) +#define STM32F4_I2C_CR2_ITBUFEN BIT(10) +#define STM32F4_I2C_CR2_ITEVTEN BIT(9) +#define STM32F4_I2C_CR2_ITERREN BIT(8) +#define STM32F4_I2C_CR2_IRQ_MASK (STM32F4_I2C_CR2_ITBUFEN | \ + STM32F4_I2C_CR2_ITEVTEN | \ + STM32F4_I2C_CR2_ITERREN) + +/* STM32F4 I2C Status 1 */ +#define STM32F4_I2C_SR1_AF BIT(10) +#define STM32F4_I2C_SR1_ARLO BIT(9) +#define STM32F4_I2C_SR1_BERR BIT(8) +#define STM32F4_I2C_SR1_TXE BIT(7) +#define STM32F4_I2C_SR1_RXNE BIT(6) +#define STM32F4_I2C_SR1_BTF BIT(2) +#define STM32F4_I2C_SR1_ADDR BIT(1) +#define STM32F4_I2C_SR1_SB BIT(0) +#define STM32F4_I2C_SR1_ITEVTEN_MASK (STM32F4_I2C_SR1_BTF | \ + STM32F4_I2C_SR1_ADDR | \ + STM32F4_I2C_SR1_SB) +#define STM32F4_I2C_SR1_ITBUFEN_MASK (STM32F4_I2C_SR1_TXE | \ + STM32F4_I2C_SR1_RXNE) +#define STM32F4_I2C_SR1_ITERREN_MASK (STM32F4_I2C_SR1_AF | \ + STM32F4_I2C_SR1_ARLO | \ + STM32F4_I2C_SR1_BERR) + +/* STM32F4 I2C Status 2 */ +#define STM32F4_I2C_SR2_BUSY BIT(1) + +/* STM32F4 I2C Control Clock */ +#define STM32F4_I2C_CCR_CCR_MASK GENMASK(11, 0) +#define STM32F4_I2C_CCR_CCR(n) ((n) & STM32F4_I2C_CCR_CCR_MASK) +#define STM32F4_I2C_CCR_FS BIT(15) +#define STM32F4_I2C_CCR_DUTY BIT(14) + +/* STM32F4 I2C Trise */ +#define STM32F4_I2C_TRISE_VALUE_MASK GENMASK(5, 0) +#define STM32F4_I2C_TRISE_VALUE(n) ((n) & STM32F4_I2C_TRISE_VALUE_MASK) + +#define STM32F4_I2C_MIN_STANDARD_FREQ 2U +#define STM32F4_I2C_MIN_FAST_FREQ 6U +#define STM32F4_I2C_MAX_FREQ 46U +#define HZ_TO_MHZ 1000000 + +enum stm32f4_i2c_speed { + STM32F4_I2C_SPEED_STANDARD, /* 100 kHz */ + STM32F4_I2C_SPEED_FAST, /* 400 kHz */ + STM32F4_I2C_SPEED_END, +}; + +/** + * struct 
stm32f4_i2c_msg - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct stm32f4_i2c_msg { + u8 addr; + u32 count; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct stm32f4_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @clk: hw i2c clock + * @speed: I2C clock frequency of the controller. Standard or Fast are supported + * @parent_rate: I2C clock parent rate in MHz + * @msg: I2C transfer information + */ +struct stm32f4_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + struct clk *clk; + int speed; + int parent_rate; + struct stm32f4_i2c_msg msg; +}; + +static inline void stm32f4_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void stm32f4_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +static void stm32f4_i2c_disable_irq(struct stm32f4_i2c_dev *i2c_dev) +{ + void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; + + stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_IRQ_MASK); +} + +static int stm32f4_i2c_set_periph_clk_freq(struct stm32f4_i2c_dev *i2c_dev) +{ + u32 freq; + u32 cr2 = 0; + + i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk); + freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); + + if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + /* + * To reach 100 kHz, the parent clk frequency should be between + * a minimum value of 2 MHz and a maximum value of 46 MHz due + * to hardware limitation + */ + if (freq < STM32F4_I2C_MIN_STANDARD_FREQ || + freq > STM32F4_I2C_MAX_FREQ) { + dev_err(i2c_dev->dev, + "bad parent clk freq for standard mode\n"); + return -EINVAL; + } + } else { + /* + * To be as close as possible to 400 kHz, the parent clk + * frequency should be between a minimum value of 6 MHz and a + * maximum value of 46 MHz due to hardware limitation + */ + if (freq < STM32F4_I2C_MIN_FAST_FREQ || + freq > STM32F4_I2C_MAX_FREQ) { + dev_err(i2c_dev->dev, + "bad parent clk freq for fast mode\n"); + return -EINVAL; + } + } + + cr2 |= STM32F4_I2C_CR2_FREQ(freq); + writel_relaxed(cr2, i2c_dev->base + STM32F4_I2C_CR2); + + return 0; +} + +static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev) +{ + u32 freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); + u32 trise; + + /* + * These bits must be programmed with the maximum SCL rise time given in + * the I2C bus specification, incremented by 1. + * + * In standard mode, the maximum allowed SCL rise time is 1000 ns. + * If, in the I2C_CR2 register, the value of FREQ[5:0] bits is equal to + * 0x08 so period = 125 ns therefore the TRISE[5:0] bits must be + * programmed with 0x9. (1000 ns / 125 ns + 1) + * So, for I2C standard mode TRISE = FREQ[5:0] + 1 + * + * In fast mode, the maximum allowed SCL rise time is 300 ns. + * If, in the I2C_CR2 register, the value of FREQ[5:0] bits is equal to + * 0x08 so period = 125 ns therefore the TRISE[5:0] bits must be + * programmed with 0x3. (300 ns / 125 ns + 1) + * So, for I2C fast mode TRISE = FREQ[5:0] * 300 / 1000 + 1 + * + * Function stm32f4_i2c_set_periph_clk_freq made sure that parent rate + * is not higher than 46 MHz . 
As a result trise is at most 4 bits wide + * and so fits into the TRISE bits [5:0]. + */ + if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) + trise = freq + 1; + else + trise = freq * 3 / 10 + 1; + + writel_relaxed(STM32F4_I2C_TRISE_VALUE(trise), + i2c_dev->base + STM32F4_I2C_TRISE); +} + +static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev) +{ + u32 val; + u32 ccr = 0; + + if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + /* + * In standard mode: + * t_scl_high = t_scl_low = CCR * I2C parent clk period + * So to reach 100 kHz, we have: + * CCR = I2C parent rate / 100 kHz >> 1 + * + * For example with parent rate = 2 MHz: + * CCR = 2000000 / (100000 << 1) = 10 + * t_scl_high = t_scl_low = 10 * (1 / 2000000) = 5000 ns + * t_scl_high + t_scl_low = 10000 ns so 100 kHz is reached + * + * Function stm32f4_i2c_set_periph_clk_freq made sure that + * parent rate is not higher than 46 MHz . As a result val + * is at most 8 bits wide and so fits into the CCR bits [11:0]. + */ + val = i2c_dev->parent_rate / (100000 << 1); + } else { + /* + * In fast mode, we compute CCR with duty = 0 as with low + * frequencies we are not able to reach 400 kHz. + * In that case: + * t_scl_high = CCR * I2C parent clk period + * t_scl_low = 2 * CCR * I2C parent clk period + * So, CCR = I2C parent rate / (400 kHz * 3) + * + * For example with parent rate = 6 MHz: + * CCR = 6000000 / (400000 * 3) = 5 + * t_scl_high = 5 * (1 / 6000000) = 833 ns > 600 ns + * t_scl_low = 2 * 5 * (1 / 6000000) = 1667 ns > 1300 ns + * t_scl_high + t_scl_low = 2500 ns so 400 kHz is reached + * + * Function stm32f4_i2c_set_periph_clk_freq made sure that + * parent rate is not higher than 46 MHz . As a result val + * is at most 6 bits wide and so fits into the CCR bits [11:0]. + */ + val = DIV_ROUND_UP(i2c_dev->parent_rate, 400000 * 3); + + /* Select Fast mode */ + ccr |= STM32F4_I2C_CCR_FS; + } + + ccr |= STM32F4_I2C_CCR_CCR(val); + writel_relaxed(ccr, i2c_dev->base + STM32F4_I2C_CCR); +} + +/** + * stm32f4_i2c_hw_config() - Prepare I2C block + * @i2c_dev: Controller's private data + */ +static int stm32f4_i2c_hw_config(struct stm32f4_i2c_dev *i2c_dev) +{ + int ret; + + ret = stm32f4_i2c_set_periph_clk_freq(i2c_dev); + if (ret) + return ret; + + stm32f4_i2c_set_rise_time(i2c_dev); + + stm32f4_i2c_set_speed_mode(i2c_dev); + + /* Enable I2C */ + writel_relaxed(STM32F4_I2C_CR1_PE, i2c_dev->base + STM32F4_I2C_CR1); + + return 0; +} + +static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev) +{ + u32 status; + int ret; + + ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F4_I2C_SR2, + status, + !(status & STM32F4_I2C_SR2_BUSY), + 10, 1000); + if (ret) { + dev_dbg(i2c_dev->dev, "bus not free\n"); + ret = -EBUSY; + } + + return ret; +} + +/** + * stm32f4_i2c_write_ byte() - Write a byte in the data register + * @i2c_dev: Controller's private data + * @byte: Data to write in the register + */ +static void stm32f4_i2c_write_byte(struct stm32f4_i2c_dev *i2c_dev, u8 byte) +{ + writel_relaxed(byte, i2c_dev->base + STM32F4_I2C_DR); +} + +/** + * stm32f4_i2c_write_msg() - Fill the data register in write mode + * @i2c_dev: Controller's private data + * + * This function fills the data register with I2C transfer buffer + */ +static void stm32f4_i2c_write_msg(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + + stm32f4_i2c_write_byte(i2c_dev, *msg->buf++); + msg->count--; +} + +static void stm32f4_i2c_read_msg(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = 
&i2c_dev->msg; + u32 rbuf; + + rbuf = readl_relaxed(i2c_dev->base + STM32F4_I2C_DR); + *msg->buf++ = rbuf; + msg->count--; +} + +static void stm32f4_i2c_terminate_xfer(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; + + stm32f4_i2c_disable_irq(i2c_dev); + + reg = i2c_dev->base + STM32F4_I2C_CR1; + if (msg->stop) + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); + else + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); + + complete(&i2c_dev->complete); +} + +/** + * stm32f4_i2c_handle_write() - Handle FIFO empty interrupt in case of write + * @i2c_dev: Controller's private data + */ +static void stm32f4_i2c_handle_write(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; + + if (msg->count) { + stm32f4_i2c_write_msg(i2c_dev); + if (!msg->count) { + /* + * Disable buffer interrupts for RX not empty and TX + * empty events + */ + stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_ITBUFEN); + } + } else { + stm32f4_i2c_terminate_xfer(i2c_dev); + } +} + +/** + * stm32f4_i2c_handle_read() - Handle FIFO empty interrupt in case of read + * @i2c_dev: Controller's private data + * + * This function is called when a new data is received in data register + */ +static void stm32f4_i2c_handle_read(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; + + switch (msg->count) { + case 1: + stm32f4_i2c_disable_irq(i2c_dev); + stm32f4_i2c_read_msg(i2c_dev); + complete(&i2c_dev->complete); + break; + /* + * For 2-byte reception, 3-byte reception and for Data N-2, N-1 and N + * for N-byte reception with N > 3, we do not have to read the data + * register when RX not empty event occurs as we have to wait for byte + * transferred finished event before reading data. + * So, here we just disable buffer interrupt in order to avoid another + * system preemption due to RX not empty event. + */ + case 2: + case 3: + stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_ITBUFEN); + break; + /* + * For N byte reception with N > 3 we directly read data register + * until N-2 data. + */ + default: + stm32f4_i2c_read_msg(i2c_dev); + } +} + +/** + * stm32f4_i2c_handle_rx_done() - Handle byte transfer finished interrupt + * in case of read + * @i2c_dev: Controller's private data + * + * This function is called when a new data is received in the shift register + * but data register has not been read yet. + */ +static void stm32f4_i2c_handle_rx_done(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + void __iomem *reg; + u32 mask; + int i; + + switch (msg->count) { + case 2: + /* + * In order to correctly send the Stop or Repeated Start + * condition on the I2C bus, the STOP/START bit has to be set + * before reading the last two bytes (data N-1 and N). 
+ * After that, we could read the last two bytes, disable + * remaining interrupts and notify the end of xfer to the + * client + */ + reg = i2c_dev->base + STM32F4_I2C_CR1; + if (msg->stop) + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); + else + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); + + for (i = 2; i > 0; i--) + stm32f4_i2c_read_msg(i2c_dev); + + reg = i2c_dev->base + STM32F4_I2C_CR2; + mask = STM32F4_I2C_CR2_ITEVTEN | STM32F4_I2C_CR2_ITERREN; + stm32f4_i2c_clr_bits(reg, mask); + + complete(&i2c_dev->complete); + break; + case 3: + /* + * In order to correctly generate the NACK pulse after the last + * received data byte, we have to enable NACK before reading N-2 + * data + */ + reg = i2c_dev->base + STM32F4_I2C_CR1; + stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR1_ACK); + stm32f4_i2c_read_msg(i2c_dev); + break; + default: + stm32f4_i2c_read_msg(i2c_dev); + } +} + +/** + * stm32f4_i2c_handle_rx_addr() - Handle address matched interrupt in case of + * master receiver + * @i2c_dev: Controller's private data + */ +static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev) +{ + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + u32 cr1; + + switch (msg->count) { + case 0: + stm32f4_i2c_terminate_xfer(i2c_dev); + + /* Clear ADDR flag */ + readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); + break; + case 1: + /* + * Single byte reception: + * Enable NACK and reset POS (Acknowledge position). + * Then, clear ADDR flag and set STOP or RepSTART. + * In that way, the NACK and STOP or RepStart pulses will be + * sent as soon as the byte will be received in shift register + */ + cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); + cr1 &= ~(STM32F4_I2C_CR1_ACK | STM32F4_I2C_CR1_POS); + writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); + + readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); + + if (msg->stop) + cr1 |= STM32F4_I2C_CR1_STOP; + else + cr1 |= STM32F4_I2C_CR1_START; + writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); + break; + case 2: + /* + * 2-byte reception: + * Enable NACK, set POS (NACK position) and clear ADDR flag. + * In that way, NACK will be sent for the next byte which will + * be received in the shift register instead of the current + * one. + */ + cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); + cr1 &= ~STM32F4_I2C_CR1_ACK; + cr1 |= STM32F4_I2C_CR1_POS; + writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); + + readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); + break; + + default: + /* + * N-byte reception: + * Enable ACK, reset POS (ACK postion) and clear ADDR flag. 
+ * In that way, ACK will be sent as soon as the current byte + * will be received in the shift register + */ + cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); + cr1 |= STM32F4_I2C_CR1_ACK; + cr1 &= ~STM32F4_I2C_CR1_POS; + writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); + + readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); + break; + } +} + +/** + * stm32f4_i2c_isr_event() - Interrupt routine for I2C bus event + * @irq: interrupt number + * @data: Controller's private data + */ +static irqreturn_t stm32f4_i2c_isr_event(int irq, void *data) +{ + struct stm32f4_i2c_dev *i2c_dev = data; + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + u32 possible_status = STM32F4_I2C_SR1_ITEVTEN_MASK; + u32 status, ien, event, cr2; + + cr2 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR2); + ien = cr2 & STM32F4_I2C_CR2_IRQ_MASK; + + /* Update possible_status if buffer interrupt is enabled */ + if (ien & STM32F4_I2C_CR2_ITBUFEN) + possible_status |= STM32F4_I2C_SR1_ITBUFEN_MASK; + + status = readl_relaxed(i2c_dev->base + STM32F4_I2C_SR1); + event = status & possible_status; + if (!event) { + dev_dbg(i2c_dev->dev, + "spurious evt irq (status=0x%08x, ien=0x%08x)\n", + status, ien); + return IRQ_NONE; + } + + /* Start condition generated */ + if (event & STM32F4_I2C_SR1_SB) + stm32f4_i2c_write_byte(i2c_dev, msg->addr); + + /* I2C Address sent */ + if (event & STM32F4_I2C_SR1_ADDR) { + if (msg->addr & I2C_M_RD) + stm32f4_i2c_handle_rx_addr(i2c_dev); + else + readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); + + /* + * Enable buffer interrupts for RX not empty and TX empty + * events + */ + cr2 |= STM32F4_I2C_CR2_ITBUFEN; + writel_relaxed(cr2, i2c_dev->base + STM32F4_I2C_CR2); + } + + /* TX empty */ + if ((event & STM32F4_I2C_SR1_TXE) && !(msg->addr & I2C_M_RD)) + stm32f4_i2c_handle_write(i2c_dev); + + /* RX not empty */ + if ((event & STM32F4_I2C_SR1_RXNE) && (msg->addr & I2C_M_RD)) + stm32f4_i2c_handle_read(i2c_dev); + + /* + * The BTF (Byte Transfer finished) event occurs when: + * - in reception : a new byte is received in the shift register + * but the previous byte has not been read yet from data register + * - in transmission: a new byte should be sent but the data register + * has not been written yet + */ + if (event & STM32F4_I2C_SR1_BTF) { + if (msg->addr & I2C_M_RD) + stm32f4_i2c_handle_rx_done(i2c_dev); + else + stm32f4_i2c_handle_write(i2c_dev); + } + + return IRQ_HANDLED; +} + +/** + * stm32f4_i2c_isr_error() - Interrupt routine for I2C bus error + * @irq: interrupt number + * @data: Controller's private data + */ +static irqreturn_t stm32f4_i2c_isr_error(int irq, void *data) +{ + struct stm32f4_i2c_dev *i2c_dev = data; + struct stm32f4_i2c_msg *msg = &i2c_dev->msg; + void __iomem *reg; + u32 status; + + status = readl_relaxed(i2c_dev->base + STM32F4_I2C_SR1); + + /* Arbitration lost */ + if (status & STM32F4_I2C_SR1_ARLO) { + status &= ~STM32F4_I2C_SR1_ARLO; + writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); + msg->result = -EAGAIN; + } + + /* + * Acknowledge failure: + * In master transmitter mode a Stop must be generated by software + */ + if (status & STM32F4_I2C_SR1_AF) { + if (!(msg->addr & I2C_M_RD)) { + reg = i2c_dev->base + STM32F4_I2C_CR1; + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); + } + status &= ~STM32F4_I2C_SR1_AF; + writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); + msg->result = -EIO; + } + + /* Bus error */ + if (status & STM32F4_I2C_SR1_BERR) { + status &= ~STM32F4_I2C_SR1_BERR; + writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); + msg->result = 
-EIO; + } + + stm32f4_i2c_disable_irq(i2c_dev); + complete(&i2c_dev->complete); + + return IRQ_HANDLED; +} + +/** + * stm32f4_i2c_xfer_msg() - Transfer a single I2C message + * @i2c_dev: Controller's private data + * @msg: I2C message to transfer + * @is_first: first message of the sequence + * @is_last: last message of the sequence + */ +static int stm32f4_i2c_xfer_msg(struct stm32f4_i2c_dev *i2c_dev, + struct i2c_msg *msg, bool is_first, + bool is_last) +{ + struct stm32f4_i2c_msg *f4_msg = &i2c_dev->msg; + void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR1; + unsigned long timeout; + u32 mask; + int ret; + + f4_msg->addr = i2c_8bit_addr_from_msg(msg); + f4_msg->buf = msg->buf; + f4_msg->count = msg->len; + f4_msg->result = 0; + f4_msg->stop = is_last; + + reinit_completion(&i2c_dev->complete); + + /* Enable events and errors interrupts */ + mask = STM32F4_I2C_CR2_ITEVTEN | STM32F4_I2C_CR2_ITERREN; + stm32f4_i2c_set_bits(i2c_dev->base + STM32F4_I2C_CR2, mask); + + if (is_first) { + ret = stm32f4_i2c_wait_free_bus(i2c_dev); + if (ret) + return ret; + + /* START generation */ + stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); + } + + timeout = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = f4_msg->result; + + if (!timeout) + ret = -ETIMEDOUT; + + return ret; +} + +/** + * stm32f4_i2c_xfer() - Transfer combined I2C message + * @i2c_adap: Adapter pointer to the controller + * @msgs: Pointer to data to be written. + * @num: Number of messages to be executed + */ +static int stm32f4_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], + int num) +{ + struct stm32f4_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + int ret, i; + + ret = clk_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to enable clock\n"); + return ret; + } + + for (i = 0; i < num && !ret; i++) + ret = stm32f4_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, + i == num - 1); + + clk_disable(i2c_dev->clk); + + return (ret < 0) ? 
ret : num; +} + +static u32 stm32f4_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm stm32f4_i2c_algo = { + .master_xfer = stm32f4_i2c_xfer, + .functionality = stm32f4_i2c_func, +}; + +static int stm32f4_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct stm32f4_i2c_dev *i2c_dev; + struct resource *res; + u32 irq_event, irq_error, clk_rate; + struct i2c_adapter *adap; + struct reset_control *rst; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + irq_event = irq_of_parse_and_map(np, 0); + if (!irq_event) { + dev_err(&pdev->dev, "IRQ event missing or invalid\n"); + return -EINVAL; + } + + irq_error = irq_of_parse_and_map(np, 1); + if (!irq_error) { + dev_err(&pdev->dev, "IRQ error missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Error: Missing controller clock\n"); + return PTR_ERR(i2c_dev->clk); + } + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + rst = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(rst)) { + dev_err(&pdev->dev, "Error: Missing controller reset\n"); + ret = PTR_ERR(rst); + goto clk_free; + } + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + + i2c_dev->speed = STM32F4_I2C_SPEED_STANDARD; + ret = of_property_read_u32(np, "clock-frequency", &clk_rate); + if (!ret && clk_rate >= 400000) + i2c_dev->speed = STM32F4_I2C_SPEED_FAST; + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_irq(&pdev->dev, irq_event, stm32f4_i2c_isr_event, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq event %i\n", + irq_event); + goto clk_free; + } + + ret = devm_request_irq(&pdev->dev, irq_error, stm32f4_i2c_isr_error, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq error %i\n", + irq_error); + goto clk_free; + } + + ret = stm32f4_i2c_hw_config(i2c_dev); + if (ret) + goto clk_free; + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "STM32 I2C(%pa)", &res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 0; + adap->algo = &stm32f4_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret = i2c_add_adapter(adap); + if (ret) + goto clk_free; + + platform_set_drvdata(pdev, i2c_dev); + + clk_disable(i2c_dev->clk); + + dev_info(i2c_dev->dev, "STM32F4 I2C driver registered\n"); + + return 0; + +clk_free: + clk_disable_unprepare(i2c_dev->clk); + return ret; +} + +static int stm32f4_i2c_remove(struct platform_device *pdev) +{ + struct stm32f4_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + clk_unprepare(i2c_dev->clk); + + return 0; +} + +static const struct of_device_id stm32f4_i2c_match[] = { + { .compatible = "st,stm32f4-i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32f4_i2c_match); + +static struct platform_driver stm32f4_i2c_driver = { + .driver = { + .name = "stm32f4-i2c", + .of_match_table = stm32f4_i2c_match, + }, + .probe = stm32f4_i2c_probe, + .remove = 
stm32f4_i2c_remove, +}; + +module_platform_driver(stm32f4_i2c_driver); + +MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32F4 I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c new file mode 100644 index 000000000000..9eed69d5e17e --- /dev/null +++ b/drivers/i2c/busses/i2c-tegra-bpmp.c @@ -0,0 +1,346 @@ +/* + * drivers/i2c/busses/i2c-tegra-bpmp.c + * + * Copyright (c) 2016 NVIDIA Corporation. All rights reserved. + * + * Author: Shardar Shariff Md <smohammed@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <soc/tegra/bpmp-abi.h> +#include <soc/tegra/bpmp.h> + +/* + * Serialized I2C message header size is 6 bytes and includes address, flags + * and length + */ +#define SERIALI2C_HDR_SIZE 6 + +struct tegra_bpmp_i2c { + struct i2c_adapter adapter; + struct device *dev; + + struct tegra_bpmp *bpmp; + unsigned int bus; +}; + +/* + * Linux flags are translated to BPMP defined I2C flags that are used in BPMP + * firmware I2C driver to avoid any issues in future if Linux I2C flags are + * changed. + */ +static int tegra_bpmp_xlate_flags(u16 flags, u16 *out) +{ + if (flags & I2C_M_TEN) { + *out |= SERIALI2C_TEN; + flags &= ~I2C_M_TEN; + } + + if (flags & I2C_M_RD) { + *out |= SERIALI2C_RD; + flags &= ~I2C_M_RD; + } + + if (flags & I2C_M_STOP) { + *out |= SERIALI2C_STOP; + flags &= ~I2C_M_STOP; + } + + if (flags & I2C_M_NOSTART) { + *out |= SERIALI2C_NOSTART; + flags &= ~I2C_M_NOSTART; + } + + if (flags & I2C_M_REV_DIR_ADDR) { + *out |= SERIALI2C_REV_DIR_ADDR; + flags &= ~I2C_M_REV_DIR_ADDR; + } + + if (flags & I2C_M_IGNORE_NAK) { + *out |= SERIALI2C_IGNORE_NAK; + flags &= ~I2C_M_IGNORE_NAK; + } + + if (flags & I2C_M_NO_RD_ACK) { + *out |= SERIALI2C_NO_RD_ACK; + flags &= ~I2C_M_NO_RD_ACK; + } + + if (flags & I2C_M_RECV_LEN) { + *out |= SERIALI2C_RECV_LEN; + flags &= ~I2C_M_RECV_LEN; + } + + return (flags != 0) ? -EINVAL : 0; +} + +/** + * The serialized I2C format is simply the following: + * [addr little-endian][flags little-endian][len little-endian][data if write] + * [addr little-endian][flags little-endian][len little-endian][data if write] + * ... + * + * The flags are translated from Linux kernel representation to seriali2c + * representation. Any undefined flag being set causes an error. + * + * The data is there only for writes. Reads have the data transferred in the + * other direction, and thus data is not present. + * + * See deserialize_i2c documentation for the data format in the other direction. 
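To make the format above concrete, here is a hypothetical request buffer (values invented) for a single 2-byte write to 7-bit address 0x50 with no special flags, matching what the serializer below produces: six little-endian header bytes followed by the payload, 8 bytes in total.

#include <linux/types.h>

/* addr lo/hi, flags lo/hi, len lo/hi, then write payload (reads carry no data here) */
static const u8 example_request_frame[] = {
	0x50, 0x00,	/* addr  = 0x0050 */
	0x00, 0x00,	/* flags = 0, plain write */
	0x02, 0x00,	/* len   = 2 */
	0xaa, 0xbb,	/* data bytes for the write */
};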
+ */ +static int tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c, + struct mrq_i2c_request *request, + struct i2c_msg *msgs, + unsigned int num) +{ + char *buf = request->xfer.data_buf; + unsigned int i, j, pos = 0; + int err; + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + u16 flags = 0; + + err = tegra_bpmp_xlate_flags(msg->flags, &flags); + if (err < 0) + return err; + + buf[pos++] = msg->addr & 0xff; + buf[pos++] = (msg->addr & 0xff00) >> 8; + buf[pos++] = flags & 0xff; + buf[pos++] = (flags & 0xff00) >> 8; + buf[pos++] = msg->len & 0xff; + buf[pos++] = (msg->len & 0xff00) >> 8; + + if ((flags & SERIALI2C_RD) == 0) { + for (j = 0; j < msg->len; j++) + buf[pos++] = msg->buf[j]; + } + } + + request->xfer.data_size = pos; + + return 0; +} + +/** + * The data in the BPMP -> CPU direction is composed of sequential blocks for + * those messages that have I2C_M_RD. So, for example, if you have: + * + * - !I2C_M_RD, len == 5, data == a0 01 02 03 04 + * - !I2C_M_RD, len == 1, data == a0 + * - I2C_M_RD, len == 2, data == [uninitialized buffer 1] + * - !I2C_M_RD, len == 1, data == a2 + * - I2C_M_RD, len == 2, data == [uninitialized buffer 2] + * + * ...then the data in the BPMP -> CPU direction would be 4 bytes total, and + * would contain 2 bytes that will go to uninitialized buffer 1, and 2 bytes + * that will go to uninitialized buffer 2. + */ +static int tegra_bpmp_i2c_deserialize(struct tegra_bpmp_i2c *i2c, + struct mrq_i2c_response *response, + struct i2c_msg *msgs, + unsigned int num) +{ + size_t size = response->xfer.data_size, len = 0, pos = 0; + char *buf = response->xfer.data_buf; + unsigned int i; + + for (i = 0; i < num; i++) + if (msgs[i].flags & I2C_M_RD) + len += msgs[i].len; + + if (len != size) + return -EINVAL; + + for (i = 0; i < num; i++) { + if (msgs[i].flags & I2C_M_RD) { + memcpy(msgs[i].buf, buf + pos, msgs[i].len); + pos += msgs[i].len; + } + } + + return 0; +} + +static int tegra_bpmp_i2c_msg_len_check(struct i2c_msg *msgs, unsigned int num) +{ + size_t tx_len = 0, rx_len = 0; + unsigned int i; + + for (i = 0; i < num; i++) + if (!(msgs[i].flags & I2C_M_RD)) + tx_len += SERIALI2C_HDR_SIZE + msgs[i].len; + + if (tx_len > TEGRA_I2C_IPC_MAX_IN_BUF_SIZE) + return -EINVAL; + + for (i = 0; i < num; i++) + if ((msgs[i].flags & I2C_M_RD)) + rx_len += msgs[i].len; + + if (rx_len > TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE) + return -EINVAL; + + return 0; +} + +static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c, + struct mrq_i2c_request *request, + struct mrq_i2c_response *response) +{ + struct tegra_bpmp_message msg; + int err; + + request->cmd = CMD_I2C_XFER; + request->xfer.bus_id = i2c->bus; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_I2C; + msg.tx.data = request; + msg.tx.size = sizeof(*request); + msg.rx.data = response; + msg.rx.size = sizeof(*response); + + if (irqs_disabled()) + err = tegra_bpmp_transfer_atomic(i2c->bpmp, &msg); + else + err = tegra_bpmp_transfer(i2c->bpmp, &msg); + + return err; +} + +static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + struct tegra_bpmp_i2c *i2c = i2c_get_adapdata(adapter); + struct mrq_i2c_response response; + struct mrq_i2c_request request; + int err; + + err = tegra_bpmp_i2c_msg_len_check(msgs, num); + if (err < 0) { + dev_err(i2c->dev, "unsupported message length\n"); + return err; + } + + memset(&request, 0, sizeof(request)); + memset(&response, 0, sizeof(response)); + + err = tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num); + if (err < 0) { + 
dev_err(i2c->dev, "failed to serialize message: %d\n", err); + return err; + } + + err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response); + if (err < 0) { + dev_err(i2c->dev, "failed to transfer message: %d\n", err); + return err; + } + + err = tegra_bpmp_i2c_deserialize(i2c, &response, msgs, num); + if (err < 0) { + dev_err(i2c->dev, "failed to deserialize message: %d\n", err); + return err; + } + + return num; +} + +static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | + I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART; +} + +static const struct i2c_algorithm tegra_bpmp_i2c_algo = { + .master_xfer = tegra_bpmp_i2c_xfer, + .functionality = tegra_bpmp_i2c_func, +}; + +static int tegra_bpmp_i2c_probe(struct platform_device *pdev) +{ + struct tegra_bpmp_i2c *i2c; + u32 value; + int err; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + i2c->dev = &pdev->dev; + + i2c->bpmp = dev_get_drvdata(pdev->dev.parent); + if (!i2c->bpmp) + return -ENODEV; + + err = of_property_read_u32(pdev->dev.of_node, "nvidia,bpmp-bus-id", + &value); + if (err < 0) + return err; + + i2c->bus = value; + + i2c_set_adapdata(&i2c->adapter, i2c); + i2c->adapter.owner = THIS_MODULE; + strlcpy(i2c->adapter.name, "Tegra BPMP I2C adapter", + sizeof(i2c->adapter.name)); + i2c->adapter.algo = &tegra_bpmp_i2c_algo; + i2c->adapter.dev.parent = &pdev->dev; + i2c->adapter.dev.of_node = pdev->dev.of_node; + + platform_set_drvdata(pdev, i2c); + + return i2c_add_adapter(&i2c->adapter); +} + +static int tegra_bpmp_i2c_remove(struct platform_device *pdev) +{ + struct tegra_bpmp_i2c *i2c = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c->adapter); + + return 0; +} + +static const struct of_device_id tegra_bpmp_i2c_of_match[] = { + { .compatible = "nvidia,tegra186-bpmp-i2c", }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra_bpmp_i2c_of_match); + +static struct platform_driver tegra_bpmp_i2c_driver = { + .driver = { + .name = "tegra-bpmp-i2c", + .of_match_table = tegra_bpmp_i2c_of_match, + }, + .probe = tegra_bpmp_i2c_probe, + .remove = tegra_bpmp_i2c_remove, +}; +module_platform_driver(tegra_bpmp_i2c_driver); + +MODULE_DESCRIPTION("NVIDIA Tegra BPMP I2C bus contoller driver"); +MODULE_AUTHOR("Shardar Shariff Md <smohammed@nvidia.com>"); +MODULE_AUTHOR("Juha-Matti Tilli"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c index bba5b429f69c..1d4c2beacf2e 100644 --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c @@ -188,11 +188,11 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev, i2c->hlc_int_enable = thunder_i2c_hlc_int_enable; i2c->hlc_int_disable = thunder_i2c_hlc_int_disable; - ret = pci_enable_msix(pdev, &i2c->i2c_msix, 1); - if (ret) + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (ret < 0) goto error; - ret = devm_request_irq(dev, i2c->i2c_msix.vector, octeon_i2c_isr, 0, + ret = devm_request_irq(dev, pci_irq_vector(pdev, 0), octeon_i2c_isr, 0, DRV_NAME, i2c); if (ret) goto error; diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 0ab1e55558bc..dbe7e44c9321 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -372,7 +372,7 @@ static u32 xgene_slimpro_i2c_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_I2C_BLOCK; } -static struct i2c_algorithm xgene_slimpro_i2c_algorithm = { +static 
const struct i2c_algorithm xgene_slimpro_i2c_algorithm = { .smbus_xfer = xgene_slimpro_i2c_xfer, .functionality = xgene_slimpro_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 84a8b2eccffb..66b464d52c9c 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -334,7 +334,7 @@ static u32 xlp9xx_i2c_functionality(struct i2c_adapter *adapter) I2C_FUNC_10BIT_ADDR; } -static struct i2c_algorithm xlp9xx_i2c_algo = { +static const struct i2c_algorithm xlp9xx_i2c_algo = { .master_xfer = xlp9xx_i2c_xfer, .functionality = xlp9xx_i2c_functionality, }; diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c index ad17d88d8573..484bfa15d58e 100644 --- a/drivers/i2c/busses/i2c-xlr.c +++ b/drivers/i2c/busses/i2c-xlr.c @@ -335,7 +335,7 @@ static u32 xlr_func(struct i2c_adapter *adap) return (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_I2C; } -static struct i2c_algorithm xlr_i2c_algo = { +static const struct i2c_algorithm xlr_i2c_algo = { .master_xfer = xlr_i2c_xfer, .functionality = xlr_func, }; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index bfb6ba7cac00..d2402bbf6729 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -3705,6 +3705,39 @@ int i2c_slave_unregister(struct i2c_client *client) return ret; } EXPORT_SYMBOL_GPL(i2c_slave_unregister); + +/** + * i2c_detect_slave_mode - detect operation mode + * @dev: The device owning the bus + * + * This checks the device nodes for an I2C slave by checking the address + * used in the reg property. If the address matches the I2C_OWN_SLAVE_ADDRESS + * flag, this means the device is configured to act as an I2C slave and it will + * be listening at that address. + * + * Returns true if an I2C own slave address is detected, otherwise returns + * false. + */ +bool i2c_detect_slave_mode(struct device *dev) +{ + if (IS_BUILTIN(CONFIG_OF) && dev->of_node) { + struct device_node *child; + u32 reg; + + for_each_child_of_node(dev->of_node, child) { + of_property_read_u32(child, "reg", &reg); + if (reg & I2C_OWN_SLAVE_ADDRESS) { + of_node_put(child); + return true; + } + } + } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) { + dev_dbg(dev, "ACPI slave is not supported yet\n"); + } + return false; +} +EXPORT_SYMBOL_GPL(i2c_detect_slave_mode); + #endif MODULE_AUTHOR("Simon G. 
Vogl <simon@tk.uni-linz.ac.at>"); diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c index b7ca249ec9c3..e53f2abd1350 100644 --- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c +++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c @@ -40,7 +40,6 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/version.h> #include <linux/i2c/mlxcpld.h> #define CPLD_MUX_MAX_NCHANS 8 diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index 4ea7e691afc7..77840f7845a1 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -90,6 +90,7 @@ static const struct of_device_id pca9541_of_match[] = { { .compatible = "nxp,pca9541" }, {} }; +MODULE_DEVICE_TABLE(of, pca9541_of_match); #endif /* diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index dd18b9ccb1f4..dfc1c0e37c40 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -41,14 +41,20 @@ #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/i2c/pca954x.h> +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/pm.h> #include <linux/slab.h> +#include <linux/spinlock.h> #define PCA954X_MAX_NCHANS 8 +#define PCA954X_IRQ_OFFSET 4 + enum pca_type { pca_9540, pca_9542, @@ -63,6 +69,7 @@ enum pca_type { struct chip_desc { u8 nchans; u8 enable; /* used for muxes only */ + u8 has_irq; enum muxtype { pca954x_ismux = 0, pca954x_isswi @@ -75,6 +82,10 @@ struct pca954x { u8 last_chan; /* last register value */ u8 deselect; struct i2c_client *client; + + struct irq_domain *irq; + unsigned int irq_mask; + spinlock_t lock; }; /* Provide specs for the PCA954x types we know about */ @@ -84,17 +95,26 @@ static const struct chip_desc chips[] = { .enable = 0x4, .muxtype = pca954x_ismux, }, + [pca_9542] = { + .nchans = 2, + .enable = 0x4, + .has_irq = 1, + .muxtype = pca954x_ismux, + }, [pca_9543] = { .nchans = 2, + .has_irq = 1, .muxtype = pca954x_isswi, }, [pca_9544] = { .nchans = 4, .enable = 0x4, + .has_irq = 1, .muxtype = pca954x_ismux, }, [pca_9545] = { .nchans = 4, + .has_irq = 1, .muxtype = pca954x_isswi, }, [pca_9547] = { @@ -110,7 +130,7 @@ static const struct chip_desc chips[] = { static const struct i2c_device_id pca954x_id[] = { { "pca9540", pca_9540 }, - { "pca9542", pca_9540 }, + { "pca9542", pca_9542 }, { "pca9543", pca_9543 }, { "pca9544", pca_9544 }, { "pca9545", pca_9545 }, @@ -124,7 +144,7 @@ MODULE_DEVICE_TABLE(i2c, pca954x_id); #ifdef CONFIG_ACPI static const struct acpi_device_id pca954x_acpi_ids[] = { { .id = "PCA9540", .driver_data = pca_9540 }, - { .id = "PCA9542", .driver_data = pca_9540 }, + { .id = "PCA9542", .driver_data = pca_9542 }, { .id = "PCA9543", .driver_data = pca_9543 }, { .id = "PCA9544", .driver_data = pca_9544 }, { .id = "PCA9545", .driver_data = pca_9545 }, @@ -148,6 +168,7 @@ static const struct of_device_id pca954x_of_match[] = { { .compatible = "nxp,pca9548", .data = &chips[pca_9548] }, {} }; +MODULE_DEVICE_TABLE(of, pca954x_of_match); #endif /* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() @@ -217,6 +238,114 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan) return pca954x_reg_write(muxc->parent, client, data->last_chan); } +static irqreturn_t pca954x_irq_handler(int irq, void *dev_id) +{ + struct pca954x *data = dev_id; + unsigned int child_irq; + int ret, i, handled = 0; + + ret = i2c_smbus_read_byte(data->client); + if (ret < 0) + return IRQ_NONE; + + for (i = 0; i < data->chip->nchans; i++) { + if (ret & BIT(PCA954X_IRQ_OFFSET + i)) { + child_irq = irq_linear_revmap(data->irq, i); + handle_nested_irq(child_irq); + handled++; + } + } + return handled ? IRQ_HANDLED : IRQ_NONE; +} + +static void pca954x_irq_mask(struct irq_data *idata) +{ + struct pca954x *data = irq_data_get_irq_chip_data(idata); + unsigned int pos = idata->hwirq; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + data->irq_mask &= ~BIT(pos); + if (!data->irq_mask) + disable_irq(data->client->irq); + + spin_unlock_irqrestore(&data->lock, flags); +} + +static void pca954x_irq_unmask(struct irq_data *idata) +{ + struct pca954x *data = irq_data_get_irq_chip_data(idata); + unsigned int pos = idata->hwirq; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!data->irq_mask) + enable_irq(data->client->irq); + data->irq_mask |= BIT(pos); + + spin_unlock_irqrestore(&data->lock, flags); +} + +static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type) +{ + if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_LOW) + return -EINVAL; + return 0; +} + +static struct irq_chip pca954x_irq_chip = { + .name = "i2c-mux-pca954x", + .irq_mask = pca954x_irq_mask, + .irq_unmask = pca954x_irq_unmask, + .irq_set_type = pca954x_irq_set_type, +}; + +static int pca954x_irq_setup(struct i2c_mux_core *muxc) +{ + struct pca954x *data = i2c_mux_priv(muxc); + struct i2c_client *client = data->client; + int c, err, irq; + + if (!data->chip->has_irq || client->irq <= 0) + return 0; + + spin_lock_init(&data->lock); + + data->irq = irq_domain_add_linear(client->dev.of_node, + data->chip->nchans, + &irq_domain_simple_ops, data); + if (!data->irq) + return -ENODEV; + + for (c = 0; c < data->chip->nchans; c++) { + irq = irq_create_mapping(data->irq, c); + irq_set_chip_data(irq, data); + irq_set_chip_and_handler(irq, &pca954x_irq_chip, + handle_simple_irq); + } + + err = devm_request_threaded_irq(&client->dev, data->client->irq, NULL, + pca954x_irq_handler, + IRQF_ONESHOT | IRQF_SHARED, + "pca954x", data); + if (err) + goto err_req_irq; + + disable_irq(data->client->irq); + + return 0; +err_req_irq: + for (c = 0; c < data->chip->nchans; c++) { + irq = irq_find_mapping(data->irq, c); + irq_dispose_mapping(irq); + } + irq_domain_remove(data->irq); + + return err; +} + /* * I2C init/probing/exit functions */ @@ -281,6 +410,10 @@ static int pca954x_probe(struct i2c_client *client, idle_disconnect_dt = of_node && of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); + ret = pca954x_irq_setup(muxc); + if (ret) + goto fail_del_adapters; + /* Now create an adapter for each channel */ for (num = 0; num < data->chip->nchans; num++) { bool idle_disconnect_pd = false; @@ -306,7 +439,7 @@ static int pca954x_probe(struct i2c_client *client, dev_err(&client->dev, "failed to register multiplexed adapter" " %d as bus %d\n", num, force); - goto virt_reg_failed; + goto fail_del_adapters; } } @@ -317,7 +450,7 @@ static int pca954x_probe(struct i2c_client *client, return 0; -virt_reg_failed: +fail_del_adapters: i2c_mux_del_adapters(muxc); return 
ret; } @@ -325,6 +458,16 @@ virt_reg_failed: static int pca954x_remove(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); + struct pca954x *data = i2c_mux_priv(muxc); + int c, irq; + + if (data->irq) { + for (c = 0; c < data->chip->nchans; c++) { + irq = irq_find_mapping(data->irq, c); + irq_dispose_mapping(irq); + } + irq_domain_remove(data->irq); + } i2c_mux_del_adapters(muxc); return 0; diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index b6940992a6ff..968038482d2f 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -447,7 +447,7 @@ void ide_acpi_get_timing(ide_hwif_t *hwif) memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer, sizeof(struct GTM_buffer)); - DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%Zx\n", + DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%zx\n", out_obj->buffer.pointer, out_obj->buffer.length, sizeof(struct GTM_buffer)); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index aef00511ca86..74f1b7dc03f7 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> +#include <linux/sched/task_stack.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/seq_file.h> diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 3c1b7974d66d..d8a552b47718 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1136,7 +1136,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, ssize_t ret = 0; int rc; - ide_debug_log(IDE_DBG_FUNC, "count %Zd", count); + ide_debug_log(IDE_DBG_FUNC, "count %zd", count); if (tape->chrdev_dir != IDETAPE_DIR_READ) { if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags)) @@ -1195,7 +1195,7 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, if (tape->write_prot) return -EACCES; - ide_debug_log(IDE_DBG_FUNC, "count %Zd", count); + ide_debug_log(IDE_DBG_FUNC, "count %zd", count); /* Initialize write operation */ rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE); diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 247b9faccce1..4c0007cb74e3 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> +#include <linux/nmi.h> #include <linux/scatterlist.h> #include <linux/uaccess.h> diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 46427ea01753..157f2d1fb7e1 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -300,7 +300,7 @@ static const struct ide_port_ops palm_bk3710_ports_ops = { .cable_detect = palm_bk3710_cable_detect, }; -static struct ide_port_info palm_bk3710_port_info = { +static struct ide_port_info palm_bk3710_port_info __initdata = { .init_dma = palm_bk3710_init_dma, .port_ops = &palm_bk3710_ports_ops, .dma_ops = &sff_dma_ops, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7d8ea3d5fda6..5805b041dd0f 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -125,7 +125,7 @@ static struct cpuidle_state *cpuidle_state_table; */ static struct cpuidle_state nehalem_cstates[] = { { - .name = "C1-NHM", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 3, @@ -133,7 +133,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-NHM", + .name = "C1E", .desc = "MWAIT 
0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -141,7 +141,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-NHM", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, @@ -149,7 +149,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-NHM", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -162,7 +162,7 @@ static struct cpuidle_state nehalem_cstates[] = { static struct cpuidle_state snb_cstates[] = { { - .name = "C1-SNB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -170,7 +170,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SNB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -178,7 +178,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SNB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -186,7 +186,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SNB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, @@ -194,7 +194,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-SNB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, @@ -207,7 +207,7 @@ static struct cpuidle_state snb_cstates[] = { static struct cpuidle_state byt_cstates[] = { { - .name = "C1-BYT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -215,7 +215,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-BYT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -223,7 +223,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-BYT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 500, @@ -231,7 +231,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-BYT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -239,7 +239,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-BYT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -252,7 +252,7 @@ static struct cpuidle_state byt_cstates[] = { static struct cpuidle_state cht_cstates[] = { { - .name = "C1-CHT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -260,7 +260,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-CHT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 
80, @@ -268,7 +268,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-CHT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -276,7 +276,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-CHT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -284,7 +284,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-CHT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -297,7 +297,7 @@ static struct cpuidle_state cht_cstates[] = { static struct cpuidle_state ivb_cstates[] = { { - .name = "C1-IVB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -305,7 +305,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -313,7 +313,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -321,7 +321,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -329,7 +329,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-IVB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, @@ -342,7 +342,7 @@ static struct cpuidle_state ivb_cstates[] = { static struct cpuidle_state ivt_cstates[] = { { - .name = "C1-IVT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -350,7 +350,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -358,7 +358,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -366,7 +366,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 82, @@ -379,7 +379,7 @@ static struct cpuidle_state ivt_cstates[] = { static struct cpuidle_state ivt_cstates_4s[] = { { - .name = "C1-IVT-4S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -387,7 +387,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-4S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -395,7 +395,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, 
.enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-4S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -403,7 +403,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-4S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 84, @@ -416,7 +416,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { static struct cpuidle_state ivt_cstates_8s[] = { { - .name = "C1-IVT-8S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -424,7 +424,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-8S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -432,7 +432,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-8S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -440,7 +440,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-8S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 88, @@ -453,7 +453,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { static struct cpuidle_state hsw_cstates[] = { { - .name = "C1-HSW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -461,7 +461,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-HSW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -469,7 +469,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-HSW", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 33, @@ -477,7 +477,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-HSW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -485,7 +485,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-HSW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -493,7 +493,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-HSW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -501,7 +501,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-HSW", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -509,7 +509,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-HSW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -521,7 +521,7 @@ static struct cpuidle_state hsw_cstates[] = { }; static struct cpuidle_state 
bdw_cstates[] = { { - .name = "C1-BDW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -529,7 +529,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BDW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -537,7 +537,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-BDW", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 40, @@ -545,7 +545,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BDW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -553,7 +553,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BDW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -561,7 +561,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BDW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -569,7 +569,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BDW", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -577,7 +577,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BDW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -590,7 +590,7 @@ static struct cpuidle_state bdw_cstates[] = { static struct cpuidle_state skl_cstates[] = { { - .name = "C1-SKL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -598,7 +598,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKL", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -606,7 +606,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SKL", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 70, @@ -614,7 +614,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKL", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 85, @@ -622,7 +622,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-SKL", + .name = "C7s", .desc = "MWAIT 0x33", .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 124, @@ -630,7 +630,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-SKL", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -638,7 +638,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = 
"C9-SKL", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 480, @@ -646,7 +646,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-SKL", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 890, @@ -659,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = { static struct cpuidle_state skx_cstates[] = { { - .name = "C1-SKX", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -667,7 +667,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKX", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -675,7 +675,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKX", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -688,7 +688,7 @@ static struct cpuidle_state skx_cstates[] = { static struct cpuidle_state atom_cstates[] = { { - .name = "C1E-ATM", + .name = "C1E", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 10, @@ -696,7 +696,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C2-ATM", + .name = "C2", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10), .exit_latency = 20, @@ -704,7 +704,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-ATM", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -712,7 +712,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-ATM", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -724,7 +724,7 @@ static struct cpuidle_state atom_cstates[] = { }; static struct cpuidle_state tangier_cstates[] = { { - .name = "C1-TNG", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -732,7 +732,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-TNG", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -740,7 +740,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-TNG", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -748,7 +748,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-TNG", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -756,7 +756,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-TNG", + .name = "C9", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -768,7 +768,7 @@ static struct cpuidle_state tangier_cstates[] = { }; static struct cpuidle_state avn_cstates[] = { { - .name = "C1-AVN", + .name = "C1", .desc = "MWAIT 0x00", .flags = 
MWAIT2flg(0x00), .exit_latency = 2, @@ -776,7 +776,7 @@ static struct cpuidle_state avn_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-AVN", + .name = "C6", .desc = "MWAIT 0x51", .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 15, @@ -788,7 +788,7 @@ static struct cpuidle_state avn_cstates[] = { }; static struct cpuidle_state knl_cstates[] = { { - .name = "C1-KNL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -796,7 +796,7 @@ static struct cpuidle_state knl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze }, { - .name = "C6-KNL", + .name = "C6", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 120, @@ -809,7 +809,7 @@ static struct cpuidle_state knl_cstates[] = { static struct cpuidle_state bxt_cstates[] = { { - .name = "C1-BXT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -817,7 +817,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BXT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -825,7 +825,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BXT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -833,7 +833,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BXT", + .name = "C7s", .desc = "MWAIT 0x31", .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 155, @@ -841,7 +841,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BXT", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1000, @@ -849,7 +849,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BXT", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2000, @@ -857,7 +857,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BXT", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -870,7 +870,7 @@ static struct cpuidle_state bxt_cstates[] = { static struct cpuidle_state dnv_cstates[] = { { - .name = "C1-DNV", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -878,7 +878,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-DNV", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -886,7 +886,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-DNV", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 50, @@ -961,9 +961,9 @@ static void auto_demotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); msr_bits &= ~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + 
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } static void c1e_promotion_disable(void) { @@ -1273,7 +1273,7 @@ static void sklh_idle_state_table_update(void) if ((mwait_substates & (0xF << 28)) == 0) return; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); /* PC10 is not enabled in PKG C-state limit */ if ((msr & 0xF) != 8) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 4972986f6455..d2b465140a6b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -20,7 +20,7 @@ #include <linux/cdev.h> #include <linux/slab.h> #include <linux/poll.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/iio/iio.h> #include "iio_core.h" diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index edaae9f9853c..e426ac877d19 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -13,6 +13,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ multicast.o mad.o smi.o agent.o mad_rmpp.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o +ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o ib_cm-y := cm.o diff --git a/drivers/infiniband/core/cgroup.c b/drivers/infiniband/core/cgroup.c new file mode 100644 index 000000000000..126ac5f99db7 --- /dev/null +++ b/drivers/infiniband/core/cgroup.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "core_priv.h" + +/** + * ib_device_register_rdmacg - register with rdma cgroup. + * @device: device to register to participate in resource + * accounting by rdma cgroup. + * + * Register with the rdma cgroup. Should be called before + * exposing the rdma device to user space applications to avoid + * a resource accounting leak. + * Returns 0 on success, or a failure code otherwise. + */ +int ib_device_register_rdmacg(struct ib_device *device) +{ + device->cg_device.name = device->name; + return rdmacg_register_device(&device->cg_device); +} + +/** + * ib_device_unregister_rdmacg - unregister with rdma cgroup. + * @device: device to unregister. + * + * Unregister with the rdma cgroup. Should be called after + * all the resources are deallocated, and after a stage when any + * other resource allocation by a user application cannot be done + * for this device, to avoid any leak in accounting. 
+ */ +void ib_device_unregister_rdmacg(struct ib_device *device) +{ + rdmacg_unregister_device(&device->cg_device); +} + +int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index) +{ + return rdmacg_try_charge(&cg_obj->cg, &device->cg_device, + resource_index); +} +EXPORT_SYMBOL(ib_rdmacg_try_charge); + +void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index) +{ + rdmacg_uncharge(cg_obj->cg, &device->cg_device, + resource_index); +} +EXPORT_SYMBOL(ib_rdmacg_uncharge); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 912ab4cd6eae..cb7d372e4bdf 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -35,6 +35,7 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/cgroup_rdma.h> #include <rdma/ib_verbs.h> @@ -124,6 +125,35 @@ int ib_cache_setup_one(struct ib_device *device); void ib_cache_cleanup_one(struct ib_device *device); void ib_cache_release_one(struct ib_device *device); +#ifdef CONFIG_CGROUP_RDMA +int ib_device_register_rdmacg(struct ib_device *device); +void ib_device_unregister_rdmacg(struct ib_device *device); + +int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index); + +void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index); +#else +static inline int ib_device_register_rdmacg(struct ib_device *device) +{ return 0; } + +static inline void ib_device_unregister_rdmacg(struct ib_device *device) +{ } + +static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index) +{ return 0; } + +static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj, + struct ib_device *device, + enum rdmacg_resource_type resource_index) +{ } +#endif + static inline bool rdma_is_upper_dev_rcu(struct net_device *dev, struct net_device *upper) { diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f2e48655a906..593d2ce6ec7c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -333,6 +333,15 @@ int ib_register_device(struct ib_device *device, int ret; struct ib_client *client; struct ib_udata uhw = {.outlen = 0, .inlen = 0}; + struct device *parent = device->dev.parent; + + WARN_ON_ONCE(!parent); + if (!device->dev.dma_ops) + device->dev.dma_ops = parent->dma_ops; + if (!device->dev.dma_mask) + device->dev.dma_mask = parent->dma_mask; + if (!device->dev.coherent_dma_mask) + device->dev.coherent_dma_mask = parent->coherent_dma_mask; mutex_lock(&device_mutex); @@ -360,10 +369,18 @@ int ib_register_device(struct ib_device *device, goto out; } + ret = ib_device_register_rdmacg(device); + if (ret) { + pr_warn("Couldn't register device with rdma cgroup\n"); + ib_cache_cleanup_one(device); + goto out; + } + memset(&device->attrs, 0, sizeof(device->attrs)); ret = device->query_device(device, &device->attrs, &uhw); if (ret) { pr_warn("Couldn't query the device attributes\n"); + ib_device_unregister_rdmacg(device); ib_cache_cleanup_one(device); goto out; } @@ -372,6 +389,7 @@ int ib_register_device(struct ib_device *device, if (ret) { pr_warn("Couldn't register device %s with driver model\n", device->name); + ib_device_unregister_rdmacg(device); ib_cache_cleanup_one(device); 
goto out; } @@ -421,6 +439,7 @@ void ib_unregister_device(struct ib_device *device) mutex_unlock(&device_mutex); + ib_device_unregister_rdmacg(device); ib_device_unregister_sysfs(device); ib_cache_cleanup_one(device); diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index c1fb545e8d78..daadf3130c9f 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1258,7 +1258,7 @@ int ib_device_register_sysfs(struct ib_device *device, int ret; int i; - device->dev.parent = device->dma_device; + WARN_ON_ONCE(!device->dev.parent); ret = dev_set_name(class_dev, "%s", device->name); if (ret) return ret; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index e0a995b85a2d..cc0d51fb06e3 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1290,7 +1290,7 @@ static void ib_ucm_add_one(struct ib_device *device) goto err; ucm_dev->dev.class = &cm_class; - ucm_dev->dev.parent = device->dma_device; + ucm_dev->dev.parent = device->dev.parent; ucm_dev->dev.devt = ucm_dev->cdev.dev; ucm_dev->dev.release = ib_ucm_release_dev; dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 446b56a5260b..27f155d2df8d 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -34,7 +34,8 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/export.h> #include <linux/hugetlb.h> #include <linux/slab.h> diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index f2fc0431512d..cb2742b548bb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -32,6 +32,8 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/pid.h> #include <linux/slab.h> #include <linux/export.h> diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 249b403b43a4..aca7ff7abedc 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1188,7 +1188,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, if (cdev_add(&port->cdev, base, 1)) goto err_cdev; - port->dev = device_create(umad_class, device->dma_device, + port->dev = device_create(umad_class, device->dev.parent, port->cdev.dev, port, "umad%d", port->dev_num); if (IS_ERR(port->dev)) @@ -1207,7 +1207,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; - port->sm_dev = device_create(umad_class, device->dma_device, + port->sm_dev = device_create(umad_class, device->dev.parent, port->sm_cdev.dev, port, "issm%d", port->dev_num); if (IS_ERR(port->sm_dev)) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b4b395a054ac..7b7a76e1279a 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -316,6 +316,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, struct ib_udata udata; struct ib_ucontext *ucontext; struct file *filp; + struct ib_rdmacg_object cg_obj; int ret; if (out_len < sizeof resp) @@ -335,13 +336,18 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, (unsigned long) cmd.response + sizeof resp, in_len - sizeof cmd, out_len - sizeof resp); + ret = 
ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); + if (ret) + goto err; + ucontext = ib_dev->alloc_ucontext(ib_dev, &udata); if (IS_ERR(ucontext)) { ret = PTR_ERR(ucontext); - goto err; + goto err_alloc; } ucontext->device = ib_dev; + ucontext->cg_obj = cg_obj; INIT_LIST_HEAD(&ucontext->pd_list); INIT_LIST_HEAD(&ucontext->mr_list); INIT_LIST_HEAD(&ucontext->mw_list); @@ -407,6 +413,9 @@ err_free: put_pid(ucontext->tgid); ib_dev->dealloc_ucontext(ucontext); +err_alloc: + ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); + err: mutex_unlock(&file->mutex); return ret; @@ -561,6 +570,13 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, return -ENOMEM; init_uobj(uobj, 0, file->ucontext, &pd_lock_class); + ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) { + kfree(uobj); + return ret; + } + down_write(&uobj->mutex); pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata); @@ -605,6 +621,7 @@ err_idr: ib_dealloc_pd(pd); err: + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); put_uobj_write(uobj); return ret; } @@ -637,6 +654,8 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, if (ret) goto err_put; + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + uobj->live = 0; put_uobj_write(uobj); @@ -1006,6 +1025,10 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, goto err_put; } } + ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_charge; mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, cmd.access_flags, &udata); @@ -1054,6 +1077,9 @@ err_unreg: ib_dereg_mr(mr); err_put: + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + +err_charge: put_pd_read(pd); err_free: @@ -1178,6 +1204,8 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, if (ret) return ret; + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + idr_remove_uobj(&ib_uverbs_mr_idr, uobj); mutex_lock(&file->mutex); @@ -1226,6 +1254,11 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file, in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), out_len - sizeof(resp)); + ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_charge; + mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata); if (IS_ERR(mw)) { ret = PTR_ERR(mw); @@ -1271,6 +1304,9 @@ err_unalloc: uverbs_dealloc_mw(mw); err_put: + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + +err_charge: put_pd_read(pd); err_free: @@ -1306,6 +1342,8 @@ ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file, if (ret) return ret; + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + idr_remove_uobj(&ib_uverbs_mw_idr, uobj); mutex_lock(&file->mutex); @@ -1405,6 +1443,11 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags)) attr.flags = cmd->flags; + ret = ib_rdmacg_try_charge(&obj->uobject.cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_charge; + cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw); if (IS_ERR(cq)) { @@ -1452,6 +1495,10 @@ err_free: ib_destroy_cq(cq); err_file: + ib_rdmacg_uncharge(&obj->uobject.cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + +err_charge: if (ev_file) ib_uverbs_release_ucq(file, ev_file, obj); @@ -1732,6 +1779,8 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, if (ret) return ret; + 
ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + idr_remove_uobj(&ib_uverbs_cq_idr, uobj); mutex_lock(&file->mutex); @@ -1905,6 +1954,11 @@ static int create_qp(struct ib_uverbs_file *file, goto err_put; } + ret = ib_rdmacg_try_charge(&obj->uevent.uobject.cg_obj, device, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_put; + if (cmd->qp_type == IB_QPT_XRC_TGT) qp = ib_create_qp(pd, &attr); else @@ -1912,7 +1966,7 @@ static int create_qp(struct ib_uverbs_file *file, if (IS_ERR(qp)) { ret = PTR_ERR(qp); - goto err_put; + goto err_create; } if (cmd->qp_type != IB_QPT_XRC_TGT) { @@ -1993,6 +2047,10 @@ err_cb: err_destroy: ib_destroy_qp(qp); +err_create: + ib_rdmacg_uncharge(&obj->uevent.uobject.cg_obj, device, + RDMACG_RESOURCE_HCA_OBJECT); + err_put: if (xrcd) put_xrcd_read(xrcd_uobj); @@ -2519,6 +2577,8 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, if (ret) return ret; + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + if (obj->uxrcd) atomic_dec(&obj->uxrcd->refcnt); @@ -2970,11 +3030,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, memset(&attr.dmac, 0, sizeof(attr.dmac)); memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); + ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_charge; + ah = pd->device->create_ah(pd, &attr, &udata); if (IS_ERR(ah)) { ret = PTR_ERR(ah); - goto err_put; + goto err_create; } ah->device = pd->device; @@ -3013,7 +3078,10 @@ err_copy: err_destroy: ib_destroy_ah(ah); -err_put: +err_create: + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + +err_charge: put_pd_read(pd); err: @@ -3047,6 +3115,8 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, if (ret) return ret; + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + idr_remove_uobj(&ib_uverbs_ah_idr, uobj); mutex_lock(&file->mutex); @@ -3861,10 +3931,16 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, err = -EINVAL; goto err_free; } + + err = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (err) + goto err_free; + flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); if (IS_ERR(flow_id)) { err = PTR_ERR(flow_id); - goto err_free; + goto err_create; } flow_id->uobject = uobj; uobj->object = flow_id; @@ -3897,6 +3973,8 @@ err_copy: idr_remove_uobj(&ib_uverbs_rule_idr, uobj); destroy_flow: ib_destroy_flow(flow_id); +err_create: + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); err_free: kfree(flow_attr); err_put: @@ -3936,8 +4014,11 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, flow_id = uobj->object; ret = ib_destroy_flow(flow_id); - if (!ret) + if (!ret) { + ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); uobj->live = 0; + } put_uobj_write(uobj); @@ -4005,6 +4086,11 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, obj->uevent.events_reported = 0; INIT_LIST_HEAD(&obj->uevent.event_list); + ret = ib_rdmacg_try_charge(&obj->uevent.uobject.cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); + if (ret) + goto err_put_cq; + srq = pd->device->create_srq(pd, &attr, udata); if (IS_ERR(srq)) { ret = PTR_ERR(srq); @@ -4069,6 +4155,8 @@ err_destroy: ib_destroy_srq(srq); err_put: + ib_rdmacg_uncharge(&obj->uevent.uobject.cg_obj, ib_dev, + RDMACG_RESOURCE_HCA_OBJECT); put_pd_read(pd); err_put_cq: @@ -4255,6 +4343,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, if (ret) return ret; + 
ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_OBJECT); + if (srq_type == IB_SRQT_XRC) { us = container_of(obj, struct ib_usrq_object, uevent); atomic_dec(&us->uxrcd->refcnt); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index b3f95d453fba..35c788a32e26 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -51,6 +51,7 @@ #include <rdma/ib.h> #include "uverbs.h" +#include "core_priv.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); @@ -237,6 +238,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_ah_idr, uobj); ib_destroy_ah(ah); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); kfree(uobj); } @@ -246,6 +249,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_mw_idr, uobj); uverbs_dealloc_mw(mw); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); kfree(uobj); } @@ -254,6 +259,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_rule_idr, uobj); ib_destroy_flow(flow_id); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); kfree(uobj); } @@ -266,6 +273,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, if (qp == qp->real_qp) ib_uverbs_detach_umcast(qp, uqp); ib_destroy_qp(qp); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); ib_uverbs_release_uevent(file, &uqp->uevent); kfree(uqp); } @@ -298,6 +307,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_srq_idr, uobj); ib_destroy_srq(srq); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); ib_uverbs_release_uevent(file, uevent); kfree(uevent); } @@ -310,6 +321,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_cq_idr, uobj); ib_destroy_cq(cq); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); ib_uverbs_release_ucq(file, ev_file, ucq); kfree(ucq); } @@ -319,6 +332,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_mr_idr, uobj); ib_dereg_mr(mr); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); kfree(uobj); } @@ -339,11 +354,16 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, idr_remove_uobj(&ib_uverbs_pd_idr, uobj); ib_dealloc_pd(pd); + ib_rdmacg_uncharge(&uobj->cg_obj, context->device, + RDMACG_RESOURCE_HCA_OBJECT); kfree(uobj); } put_pid(context->tgid); + ib_rdmacg_uncharge(&context->cg_obj, context->device, + RDMACG_RESOURCE_HCA_HANDLE); + return context->device->dealloc_ucontext(context); } @@ -1174,7 +1194,7 @@ static void ib_uverbs_add_one(struct ib_device *device) if (cdev_add(&uverbs_dev->cdev, base, 1)) goto err_cdev; - uverbs_dev->dev = device_create(uverbs_class, device->dma_device, + uverbs_dev->dev = device_create(uverbs_class, device->dev.parent, uverbs_dev->cdev.dev, uverbs_dev, "uverbs%d", uverbs_dev->devnum); if (IS_ERR(uverbs_dev->dev)) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index bd452a92b386..5d355401179b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -436,7 +436,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) 
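Taken together, the uverbs hunks above apply one consistent rdma cgroup pattern: the per-context RDMACG_RESOURCE_HCA_HANDLE charge is taken when the ucontext is created and released at teardown, while each verb object (PD, MR, MW, CQ, QP, AH, flow, SRQ) takes an RDMACG_RESOURCE_HCA_OBJECT charge before the hardware object is created, gives it back on every error path, and gives it back again at destroy or cleanup time. A minimal sketch of that pairing, using a hypothetical helper name and simplified arguments rather than the exact uverbs code (the ib_rdmacg_* helpers come from the core's core_priv.h, as the uverbs_main.c hunk shows):

/*
 * Hypothetical illustration of the charge-before-create /
 * uncharge-on-failure pairing used throughout uverbs_cmd.c above.
 */
static struct ib_cq *create_charged_cq(struct ib_uobject *uobj,
				       struct ib_device *ib_dev,
				       const struct ib_cq_init_attr *attr)
{
	struct ib_cq *cq;
	int ret;

	/* charge the owning rdma cgroup before touching the HCA */
	ret = ib_rdmacg_try_charge(&uobj->cg_obj, ib_dev,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ERR_PTR(ret);

	cq = ib_dev->create_cq(ib_dev, attr, NULL, NULL);
	if (IS_ERR(cq))
		/* creation failed: return the charge immediately */
		ib_rdmacg_uncharge(&uobj->cg_obj, ib_dev,
				   RDMACG_RESOURCE_HCA_OBJECT);

	/* on success the matching uncharge happens at destroy/cleanup time */
	return cq;
}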
bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); ibdev->num_comp_vectors = 1; - ibdev->dma_device = &rdev->en_dev->pdev->dev; + ibdev->dev.parent = &rdev->en_dev->pdev->dev; ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; /* User space */ diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 48649f93258a..86ecd3ea6a4b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -37,7 +37,7 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> @@ -1393,7 +1393,7 @@ int iwch_register_device(struct iwch_dev *dev) memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; dev->ibdev.num_comp_vectors = 1; - dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); + dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev; dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; dev->ibdev.query_pkey = iwch_query_pkey; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d19662f635b1..5846c47c8d55 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -37,7 +37,7 @@ #include <linux/idr.h> #include <linux/completion.h> #include <linux/netdevice.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/inet.h> diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index bdf7de571d83..df64417ab6f2 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -572,7 +572,7 @@ int c4iw_register_device(struct c4iw_dev *dev) memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq; - dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); + dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev; dev->ibdev.query_device = c4iw_query_device; dev->ibdev.query_port = c4iw_query_port; dev->ibdev.query_pkey = c4iw_query_pkey; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 7a3d906b3671..e2cd2cd3b28a 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node) struct hfi1_affinity_node *entry; cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; const struct cpumask *node_mask, - *proc_mask = tsk_cpus_allowed(current); + *proc_mask = ¤t->cpus_allowed; struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; diff --git a/drivers/infiniband/hw/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c deleted file mode 100644 index 7e8dab892848..000000000000 --- a/drivers/infiniband/hw/hfi1/dma.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright(c) 2015, 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
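The provider hunks from here on are part of a second, mechanical conversion: instead of filling in the now-removed ib_device->dma_device pointer, each driver sets ib_device->dev.parent to its underlying PCI (or platform) device, and callers such as dev_warn(), dev_name(), SET_NETDEV_DEV() and dma_map_single() are switched to dev.parent. Roughly, for a PCI-backed provider (example_dev and example_register_ibdev are made-up names, not taken from any file above):

#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

struct example_dev {			/* hypothetical driver state */
	struct ib_device ibdev;
	struct pci_dev *pdev;
};

static int example_register_ibdev(struct example_dev *edev)
{
	struct ib_device *ibdev = &edev->ibdev;

	ibdev->owner = THIS_MODULE;
	ibdev->phys_port_cnt = 1;
	ibdev->num_comp_vectors = 1;
	/* was: ibdev->dma_device = &edev->pdev->dev; */
	ibdev->dev.parent = &edev->pdev->dev;

	return ib_register_device(ibdev, NULL);
}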
- * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include <linux/types.h> -#include <linux/scatterlist.h> - -#include "verbs.h" - -#define BAD_DMA_ADDRESS ((u64)0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. 
- */ - -static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - return (u64)cpu_addr; -} - -static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - if (offset + size > PAGE_SIZE) - return BAD_DMA_ADDRESS; - - addr = (u64)page_address(page); - if (addr) - addr += offset; - - return addr; -} - -static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - for_each_sg(sgl, sg, nents, i) { - addr = (u64)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void hfi1_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - enum dma_data_direction dir) -{ -} - -static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64)addr; - return addr; -} - -static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops hfi1_dma_mapping_ops = { - .mapping_error = hfi1_mapping_error, - .map_single = hfi1_dma_map_single, - .unmap_single = hfi1_dma_unmap_single, - .map_page = hfi1_dma_map_page, - .unmap_page = hfi1_dma_unmap_page, - .map_sg = hfi1_map_sg, - .unmap_sg = hfi1_unmap_sg, - .sync_single_for_cpu = hfi1_sync_single_for_cpu, - .sync_single_for_device = hfi1_sync_single_for_device, - .alloc_coherent = hfi1_dma_alloc_coherent, - .free_coherent = hfi1_dma_free_coherent -}; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index f46033984d07..f78c739b330a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -48,6 +48,7 @@ #include <linux/cdev.h> #include <linux/vmalloc.h> #include <linux/io.h> +#include <linux/sched/mm.h> #include <rdma/ib.h> @@ -185,7 +186,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) if (fd) { fd->rec_cpu_num = -1; /* no cpu affinity by default */ fd->mm = current->mm; - atomic_inc(&fd->mm->mm_count); + 
mmgrab(fd->mm); fp->private_data = fd; } else { fp->private_data = NULL; diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 6e595afca24c..09cda3c35e82 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -4406,7 +4406,7 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, switch (in_mad->base_version) { case OPA_MGMT_BASE_VERSION: if (unlikely(in_mad_size != sizeof(struct opa_mad))) { - dev_err(ibdev->dma_device, "invalid in_mad_size\n"); + dev_err(ibdev->dev.parent, "invalid in_mad_size\n"); return IB_MAD_RESULT_FAILURE; } return hfi1_process_opa_mad(ibdev, mad_flags, port, diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 1d81cac1fa6c..5cde1ecda0fe 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -856,7 +856,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, { struct sdma_rht_node *rht_node; struct sdma_engine *sde = NULL; - const struct cpumask *current_mask = tsk_cpus_allowed(current); + const struct cpumask *current_mask = ¤t->cpus_allowed; unsigned long cpu_id; /* diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c index 20f4ddcac3b0..68295a12b771 100644 --- a/drivers/infiniband/hw/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c @@ -46,7 +46,7 @@ */ #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include <linux/module.h> diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 33f00f0719c5..222315fadab1 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1703,7 +1703,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz); ibdev->owner = THIS_MODULE; ibdev->phys_port_cnt = dd->num_pports; - ibdev->dma_device = &dd->pcidev->dev; + ibdev->dev.parent = &dd->pcidev->dev; ibdev->modify_device = modify_device; ibdev->alloc_hw_stats = alloc_hw_stats; ibdev->get_hw_stats = get_hw_stats; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 6843409fba29..c3b41f95e70a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -439,7 +439,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->owner = THIS_MODULE; ib_dev->node_type = RDMA_NODE_IB_CA; - ib_dev->dma_device = dev; + ib_dev->dev.parent = dev; ib_dev->phys_port_cnt = hr_dev->caps.num_ports; ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f036f32f15d3..3f44f2f91f03 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -101,7 +101,7 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, event.event = IB_EVENT_QP_ACCESS_ERR; break; default: - dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n", + dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", type, hr_qp->qpn); return; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 5f695bf232a8..9b2849979756 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2758,7 +2758,6 @@ static struct 
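Two tree-wide cleanups also run through these driver hunks: plain <linux/sched.h> includes are replaced by the more specific <linux/sched/mm.h>, <linux/sched/signal.h> or <linux/sched/task.h> headers, open-coded atomic_inc(&mm->mm_count) becomes the mmgrab() helper, and tsk_cpus_allowed(task) gives way to &task->cpus_allowed. A small sketch of the mm reference pattern (example_ctx and both functions are illustrative names only):

#include <linux/sched/mm.h>

struct example_ctx {			/* hypothetical per-open state */
	struct mm_struct *mm;
};

static void example_grab_mm(struct example_ctx *ctx)
{
	ctx->mm = current->mm;
	mmgrab(ctx->mm);		/* was: atomic_inc(&ctx->mm->mm_count) */
}

static void example_release_mm(struct example_ctx *ctx)
{
	mmdrop(ctx->mm);		/* pairs with the mmgrab() above */
}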
i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev (1ull << IB_USER_VERBS_CMD_POST_SEND); iwibdev->ibdev.phys_port_cnt = 1; iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count; - iwibdev->ibdev.dma_device = &pcidev->dev; iwibdev->ibdev.dev.parent = &pcidev->dev; iwibdev->ibdev.query_port = i40iw_query_port; iwibdev->ibdev.modify_port = i40iw_modify_port; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 211cbbe9ccd1..fba94df28cf1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -39,6 +39,9 @@ #include <linux/inetdevice.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> + #include <net/ipv6.h> #include <net/addrconf.h> #include <net/devlink.h> @@ -2628,7 +2631,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; - ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; + ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; ibdev->ib_dev.add_gid = mlx4_ib_add_gid; ibdev->ib_dev.del_gid = mlx4_ib_del_gid; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 7f3d976d81ed..64fed44b43a6 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -55,7 +55,7 @@ #define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ #define mlx4_ib_warn(ibdev, format, arg...) \ - dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) + dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg) enum { MLX4_IB_SQ_MIN_WQE_SHIFT = 6, diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 5d73989d9771..433bcdbdd680 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -292,10 +292,10 @@ mlx4_alloc_priv_pages(struct ib_device *device, if (!mr->pages) return -ENOMEM; - mr->page_map = dma_map_single(device->dma_device, mr->pages, + mr->page_map = dma_map_single(device->dev.parent, mr->pages, mr->page_map_size, DMA_TO_DEVICE); - if (dma_mapping_error(device->dma_device, mr->page_map)) { + if (dma_mapping_error(device->dev.parent, mr->page_map)) { ret = -ENOMEM; goto err; } @@ -313,7 +313,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) if (mr->pages) { struct ib_device *device = mr->ibmr.device; - dma_unmap_single(device->dma_device, mr->page_map, + dma_unmap_single(device->dev.parent, mr->page_map, mr->page_map_size, DMA_TO_DEVICE); free_page((unsigned long)mr->pages); mr->pages = NULL; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6a8498c052a5..4dc0a8785fe0 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -41,6 +41,8 @@ #include <asm/pat.h> #endif #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/delay.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> @@ -3363,7 +3365,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = dev->mdev->priv.eq_table.num_comp_vectors; - dev->ib_dev.dma_device = &mdev->pdev->dev; + dev->ib_dev.dev.parent = &mdev->pdev->dev; dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = diff --git a/drivers/infiniband/hw/mlx5/mr.c 
b/drivers/infiniband/hw/mlx5/mr.c index 3c1f483d003f..b8f9382a8b7d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -966,7 +966,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, int page_shift, int flags) { struct mlx5_ib_dev *dev = mr->dev; - struct device *ddev = dev->ib_dev.dma_device; + struct device *ddev = dev->ib_dev.dev.parent; struct mlx5_ib_ucontext *uctx = NULL; int size; void *xlt; @@ -1411,9 +1411,9 @@ mlx5_alloc_priv_descs(struct ib_device *device, mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); - mr->desc_map = dma_map_single(device->dma_device, mr->descs, + mr->desc_map = dma_map_single(device->dev.parent, mr->descs, size, DMA_TO_DEVICE); - if (dma_mapping_error(device->dma_device, mr->desc_map)) { + if (dma_mapping_error(device->dev.parent, mr->desc_map)) { ret = -ENOMEM; goto err; } @@ -1432,7 +1432,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; - dma_unmap_single(device->dma_device, mr->desc_map, + dma_unmap_single(device->dev.parent, mr->desc_map, size, DMA_TO_DEVICE); kfree(mr->descs_alloc); mr->descs = NULL; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ce163184e742..22d0e6ee5af6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1224,7 +1224,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.num_comp_vectors = 1; - dev->ib_dev.dma_device = &dev->pdev->dev; + dev->ib_dev.dev.parent = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; dev->ib_dev.modify_device = mthca_modify_device; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index d3eae2f3e9f5..ccf0a4cffe9c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3731,7 +3731,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.phys_port_cnt = 1; nesibdev->ibdev.num_comp_vectors = 1; - nesibdev->ibdev.dma_device = &nesdev->pcidev->dev; nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; nesibdev->ibdev.query_device = nes_query_device; nesibdev->ibdev.query_port = nes_query_port; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 3e43bdc81e7a..57c9a2ad0260 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -199,7 +199,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; dev->ibdev.mmap = ocrdma_mmap; - dev->ibdev.dma_device = &dev->nic_info.pdev->dev; + dev->ibdev.dev.parent = &dev->nic_info.pdev->dev; dev->ibdev.process_mad = ocrdma_process_mad; dev->ibdev.get_port_immutable = ocrdma_port_immutable; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 3ac8aa5ef37d..b9b47e5cc8b3 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -170,7 +170,7 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.get_port_immutable = qedr_port_immutable; dev->ibdev.get_netdev = qedr_get_netdev; - dev->ibdev.dma_device = &dev->pdev->dev; + dev->ibdev.dev.parent = 
&dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c deleted file mode 100644 index 59fe092b4b0f..000000000000 --- a/drivers/infiniband/hw/qib/qib_dma.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include <linux/types.h> -#include <linux/scatterlist.h> - -#include "qib_verbs.h" - -#define BAD_DMA_ADDRESS ((u64) 0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. 
- */ - -static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); - return (u64) cpu_addr; -} - -static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - BUG_ON(!valid_dma_direction(direction)); - - if (offset + size > PAGE_SIZE) { - addr = BAD_DMA_ADDRESS; - goto done; - } - - addr = (u64) page_address(page); - if (addr) - addr += offset; - /* TODO: handle highmem pages */ - -done: - return addr; -} - -static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - BUG_ON(!valid_dma_direction(direction)); - - for_each_sg(sgl, sg, nents, i) { - addr = (u64) page_address(sg_page(sg)); - /* TODO: handle highmem pages */ - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void qib_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - enum dma_data_direction dir) -{ -} - -static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64) addr; - return addr; -} - -static void qib_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops qib_dma_mapping_ops = { - .mapping_error = qib_mapping_error, - .map_single = qib_dma_map_single, - .unmap_single = qib_dma_unmap_single, - .map_page = qib_dma_map_page, - .unmap_page = qib_dma_unmap_page, - .map_sg = qib_map_sg, - .unmap_sg = qib_unmap_sg, - .sync_single_for_cpu = qib_sync_single_for_cpu, - .sync_single_for_device = qib_sync_single_for_device, - .alloc_coherent = qib_dma_alloc_coherent, - .free_coherent = qib_dma_free_coherent -}; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 92399d3ffd15..06de1cbcf67d 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -707,7 +707,7 @@ static void qib_6120_clear_freeze(struct qib_devdata *dd) /* disable error interrupts, to avoid confusion */ qib_write_kreg(dd, kr_errmask, 0ULL); - /* also disable interrupts; errormask is sometimes overwriten */ + /* also disable interrupts; errormask is sometimes overwritten */ qib_6120_set_intr_state(dd, 0); qib_cancel_sends(dd->pport); diff --git 
a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index e55e31a69195..55a18384c22d 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -1259,7 +1259,7 @@ static void qib_7220_clear_freeze(struct qib_devdata *dd) /* disable error interrupts, to avoid confusion */ qib_write_kreg(dd, kr_errmask, 0ULL); - /* also disable interrupts; errormask is sometimes overwriten */ + /* also disable interrupts; errormask is sometimes overwritten */ qib_7220_set_intr_state(dd, 0); qib_cancel_sends(dd->pport); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 9cc97bd42775..12c4208fd701 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -2053,7 +2053,7 @@ static void qib_7322_clear_freeze(struct qib_devdata *dd) qib_write_kreg_port(dd->pport + pidx, krp_errmask, 0ULL); - /* also disable interrupts; errormask is sometimes overwriten */ + /* also disable interrupts; errormask is sometimes overwritten */ qib_7322_set_intr_state(dd, 0); /* clear the freeze, and be sure chip saw it */ diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 2c3c93572c17..8fdf79f8d4e4 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -158,10 +158,7 @@ int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, unsigned n, m; size_t off; - /* - * We use RKEY == zero for kernel virtual addresses - * (see qib_get_dma_mr and qib_dma.c). - */ + /* We use RKEY == zero for kernel virtual addresses */ rcu_read_lock(); if (rkey == 0) { struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 75f08624ac05..ce83ba9a12ef 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -32,6 +32,7 @@ */ #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/device.h> #include "qib.h" diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 6b56f1c01a07..83f8b5f24381 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1550,7 +1550,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->owner = THIS_MODULE; ibdev->node_guid = ppd->guid; ibdev->phys_port_cnt = dd->num_pports; - ibdev->dma_device = &dd->pcidev->dev; + ibdev->dev.parent = &dd->pcidev->dev; ibdev->modify_device = qib_modify_device; ibdev->process_mad = qib_process_mad; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 4f5a45db08e1..c0c1e8b027b1 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -382,7 +382,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; - us_ibdev->ib_dev.dma_device = &dev->dev; + us_ibdev->ib_dev.dev.parent = &dev->dev; us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION; strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX); diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 1ccee6ea5bc3..c49db7c33979 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ 
-34,7 +34,8 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/iommu.h> #include <linux/workqueue.h> diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e03d2f6c1f90..100bea5c42ff 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -173,7 +173,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) dev->flags = 0; dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.num_comp_vectors = 1; - dev->ib_dev.dma_device = &dev->pdev->dev; + dev->ib_dev.dev.parent = &dev->pdev->dev; dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig index 1da8d01a6855..fdd001ce13d8 100644 --- a/drivers/infiniband/sw/rdmavt/Kconfig +++ b/drivers/infiniband/sw/rdmavt/Kconfig @@ -1,5 +1,6 @@ config INFINIBAND_RDMAVT tristate "RDMA verbs transport library" depends on 64BIT + select DMA_VIRT_OPS ---help--- This is a common software verbs provider for RDMA networks. diff --git a/drivers/infiniband/sw/rdmavt/Makefile b/drivers/infiniband/sw/rdmavt/Makefile index c33a4f84413c..78b276a90401 100644 --- a/drivers/infiniband/sw/rdmavt/Makefile +++ b/drivers/infiniband/sw/rdmavt/Makefile @@ -7,7 +7,7 @@ # obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o -rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ +rdmavt-y := vt.o ah.o cq.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ rc.o srq.o trace.o CFLAGS_trace.o = -I$(src) diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c deleted file mode 100644 index f2cefb0d9180..000000000000 --- a/drivers/infiniband/sw/rdmavt/dma.c +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include <linux/types.h> -#include <linux/scatterlist.h> -#include <rdma/ib_verbs.h> - -#include "dma.h" - -#define BAD_DMA_ADDRESS ((u64)0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. - */ - -static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - return (u64)cpu_addr; -} - -static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - addr = (u64)page_address(page); - if (addr) - addr += offset; - - return addr; -} - -static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - if (WARN_ON(!valid_dma_direction(direction))) - return 0; - - for_each_sg(sgl, sg, nents, i) { - addr = (u64)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void rvt_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - return rvt_map_sg(dev, sgl, nents, direction); -} - -static void rvt_unmap_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs) -{ - return rvt_unmap_sg(dev, sg, nents, direction); -} - -static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - 
enum dma_data_direction dir) -{ -} - -static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64)addr; - return addr; -} - -static void rvt_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = { - .mapping_error = rvt_mapping_error, - .map_single = rvt_dma_map_single, - .unmap_single = rvt_dma_unmap_single, - .map_page = rvt_dma_map_page, - .unmap_page = rvt_dma_unmap_page, - .map_sg = rvt_map_sg, - .unmap_sg = rvt_unmap_sg, - .map_sg_attrs = rvt_map_sg_attrs, - .unmap_sg_attrs = rvt_unmap_sg_attrs, - .sync_single_for_cpu = rvt_sync_single_for_cpu, - .sync_single_for_device = rvt_sync_single_for_device, - .alloc_coherent = rvt_dma_alloc_coherent, - .free_coherent = rvt_dma_free_coherent -}; diff --git a/drivers/infiniband/sw/rdmavt/dma.h b/drivers/infiniband/sw/rdmavt/dma.h deleted file mode 100644 index 979f07e09195..000000000000 --- a/drivers/infiniband/sw/rdmavt/dma.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef DEF_RDMAVTDMA_H -#define DEF_RDMAVTDMA_H - -/* - * Copyright(c) 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops; - -#endif /* DEF_RDMAVTDMA_H */ diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c index f6e99778d7ca..bba241faca61 100644 --- a/drivers/infiniband/sw/rdmavt/mad.c +++ b/drivers/infiniband/sw/rdmavt/mad.c @@ -74,9 +74,9 @@ int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, u16 *out_mad_pkey_index) { /* - * MAD processing is quite different between hfi1 and qib. Therfore this - * is expected to be provided by the driver. Other drivers in the future - * may chose to implement this but it should not be made into a + * MAD processing is quite different between hfi1 and qib. Therefore + * this is expected to be provided by the driver. Other drivers in the + * future may choose to implement this but it should not be made into a * requirement. */ if (ibport_num_to_idx(ibdev, port_num) < 0) diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index c80a69b1ffcb..ae30b6838d79 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -320,8 +320,8 @@ static void __rvt_free_mr(struct rvt_mr *mr) * @acc: access flags * * Return: the memory region on success, otherwise returns an errno. - * Note that all DMA addresses should be created via the - * struct ib_dma_mapping_ops functions (see dma.c). + * Note that all DMA addresses should be created via the functions in + * struct dma_virt_ops. */ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) { @@ -799,7 +799,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, /* * We use LKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr and dma.c). + * (see rvt_get_dma_mr() and dma_virt_ops). */ rcu_read_lock(); if (sge->lkey == 0) { @@ -897,7 +897,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, /* * We use RKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr and dma.c). + * (see rvt_get_dma_mr() and dma_virt_ops). */ rcu_read_lock(); if (rkey == 0) { diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 1165639a914b..0d7c6bb551d9 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -47,6 +47,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/dma-mapping.h> #include "vt.h" #include "trace.h" @@ -778,8 +779,7 @@ int rvt_register_device(struct rvt_dev_info *rdi) } /* DMA Operations */ - rdi->ibdev.dma_ops = - rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops; + rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops; /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h index 6b01eaa4461b..f363505312be 100644 --- a/drivers/infiniband/sw/rdmavt/vt.h +++ b/drivers/infiniband/sw/rdmavt/vt.h @@ -50,7 +50,6 @@ #include <rdma/rdma_vt.h> #include <linux/pci.h> -#include "dma.h" #include "pd.h" #include "qp.h" #include "ah.h" diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 1e4e628fe7b0..7d1ac27ed251 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -2,6 +2,7 @@ config RDMA_RXE tristate "Software RDMA over Ethernet (RoCE) driver" depends on INET && PCI && INFINIBAND depends on NET_UDP_TUNNEL + select DMA_VIRT_OPS ---help--- This driver implements the InfiniBand RDMA transport over the Linux network stack. 
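All of the per-driver ib_dma_mapping_ops files deleted in this series (hfi1/dma.c, qib_dma.c, rdmavt/dma.c, rxe_dma.c) implemented the same idea: these software providers copy data with the CPU, so "mapping" simply returns a kernel virtual address. That behaviour is now provided once by the generic dma_virt_ops (hence the new select DMA_VIRT_OPS in the rdmavt and rxe Kconfig entries), and a soft-RDMA provider only has to point its struct device at it. A sketch of the old stub logic next to the new wiring; example_setup_dma is an illustrative name, not a function from the diff:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* what every deleted stub did: the "DMA address" is just the kernel VA */
static u64 virt_map_page_sketch(struct page *page, unsigned long offset,
				size_t size)
{
	u64 addr = (u64)(uintptr_t)page_address(page);

	if (!addr || offset + size > PAGE_SIZE)
		return 0;		/* BAD_DMA_ADDRESS in the old code */
	return addr + offset;
}

/* the replacement: defer to the shared dma_virt_ops implementation */
static void example_setup_dma(struct ib_device *ibdev)
{
	if (!ibdev->dev.dma_ops)
		ibdev->dev.dma_ops = &dma_virt_ops;
}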
It enables a system with a diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile index 3b3fb9d1c470..ec35ff022a42 100644 --- a/drivers/infiniband/sw/rxe/Makefile +++ b/drivers/infiniband/sw/rxe/Makefile @@ -14,7 +14,6 @@ rdma_rxe-y := \ rxe_qp.o \ rxe_cq.o \ rxe_mr.o \ - rxe_dma.o \ rxe_opcode.o \ rxe_mmap.o \ rxe_icrc.o \ diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c deleted file mode 100644 index a0f8af5851ae..000000000000 --- a/drivers/infiniband/sw/rxe/rxe_dma.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "rxe.h" -#include "rxe_loc.h" - -#define DMA_BAD_ADDER ((u64)0) - -static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == DMA_BAD_ADDER; -} - -static u64 rxe_dma_map_single(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); - return (uintptr_t)cpu_addr; -} - -static void rxe_dma_unmap_single(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static u64 rxe_dma_map_page(struct ib_device *dev, - struct page *page, - unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - u64 addr; - - WARN_ON(!valid_dma_direction(direction)); - - if (offset + size > PAGE_SIZE) { - addr = DMA_BAD_ADDER; - goto done; - } - - addr = (uintptr_t)page_address(page); - if (addr) - addr += offset; - -done: - return addr; -} - -static void rxe_dma_unmap_page(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - WARN_ON(!valid_dma_direction(direction)); - - for_each_sg(sgl, sg, nents, i) { - addr = (uintptr_t)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - - return ret; -} - -static void rxe_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - return rxe_map_sg(dev, sgl, nents, direction); -} - -static void rxe_unmap_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs) -{ - rxe_unmap_sg(dev, sg, nents, direction); -} - -static void rxe_sync_single_for_cpu(struct ib_device *dev, - u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void rxe_sync_single_for_device(struct ib_device *dev, - u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - - if (dma_handle) - *dma_handle = (uintptr_t)addr; - - return addr; -} - -static void rxe_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops rxe_dma_mapping_ops = { - .mapping_error = rxe_mapping_error, - .map_single = rxe_dma_map_single, - .unmap_single = rxe_dma_unmap_single, - .map_page = rxe_dma_map_page, - .unmap_page = rxe_dma_unmap_page, - .map_sg = rxe_map_sg, - .unmap_sg = rxe_unmap_sg, - .map_sg_attrs = rxe_map_sg_attrs, - .unmap_sg_attrs = rxe_unmap_sg_attrs, - .sync_single_for_cpu = rxe_sync_single_for_cpu, - .sync_single_for_device = rxe_sync_single_for_device, - .alloc_coherent = rxe_dma_alloc_coherent, - .free_coherent = rxe_dma_free_coherent -}; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h 
b/drivers/infiniband/sw/rxe/rxe_loc.h index 272337e5e948..183a9d379b41 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -237,8 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata); -extern struct ib_dma_mapping_ops rxe_dma_mapping_ops; - void rxe_release(struct kref *kref); void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index d2e2eff7a515..5113e502f6f9 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/dma-mapping.h> #include "rxe.h" #include "rxe_loc.h" #include "rxe_queue.h" @@ -169,7 +170,7 @@ static int rxe_query_pkey(struct ib_device *device, struct rxe_port *port; if (unlikely(port_num != 1)) { - dev_warn(device->dma_device, "invalid port_num = %d\n", + dev_warn(device->dev.parent, "invalid port_num = %d\n", port_num); goto err1; } @@ -177,7 +178,7 @@ static int rxe_query_pkey(struct ib_device *device, port = &rxe->port; if (unlikely(index >= port->attr.pkey_tbl_len)) { - dev_warn(device->dma_device, "invalid index = %d\n", + dev_warn(device->dev.parent, "invalid index = %d\n", index); goto err1; } @@ -1234,10 +1235,10 @@ int rxe_register_device(struct rxe_dev *rxe) dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = RXE_NUM_COMP_VECTORS; - dev->dma_device = rxe_dma_device(rxe); + dev->dev.parent = rxe_dma_device(rxe); dev->local_dma_lkey = 0; dev->node_guid = rxe_node_guid(rxe); - dev->dma_ops = &rxe_dma_mapping_ops; + dev->dev.dma_ops = &dma_virt_ops; dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index a6d6c617b597..0cdf2b7f272f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> +#include <linux/sched/signal.h> #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7b6d40ff1acf..bac455a1942d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -65,7 +65,7 @@ static void ipoib_get_drvinfo(struct net_device *netdev, ib_get_device_fw_str(priv->ca, drvinfo->fw_version, sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device), + strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), sizeof(drvinfo->bus_info)); strlcpy(drvinfo->version, ipoib_driver_version, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 259c59f67394..d1d3fb7a6127 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2020,7 +2020,7 @@ static struct net_device *ipoib_add_port(const char *format, if (!priv) goto alloc_mem_failed; - SET_NETDEV_DEV(priv->dev, hca->dma_device); + SET_NETDEV_DEV(priv->dev, hca->dev.parent); priv->dev->dev_id = port - 1; result = ib_query_port(hca, port, &attr); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index deedb6fc1b05..3e10e3dac2e7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ 
b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -31,6 +31,7 @@ */ #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/seq_file.h> diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 30a6985909e0..5a887efb4bdf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -652,7 +652,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, } if (iscsi_host_add(shost, - ib_conn->device->ib_device->dma_device)) { + ib_conn->device->ib_device->dev.parent)) { mutex_unlock(&iser_conn->state_mutex); goto free_host; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 3c7fa972a38c..cee46266f434 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2933,7 +2933,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) sprintf(target->target_name, "SRP.T10:%016llX", be64_to_cpu(target->id_ext)); - if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) + if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) return -ENODEV; memcpy(ids.port_id, &target->id_ext, 8); @@ -3546,7 +3546,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port) host->port = port; host->dev.class = &srp_class; - host->dev.parent = device->dev->dma_device; + host->dev.parent = device->dev->dev.parent; dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); if (device_register(&host->dev)) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index bc5a2d86ae7e..7e314c2f2071 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2479,8 +2479,7 @@ static void srpt_add_one(struct ib_device *device) struct ib_srq_init_attr srq_attr; int i; - pr_debug("device = %p, device->dma_ops = %p\n", device, - device->dma_ops); + pr_debug("device = %p\n", device); sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); if (!sdev) diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 3422464af229..198678613382 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -258,9 +258,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn, /* * Buttonpad could be also inferred from f30->has_mech_mouse_btns, - * but I am not sure, so use only the pdata info. + * but I am not sure, so use only the pdata info and the number of + * mapped buttons. 
*/ - if (pdata->f30_data.buttonpad) + if (pdata->f30_data.buttonpad || (button - BTN_LEFT == 1)) __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); return 0; diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c index 44deca88c579..beaf61ce775b 100644 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ b/drivers/input/touchscreen/cyttsp4_core.c @@ -202,7 +202,7 @@ static int cyttsp4_si_get_cydata(struct cyttsp4 *cd) int rc; si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs; - dev_dbg(cd->dev, "%s: cydata size: %Zd\n", __func__, + dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__, si->si_ofs.cydata_size); p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL); @@ -430,13 +430,13 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd) for (abs = 0; abs < CY_TCH_NUM_ABS; abs++) { dev_dbg(cd->dev, "%s: tch_rec_%s\n", __func__, cyttsp4_tch_abs_string[abs]); - dev_dbg(cd->dev, "%s: ofs =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: ofs =%2zd\n", __func__, si->si_ofs.tch_abs[abs].ofs); - dev_dbg(cd->dev, "%s: siz =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: siz =%2zd\n", __func__, si->si_ofs.tch_abs[abs].size); - dev_dbg(cd->dev, "%s: max =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: max =%2zd\n", __func__, si->si_ofs.tch_abs[abs].max); - dev_dbg(cd->dev, "%s: bofs=%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: bofs=%2zd\n", __func__, si->si_ofs.tch_abs[abs].bofs); } @@ -586,62 +586,62 @@ static int cyttsp4_si_get_op_data_ptrs(struct cyttsp4 *cd) static void cyttsp4_si_put_log_data(struct cyttsp4 *cd) { struct cyttsp4_sysinfo *si = &cd->sysinfo; - dev_dbg(cd->dev, "%s: cydata_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: cydata_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.cydata_ofs, si->si_ofs.cydata_size); - dev_dbg(cd->dev, "%s: test_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: test_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.test_ofs, si->si_ofs.test_size); - dev_dbg(cd->dev, "%s: pcfg_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: pcfg_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size); - dev_dbg(cd->dev, "%s: opcfg_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: opcfg_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size); - dev_dbg(cd->dev, "%s: ddata_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: ddata_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.ddata_ofs, si->si_ofs.ddata_size); - dev_dbg(cd->dev, "%s: mdata_ofs =%4Zd siz=%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: mdata_ofs =%4zd siz=%4zd\n", __func__, si->si_ofs.mdata_ofs, si->si_ofs.mdata_size); - dev_dbg(cd->dev, "%s: cmd_ofs =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: cmd_ofs =%4zd\n", __func__, si->si_ofs.cmd_ofs); - dev_dbg(cd->dev, "%s: rep_ofs =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: rep_ofs =%4zd\n", __func__, si->si_ofs.rep_ofs); - dev_dbg(cd->dev, "%s: rep_sz =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: rep_sz =%4zd\n", __func__, si->si_ofs.rep_sz); - dev_dbg(cd->dev, "%s: num_btns =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: num_btns =%4zd\n", __func__, si->si_ofs.num_btns); - dev_dbg(cd->dev, "%s: num_btn_regs =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: num_btn_regs =%4zd\n", __func__, si->si_ofs.num_btn_regs); - dev_dbg(cd->dev, "%s: tt_stat_ofs =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: tt_stat_ofs =%4zd\n", __func__, si->si_ofs.tt_stat_ofs); - dev_dbg(cd->dev, "%s: tch_rec_size =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: tch_rec_size =%4zd\n", __func__, 
si->si_ofs.tch_rec_size); - dev_dbg(cd->dev, "%s: max_tchs =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: max_tchs =%4zd\n", __func__, si->si_ofs.max_tchs); - dev_dbg(cd->dev, "%s: mode_size =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: mode_size =%4zd\n", __func__, si->si_ofs.mode_size); - dev_dbg(cd->dev, "%s: data_size =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: data_size =%4zd\n", __func__, si->si_ofs.data_size); - dev_dbg(cd->dev, "%s: map_sz =%4Zd\n", __func__, + dev_dbg(cd->dev, "%s: map_sz =%4zd\n", __func__, si->si_ofs.map_sz); - dev_dbg(cd->dev, "%s: btn_rec_size =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: btn_rec_size =%2zd\n", __func__, si->si_ofs.btn_rec_size); - dev_dbg(cd->dev, "%s: btn_diff_ofs =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: btn_diff_ofs =%2zd\n", __func__, si->si_ofs.btn_diff_ofs); - dev_dbg(cd->dev, "%s: btn_diff_size =%2Zd\n", __func__, + dev_dbg(cd->dev, "%s: btn_diff_size =%2zd\n", __func__, si->si_ofs.btn_diff_size); - dev_dbg(cd->dev, "%s: max_x = 0x%04ZX (%Zd)\n", __func__, + dev_dbg(cd->dev, "%s: max_x = 0x%04zX (%zd)\n", __func__, si->si_ofs.max_x, si->si_ofs.max_x); - dev_dbg(cd->dev, "%s: x_origin = %Zd (%s)\n", __func__, + dev_dbg(cd->dev, "%s: x_origin = %zd (%s)\n", __func__, si->si_ofs.x_origin, si->si_ofs.x_origin == CY_NORMAL_ORIGIN ? "left corner" : "right corner"); - dev_dbg(cd->dev, "%s: max_y = 0x%04ZX (%Zd)\n", __func__, + dev_dbg(cd->dev, "%s: max_y = 0x%04zX (%zd)\n", __func__, si->si_ofs.max_y, si->si_ofs.max_y); - dev_dbg(cd->dev, "%s: y_origin = %Zd (%s)\n", __func__, + dev_dbg(cd->dev, "%s: y_origin = %zd (%s)\n", __func__, si->si_ofs.y_origin, si->si_ofs.y_origin == CY_NORMAL_ORIGIN ? "upper corner" : "lower corner"); - dev_dbg(cd->dev, "%s: max_p = 0x%04ZX (%Zd)\n", __func__, + dev_dbg(cd->dev, "%s: max_p = 0x%04zX (%zd)\n", __func__, si->si_ofs.max_p, si->si_ofs.max_p); dev_dbg(cd->dev, "%s: xy_mode=%p xy_data=%p\n", __func__, @@ -1000,7 +1000,7 @@ static int cyttsp4_xy_worker(struct cyttsp4 *cd) dev_dbg(dev, "%s: Large area detected\n", __func__); if (num_cur_tch > si->si_ofs.max_tchs) { - dev_err(dev, "%s: too many tch; set to max tch (n=%d c=%Zd)\n", + dev_err(dev, "%s: too many tch; set to max tch (n=%d c=%zd)\n", __func__, num_cur_tch, si->si_ofs.max_tchs); num_cur_tch = si->si_ofs.max_tchs; } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 09bd3b290bb8..98940d1392cb 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -117,7 +117,7 @@ const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; -static struct dma_map_ops amd_iommu_dma_ops; +static const struct dma_map_ops amd_iommu_dma_ops; /* * This struct contains device specific data for the IOMMU @@ -519,7 +519,7 @@ static void iommu_uninit_device(struct device *dev) iommu_group_remove_device(dev); /* Remove dma-ops */ - dev->archdata.dma_ops = NULL; + dev->dma_ops = NULL; /* * We keep dev_data around for unplugged devices and reuse it when the @@ -2168,7 +2168,7 @@ static int amd_iommu_add_device(struct device *dev) dev_name(dev)); iommu_ignore_device(dev); - dev->archdata.dma_ops = &nommu_dma_ops; + dev->dma_ops = &nommu_dma_ops; goto out; } init_iommu_group(dev); @@ -2185,7 +2185,7 @@ static int amd_iommu_add_device(struct device *dev) if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; else - dev->archdata.dma_ops = &amd_iommu_dma_ops; + dev->dma_ops = &amd_iommu_dma_ops; out: iommu_completion_wait(iommu); @@ -2732,7 +2732,7 @@ static int 
amd_iommu_dma_supported(struct device *dev, u64 mask) return check_device(dev); } -static struct dma_map_ops amd_iommu_dma_ops = { +static const struct dma_map_ops amd_iommu_dma_ops = { .alloc = alloc_coherent, .free = free_coherent, .map_page = map_page, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 04cdac7ab3e3..6130278c5d71 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1507,7 +1507,7 @@ static ssize_t amd_iommu_show_cap(struct device *dev, struct device_attribute *attr, char *buf) { - struct amd_iommu *iommu = dev_get_drvdata(dev); + struct amd_iommu *iommu = dev_to_amd_iommu(dev); return sprintf(buf, "%x\n", iommu->cap); } static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); @@ -1516,7 +1516,7 @@ static ssize_t amd_iommu_show_features(struct device *dev, struct device_attribute *attr, char *buf) { - struct amd_iommu *iommu = dev_get_drvdata(dev); + struct amd_iommu *iommu = dev_to_amd_iommu(dev); return sprintf(buf, "%llx\n", iommu->features); } static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index af00f381a7b1..003f3ceb2661 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -569,6 +569,11 @@ struct amd_iommu { volatile u64 __aligned(8) cmd_sem; }; +static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) +{ + return container_of(dev, struct amd_iommu, iommu.dev); +} + #define ACPIHID_UID_LEN 256 #define ACPIHID_HID_LEN 9 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index f8ed8c95b685..063343909b0d 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -22,6 +22,7 @@ #include <linux/profile.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/iommu.h> #include <linux/wait.h> #include <linux/pci.h> diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a8f7ae0eb7a4..238ad3447712 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4730,11 +4730,16 @@ static int intel_iommu_cpu_dead(unsigned int cpu) return 0; } +static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) +{ + return container_of(dev, struct intel_iommu, iommu.dev); +} + static ssize_t intel_iommu_show_version(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); u32 ver = readl(iommu->reg + DMAR_VER_REG); return sprintf(buf, "%d:%d\n", DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver)); @@ -4745,7 +4750,7 @@ static ssize_t intel_iommu_show_address(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->reg_phys); } static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL); @@ -4754,7 +4759,7 @@ static ssize_t intel_iommu_show_cap(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->cap); } static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL); @@ -4763,7 +4768,7 @@ static ssize_t intel_iommu_show_ecap(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = 
dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->ecap); } static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL); @@ -4772,7 +4777,7 @@ static ssize_t intel_iommu_show_ndoms(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); } static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL); @@ -4781,7 +4786,7 @@ static ssize_t intel_iommu_show_ndoms_used(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_iommu *iommu = dev_get_drvdata(dev); + struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))); } diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index cb72e0011310..23c427602c55 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -16,6 +16,7 @@ #include <linux/intel-iommu.h> #include <linux/mmu_notifier.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/intel-svm.h> #include <linux/rculist.h> @@ -579,7 +580,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!svm->mm) goto bad_req; /* If the mm is already defunct, don't handle faults. */ - if (!atomic_inc_not_zero(&svm->mm->mm_users)) + if (!mmget_not_zero(svm->mm)) goto bad_req; down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 49d0f70c2bae..1dfd1085a04f 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -18,7 +18,7 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/workqueue.h> diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c index 576b7b4a3278..8bc2791bc39c 100644 --- a/drivers/isdn/hardware/eicon/debug.c +++ b/drivers/isdn/hardware/eicon/debug.c @@ -2049,7 +2049,7 @@ static int diva_dbg_cmp_key(const char *ref, const char *key) { /* In case trace filter starts with "C" character then all following characters are interpreted as command. - Followings commands are available: + Following commands are available: - single, trace single call at time, independent from CPN/CiPN */ static int diva_mnt_cmp_nmbr(const char *nmbr) { diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index 8d338ba366d0..77dec28ba874 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -1625,7 +1625,7 @@ mISDNipac_init(struct ipac_hw *ipac, void *hw) ipac->hscx[i].bch.hw = hw; ipac->hscx[i].ip = ipac; /* default values for IOM time slots - * can be overwriten by card */ + * can be overwritten by card */ ipac->hscx[i].slot = (i == 0) ? 
0x2f : 0x03; } diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 63eaa0a9f8a1..1b169559a240 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include "isdn_common.h" #include "isdn_tty.h" #ifdef CONFIG_ISDN_AUDIO diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 0222b1a35a2d..9b85295aa657 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -115,7 +115,7 @@ * * The CMX has special functions for conferences with one, two and more * members. It will allow different types of data flow. Receive and transmit - * data to/form upper layer may be swithed on/off individually without losing + * data to/form upper layer may be switched on/off individually without losing * features of CMX, Tones and DTMF. * * Echo Cancellation: Sometimes we like to cancel echo from the interface. diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 67c21876c35f..6ceca7db62ad 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -234,6 +234,8 @@ #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include "core.h" #include "l1oip.h" diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index b324474c0c12..8b7faea2ddf8 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -19,6 +19,9 @@ #include <linux/mISDNif.h> #include <linux/kthread.h> #include <linux/sched.h> +#include <linux/sched/cputime.h> +#include <linux/signal.h> + #include "core.h" static u_int *debug; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 9438d7ec3308..b1e135fc1fb5 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -25,6 +25,8 @@ #include <linux/module.h> #include <linux/mISDNif.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> + #include "core.h" static DEFINE_MUTEX(mISDN_mutex); diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index a9145aa7f36a..8d456dc6c5bf 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -29,7 +29,6 @@ struct led_pwm_data { unsigned int active_low; unsigned int period; int duty; - bool can_sleep; }; struct led_pwm_priv { @@ -49,8 +48,8 @@ static void __led_pwm_set(struct led_pwm_data *led_dat) pwm_enable(led_dat->pwm); } -static void led_pwm_set(struct led_classdev *led_cdev, - enum led_brightness brightness) +static int led_pwm_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { struct led_pwm_data *led_dat = container_of(led_cdev, struct led_pwm_data, cdev); @@ -66,12 +65,7 @@ static void led_pwm_set(struct led_classdev *led_cdev, led_dat->duty = duty; __led_pwm_set(led_dat); -} -static int led_pwm_set_blocking(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - led_pwm_set(led_cdev, brightness); return 0; } @@ -112,11 +106,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, return ret; } - led_data->can_sleep = pwm_can_sleep(led_data->pwm); - if (!led_data->can_sleep) - led_data->cdev.brightness_set = led_pwm_set; - else - led_data->cdev.brightness_set_blocking = led_pwm_set_blocking; + led_data->cdev.brightness_set_blocking = led_pwm_set; /* * FIXME: pwm_apply_args() should be removed when switching to the diff --git 
a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index e6f2f8b9f09a..afa3b4099214 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/timer.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/leds.h> #include <linux/reboot.h> #include <linux/suspend.h> diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index ac219045daf7..395ed1961dbf 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -8,6 +8,7 @@ #include <linux/stddef.h> #include <linux/io.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/cpu.h> #include <linux/freezer.h> diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 30c60687d277..1a6787bc9386 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -8,6 +8,7 @@ #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/export.h> diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 152414e6378a..fee939efc4fc 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 227869159ac0..1ac66421877a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -39,6 +39,7 @@ #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/memblock.h> +#include <linux/sched/signal.h> #include <asm/byteorder.h> #include <asm/io.h> diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 43b8db2b5445..cce99f72e4ae 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -23,7 +23,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/miscdevice.h> #include <linux/blkdev.h> #include <linux/pci.h> diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 9c79f8019d2a..97fb956bb6e0 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -21,6 +21,7 @@ #include <linux/poll.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/sched/signal.h> #define MBOX_MAX_SIG_LEN 8 #define MBOX_MAX_MSG_LEN 128 diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 646fe85261c1..18526d44688d 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -11,6 +11,7 @@ #include "bset.h" #include <linux/console.h> +#include <linux/sched/clock.h> #include <linux/random.h> #include <linux/prefetch.h> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index a43eedd5804d..450d0e848ae4 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -32,6 +32,9 @@ #include <linux/prefetch.h> #include <linux/random.h> #include <linux/rcupdate.h> +#include <linux/sched/clock.h> +#include <linux/rculist.h> + #include <trace/events/bcache.h> /* diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 9b2fe2d3e3a9..1ec84ca81146 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -3,6 +3,7 @@ #include 
<linux/llist.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/workqueue.h> /* diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index b3ff57d61dde..f90f13616980 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -13,6 +13,7 @@ #include <linux/blkdev.h> #include <linux/sort.h> +#include <linux/sched/clock.h> static const char * const cache_replacement_policies[] = { "lru", diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index dde6172f3f10..8c3a938f4bf0 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/sched/clock.h> #include "util.h" diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index cf2cbc211d83..a126919ed102 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -6,6 +6,7 @@ #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/kernel.h> +#include <linux/sched/clock.h> #include <linux/llist.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 69e1ae59cab8..6ac2e48b9235 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/sched/clock.h> #include <trace/events/bcache.h> /* Rate limiting */ diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d36d427a9efb..df4859f6ac6a 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -11,6 +11,7 @@ #include <linux/device-mapper.h> #include <linux/dm-io.h> #include <linux/slab.h> +#include <linux/sched/mm.h> #include <linux/jiffies.h> #include <linux/vmalloc.h> #include <linux/shrinker.h> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1cb2ca9dfae3..389a3637ffcc 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1536,7 +1536,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string down_read(&key->sem); - ukp = user_key_payload(key); + ukp = user_key_payload_locked(key); if (!ukp) { up_read(&key->sem); key_put(key); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a5a9b17f0f7f..4da6fc6b1ffd 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/miscdevice.h> +#include <linux/sched/mm.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5c9e95d66f3b..f8564d63982f 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -101,6 +101,8 @@ struct raid_dev { #define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS) #define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV) +#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET) + /* * Definitions of various constructor flags to * be used in checks of valid / invalid flags @@ -3462,9 +3464,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv) else if (!strcasecmp(argv[0], "recover")) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); else { - if (!strcasecmp(argv[0], "check")) + if (!strcasecmp(argv[0], "check")) { set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (!strcasecmp(argv[0], "repair")) { + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, 
&mddev->recovery); + } else if (!strcasecmp(argv[0], "repair")) { set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); } else @@ -3771,7 +3775,15 @@ static void raid_resume(struct dm_target *ti) mddev->ro = 0; mddev->in_sync = 0; - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + /* + * Keep the RAID set frozen if reshape/rebuild flags are set. + * The RAID set is unfrozen once the next table load/resume, + * which clears the reshape/rebuild flags, occurs. + * This ensures that the constructor for the inactive table + * retrieves an up-to-date reshape_position. + */ + if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (mddev->suspended) mddev_resume(mddev); @@ -3779,7 +3791,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 10, 0}, + .version = {1, 10, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9f37d7fc2786..f4ffd1eb8f44 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> #include <linux/mempool.h> diff --git a/drivers/md/md.c b/drivers/md/md.c index 985374f20e2e..548d1b8014f8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -44,6 +44,7 @@ */ +#include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/blkdev.h> #include <linux/badblocks.h> diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 0863905dee02..8589e0a14068 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -13,6 +13,7 @@ #include <linux/rwsem.h> #include <linux/device-mapper.h> #include <linux/stacktrace.h> +#include <linux/sched/task.h> #define DM_MSG_PREFIX "block manager" diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7453d94eeed7..fbc2d7851b49 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -37,7 +37,10 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/sched/signal.h> + #include <trace/events/block.h> + #include "md.h" #include "raid1.h" #include "bitmap.h" diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2ce23b01dbb2..4fb09b3fcb41 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -55,6 +55,8 @@ #include <linux/ratelimit.h> #include <linux/nodemask.h> #include <linux/flex_array.h> +#include <linux/sched/signal.h> + #include <trace/events/block.h> #include "md.h" diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 000d737ad827..8d65028c7a74 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -34,7 +34,7 @@ #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/spinlock.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kthread.h> #include "dvb_ca_en50221.h" diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 4eac71e50c5f..6628f80d184f 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -19,7 +19,7 @@ #define pr_fmt(fmt) "dvb_demux: " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/slab.h> #include 
<linux/vmalloc.h> diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 85ae3669aa66..e3fff8f64d37 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -29,7 +29,7 @@ #include <linux/string.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/poll.h> diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h index bbe94873d44d..8ed6bcc3a56e 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.h +++ b/drivers/media/dvb-core/dvb_ringbuffer.h @@ -136,7 +136,7 @@ extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf); } /** - * dvb_ringbuffer_read_user - Reads a buffer into an user pointer + * dvb_ringbuffer_read_user - Reads a buffer into a user pointer * * @rbuf: pointer to struct dvb_ringbuffer * @buf: pointer to the buffer where the data will be stored @@ -193,7 +193,7 @@ extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len); /** - * dvb_ringbuffer_write_user - Writes a buffer received via an user pointer + * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer * * @rbuf: pointer to struct dvb_ringbuffer * @buf: pointer to the buffer where the data will be read diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 9076bf21cc8a..7a681d8202c7 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h @@ -1317,9 +1317,9 @@ struct drx_version_list { DRX_MPEG_STR_WIDTH_8 }; -/* CTRL CFG MPEG ouput */ +/* CTRL CFG MPEG output */ /** -* \struct struct drx_cfg_mpeg_output * \brief Configuartion parameters for MPEG output control. +* \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control. * * Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and * DRX_CTRL_GET_CFG. 
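[Editorial aside, not part of the patch text: several hunks earlier in this series (amd_iommu_init.c, intel-iommu.c) replace dev_get_drvdata() in sysfs show callbacks with container_of() helpers such as dev_to_amd_iommu() and dev_to_intel_iommu(). The sketch below restates that pattern with hypothetical names (struct my_iommu, dev_to_my_iommu) to show how a driver structure that embeds its struct device can be recovered directly from the device pointer; it is an illustration under those assumptions, not code taken from this diff.]

#include <linux/device.h>
#include <linux/kernel.h>	/* container_of() */

/* Hypothetical driver state that embeds the struct device it registers. */
struct my_iommu {
	u64 cap;
	struct device dev;	/* registered with sysfs / device core */
};

/* Recover the containing structure from the embedded device pointer,
 * instead of relying on drvdata having been set up separately. */
static inline struct my_iommu *dev_to_my_iommu(struct device *dev)
{
	return container_of(dev, struct my_iommu, dev);
}

/* Example sysfs show callback using the helper (attribute wiring omitted). */
static ssize_t my_iommu_show_cap(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct my_iommu *iommu = dev_to_my_iommu(dev);

	return sprintf(buf, "%llx\n", iommu->cap);
}

[End of aside; the patch continues below.]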
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index f1c3e3b09b65..daeaf965dd56 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -601,7 +601,7 @@ static struct drxj_data drxj_data_g = { 0, /* hi_cfg_wake_up_key */ 0, /* hi_cfg_ctrl */ 0, /* HICfgTimeout */ - /* UIO configuartion */ + /* UIO configuration */ DRX_UIO_MODE_DISABLE, /* uio_sma_rx_mode */ DRX_UIO_MODE_DISABLE, /* uio_sma_tx_mode */ DRX_UIO_MODE_DISABLE, /* uioASELMode */ @@ -619,7 +619,7 @@ static struct drxj_data drxj_data_g = { /* false, * flagHDevSet */ /* (u16) 0xFFF, * rdsLastCount */ - /* ATV configuartion */ + /* ATV configuration */ 0UL, /* flags cfg changes */ /* shadow of ATV_TOP_EQU0__A */ {-5, @@ -3352,7 +3352,7 @@ rw_error: /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ -/* miscellaneous configuartions - begin */ +/* miscellaneous configurations - begin */ /*----------------------------------------------------------------------------*/ /** @@ -3515,7 +3515,7 @@ rw_error: } /*----------------------------------------------------------------------------*/ -/* miscellaneous configuartions - end */ +/* miscellaneous configurations - end */ /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ @@ -10952,7 +10952,7 @@ rw_error: static void drxj_reset_mode(struct drxj_data *ext_attr) { - /* Initialize default AFE configuartion for QAM */ + /* Initialize default AFE configuration for QAM */ if (ext_attr->has_lna) { /* IF AGC off, PGA active */ #ifndef DRXJ_VSB_ONLY @@ -10996,7 +10996,7 @@ static void drxj_reset_mode(struct drxj_data *ext_attr) ext_attr->qam_pre_saw_cfg.reference = 0x07; ext_attr->qam_pre_saw_cfg.use_pre_saw = true; #endif - /* Initialize default AFE configuartion for VSB */ + /* Initialize default AFE configuration for VSB */ ext_attr->vsb_rf_agc_cfg.standard = DRX_STANDARD_8VSB; ext_attr->vsb_rf_agc_cfg.ctrl_mode = DRX_AGC_CTRL_AUTO; ext_attr->vsb_rf_agc_cfg.min_output_level = 0; @@ -11072,9 +11072,9 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) } if ((*mode == DRX_POWER_UP)) { - /* Restore analog & pin configuartion */ + /* Restore analog & pin configuration */ - /* Initialize default AFE configuartion for VSB */ + /* Initialize default AFE configuration for VSB */ drxj_reset_mode(ext_attr); } else { /* Power down to requested mode */ diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h index 55ad535197d2..6c5b8f78f9f6 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.h +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h @@ -447,7 +447,7 @@ struct drxj_cfg_atv_output { u16 hi_cfg_ctrl; /**< HI Configure() parameter 5 */ u16 hi_cfg_transmit; /**< HI Configure() parameter 6 */ - /* UIO configuartion */ + /* UIO configuration */ enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin */ enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin */ enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin */ @@ -459,7 +459,7 @@ struct drxj_cfg_atv_output { /* IQM RC frequecy shift */ u32 iqm_rc_rate_ofs; /**< frequency shifter setting after setchannel */ - /* ATV configuartion */ + /* ATV configuration */ u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */ s16 
atv_top_equ0[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU0__A */ s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU1__A */ diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 15d2cac588b1..7e1bbbaad625 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1626,7 +1626,7 @@ static int ctrl_power_mode(struct drxk_state *state, enum drx_power_mode *mode) } if (*mode == DRX_POWER_UP) { - /* Restore analog & pin configuartion */ + /* Restore analog & pin configuration */ } else { /* Power down to requested mode */ /* Backup some register settings */ diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index ef35c2b30ea3..4bf5a551ba40 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -309,7 +309,7 @@ static int helene_write_regs(struct helene_priv *priv, if (len + 1 > sizeof(buf)) { dev_warn(&priv->i2c->dev, - "wr reg=%04x: len=%d vs %Zu is too big!\n", + "wr reg=%04x: len=%d vs %zu is too big!\n", reg, len + 1, sizeof(buf)); return -E2BIG; } diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c index 4b67d7e0116d..62aa00767015 100644 --- a/drivers/media/dvb-frontends/or51132.c +++ b/drivers/media/dvb-frontends/or51132.c @@ -133,7 +133,7 @@ static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware u32 firmwareAsize, firmwareBsize; int i,ret; - dprintk("Firmware is %Zd bytes\n",fw->size); + dprintk("Firmware is %zd bytes\n",fw->size); /* Get size of firmware A and B */ firmwareAsize = le32_to_cpu(*((__le32*)fw->data)); diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c index 92ab34c3e0be..143b39b5f6c9 100644 --- a/drivers/media/dvb-frontends/tda10048.c +++ b/drivers/media/dvb-frontends/tda10048.c @@ -499,7 +499,7 @@ static int tda10048_firmware_upload(struct dvb_frontend *fe) __func__); return -EIO; } else { - printk(KERN_INFO "%s: firmware read %Zu bytes.\n", + printk(KERN_INFO "%s: firmware read %zu bytes.\n", __func__, fw->size); ret = 0; diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h index 843d4998435e..4ade89d33d62 100644 --- a/drivers/media/i2c/adv7183_regs.h +++ b/drivers/media/i2c/adv7183_regs.h @@ -83,7 +83,7 @@ #define ADV7183_LETTERBOX_3 0x9D /* Letterbox 3 */ #define ADV7183_CRC_EN 0xB2 /* CRC enable */ #define ADV7183_ADC_SWITCH_1 0xC3 /* ADC switch 1 */ -#define ADV7183_ADC_SWITCH_2 0xC4 /* ADC swithc 2 */ +#define ADV7183_ADC_SWITCH_2 0xC4 /* ADC switch 2 */ #define ADV7183_LETTERBOX_CTRL_1 0xDC /* Letterbox control 1 */ #define ADV7183_LETTERBOX_CTRL_2 0xDD /* Letterbox control 2 */ #define ADV7183_SD_OFFSET_CB 0xE1 /* SD offset Cb */ diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index fef3c736fcba..7be2088c45fe 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -24,7 +24,7 @@ #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/interrupt.h> diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index ab2ae53618e8..e73c153285f0 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -59,6 +59,7 @@ #include <media/tveeprom.h> #include 
<media/i2c/saa7115.h> #include "tuner-xc2028.h" +#include <uapi/linux/sched/types.h> /* If you have already X v4l cards, then set this to X. This way the device numbers stay matched. Example: you have a WinTV card diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index cde452e30746..d27c5c2c07ea 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -38,37 +38,38 @@ * using information provided by Jiun-Kuei Jung @ AVerMedia. */ -#include <asm/byteorder.h> +#include <linux/module.h> +#include <linux/init.h> #include <linux/delay.h> -#include <linux/device.h> +#include <linux/sched/signal.h> #include <linux/fs.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ivtv.h> -#include <linux/kernel.h> -#include <linux/kthread.h> #include <linux/list.h> -#include <linux/module.h> -#include <linux/mutex.h> +#include <linux/unistd.h> #include <linux/pagemap.h> -#include <linux/pci.h> #include <linux/scatterlist.h> -#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/mutex.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/uaccess.h> -#include <linux/unistd.h> +#include <asm/byteorder.h> -#include <media/drv-intf/cx2341x.h> -#include <media/i2c/ir-kbd-i2c.h> -#include <media/tuner.h> +#include <linux/dvb/video.h> +#include <linux/dvb/audio.h> #include <media/v4l2-common.h> +#include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fh.h> -#include <media/v4l2-ioctl.h> +#include <media/tuner.h> +#include <media/drv-intf/cx2341x.h> +#include <media/i2c/ir-kbd-i2c.h> + +#include <linux/ivtv.h> /* Memory layout */ #define IVTV_ENCODER_OFFSET 0x00000000 diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index da1eebd2016f..3219d2f3271e 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -18,6 +18,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c index 77f4d15f322b..e8b5d0992157 100644 --- a/drivers/media/pci/pt3/pt3.c +++ b/drivers/media/pci/pt3/pt3.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include "dmxdev.h" #include "dvbdev.h" diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c index 4ba5eade7ce2..ef4906406ebf 100644 --- a/drivers/media/pci/saa7164/saa7164-fw.c +++ b/drivers/media/pci/saa7164/saa7164-fw.c @@ -422,7 +422,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) return -ENOMEM; } - printk(KERN_INFO "%s() firmware read %Zu bytes.\n", + printk(KERN_INFO "%s() firmware read %zu bytes.\n", __func__, fw->size); if (fw->size != fwlength) { diff --git a/drivers/media/pci/solo6x10/solo6x10-i2c.c b/drivers/media/pci/solo6x10/solo6x10-i2c.c index c908672b2c40..e83bb79f9349 100644 --- a/drivers/media/pci/solo6x10/solo6x10-i2c.c +++ b/drivers/media/pci/solo6x10/solo6x10-i2c.c @@ -27,6 +27,7 @@ * thread context, ACK the interrupt, and move on. 
-- BenC */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include "solo6x10.h" diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c index 671907a6e6b6..40adceebca7e 100644 --- a/drivers/media/pci/zoran/zoran_device.c +++ b/drivers/media/pci/zoran/zoran_device.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/ktime.h> +#include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h index 5615fefbf7af..c0373aede81a 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.h +++ b/drivers/media/platform/exynos4-is/fimc-core.h @@ -358,7 +358,7 @@ struct fimc_pix_limit { * @pix_limit: pixel size constraints for the scaler * @min_inp_pixsize: minimum input pixel size * @min_out_pixsize: minimum output pixel size - * @hor_offs_align: horizontal pixel offset aligment + * @hor_offs_align: horizontal pixel offset alignment * @min_vsize_align: minimum vertical pixel size alignment */ struct fimc_variant { diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c index f99092ca8f5c..47c36c26096b 100644 --- a/drivers/media/platform/vivid/vivid-radio-rx.c +++ b/drivers/media/platform/vivid/vivid-radio-rx.c @@ -22,6 +22,8 @@ #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> +#include <linux/sched/signal.h> + #include <media/v4l2-common.h> #include <media/v4l2-event.h> #include <media/v4l2-dv-timings.h> diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c index 8c59d4f53200..0e8025b7b4dd 100644 --- a/drivers/media/platform/vivid/vivid-radio-tx.c +++ b/drivers/media/platform/vivid/vivid-radio-tx.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index a54ca531d8ef..393dccaabdd0 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -19,7 +19,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/ioctl.h> #include <linux/fs.h> diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index 0345b274eccc..91947cf1950e 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -1144,7 +1144,7 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force) pr_err("xc5000: Upload failed. 
rc %d\n", ret); return ret; } - dprintk(1, "firmware read %Zu bytes.\n", fw->size); + dprintk(1, "firmware read %zu bytes.\n", fw->size); if (fw->size != desired_fw->size) { pr_err("xc5000: Firmware file with incorrect size\n"); diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 431dd0b4b332..b1d13444ff30 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -32,6 +32,7 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/firmware.h> +#include <linux/sched/signal.h> #define FIRMWARE "cpia2/stv0672_vp4.bin" MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 81d7fd4f7776..85ab3fa48f9a 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -2414,7 +2414,7 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap) deb_info("%s: Upload failed. (file not found?)\n", __func__); return -ENODEV; } else { - deb_info("%s: firmware read %Zu bytes.\n", __func__, state->frontend_firmware->size); + deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } stk9090m_config.microcode_B_fe_size = state->frontend_firmware->size; stk9090m_config.microcode_B_fe_buffer = state->frontend_firmware->data; @@ -2480,7 +2480,7 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap) deb_info("%s: Upload failed. (file not found?)\n", __func__); return -EIO; } else { - deb_info("%s: firmware read %Zu bytes.\n", __func__, state->frontend_firmware->size); + deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } nim9090md_config[0].microcode_B_fe_size = state->frontend_firmware->size; nim9090md_config[0].microcode_B_fe_buffer = state->frontend_firmware->data; diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 23d3285f182a..e91d00762e94 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -27,6 +27,8 @@ #define MODULE_NAME "cpia1" #include <linux/input.h> +#include <linux/sched/signal.h> + #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c index 42667710af92..46fb76349000 100644 --- a/drivers/media/usb/gspca/t613.c +++ b/drivers/media/usb/gspca/t613.c @@ -570,9 +570,9 @@ static void setfreq(struct gspca_dev *gspca_dev, s32 val) /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { - /* some of this registers are not really neded, because - * they are overriden by setbrigthness, setcontrast, etc, - * but wont hurt anyway, and can help someone with similar webcam + /* some of this registers are not really needed, because + * they are overridden by setbrigthness, setcontrast, etc., + * but won't hurt anyway, and can help someone with similar webcam * to see the initial parameters.*/ struct sd *sd = (struct sd *) gspca_dev; const struct additional_sensor_data *sensor; diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c index 4afd4655d562..39c15bb2b20c 100644 --- a/drivers/media/usb/tm6000/tm6000-input.c +++ b/drivers/media/usb/tm6000/tm6000-input.c @@ -438,7 +438,7 @@ int tm6000_ir_init(struct tm6000_core *dev) /* input setup */ rc->allowed_protocols = RC_BIT_RC5 | RC_BIT_NEC; - /* Neded, in order to support NEC remotes with 24 or 32 bits */ + /* 
Needed, in order to support NEC remotes with 24 or 32 bits */ rc->scancode_mask = 0xffff; rc->priv = ir; rc->change_protocol = tm6000_ir_change_protocol; diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index 05b5c6652cfa..e48b7c032c95 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -245,7 +245,7 @@ static const struct analog_demod_ops tuner_analog_ops = { * @tuner_callback: an optional function to be called when switching * to analog mode * - * This function applys the tuner config to tuner specified + * This function applies the tuner config to tuner specified * by tun_setup structure. It contains several per-tuner initialization "magic" */ static void set_type(struct i2c_client *c, unsigned int type, @@ -463,7 +463,7 @@ attach_failed: * @sd: subdev descriptor * @tun_setup: type to be associated to a given tuner i2c address * - * This function applys the tuner config to tuner specified + * This function applies the tuner config to tuner specified * by tun_setup structure. * If tuner I2C address is UNSET, then it will only set the device * if the tuner supports the mode specified in the call. diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 36bd904946bd..0b5c43f7e020 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -21,7 +21,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 6c722d96b775..79e60ec70bd3 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -418,8 +418,9 @@ struct cxl_afu { struct dentry *debugfs; struct mutex contexts_lock; spinlock_t afu_cntl_lock; - /* Used to block access to AFU config space while deconfigured */ - struct rw_semaphore configured_rwsem; + + /* -1: AFU deconfigured/locked, >= 0: number of readers */ + atomic_t configured_state; /* AFU error buffer fields and bin attribute for sysfs */ u64 eb_len, eb_offset; diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 377e650a2a1d..2fa015c05561 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -8,7 +8,8 @@ */ #include <linux/workqueue.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/pid.h> #include <linux/mm.h> #include <linux/moduleparam.h> diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 859959f19f10..e7139c76f961 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -12,7 +12,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/bitmap.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/poll.h> #include <linux/pid.h> #include <linux/fs.h> diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 2a6bf1d0a3a4..b0b6ed31918e 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -19,6 +19,8 @@ #include <linux/slab.h> #include <linux/idr.h> #include <linux/pci.h> +#include <linux/sched/task.h> + #include <asm/cputable.h> #include <misc/cxl-base.h> @@ -268,8 +270,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) idr_init(&afu->contexts_idr); mutex_init(&afu->contexts_lock); spin_lock_init(&afu->afu_cntl_lock); - init_rwsem(&afu->configured_rwsem); - 
down_write(&afu->configured_rwsem); + atomic_set(&afu->configured_state, -1); afu->prefault_mode = CXL_PREFAULT_NONE; afu->irqs_max = afu->adapter->user_irqs; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 09505f432eda..7ae710585267 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -9,6 +9,7 @@ #include <linux/spinlock.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/mm.h> diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index cca938845ffd..91f645992c94 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1129,7 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = cxl_native_register_psl_irq(afu))) goto err2; - up_write(&afu->configured_rwsem); + atomic_set(&afu->configured_state, 0); return 0; err2: @@ -1142,7 +1142,14 @@ err1: static void pci_deconfigure_afu(struct cxl_afu *afu) { - down_write(&afu->configured_rwsem); + /* + * It's okay to deconfigure when AFU is already locked, otherwise wait + * until there are no readers + */ + if (atomic_read(&afu->configured_state) != -1) { + while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) + schedule(); + } cxl_native_release_psl_irq(afu); if (afu->adapter->native->sl_ops->release_serr_irq) afu->adapter->native->sl_ops->release_serr_irq(afu); diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 639a343b7836..512a4897dbf6 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -83,6 +83,16 @@ static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus) return phb ? phb->private_data : NULL; } +static void cxl_afu_configured_put(struct cxl_afu *afu) +{ + atomic_dec_if_positive(&afu->configured_state); +} + +static bool cxl_afu_configured_get(struct cxl_afu *afu) +{ + return atomic_inc_unless_negative(&afu->configured_state); +} + static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, struct cxl_afu *afu, int *_record) { @@ -107,7 +117,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, afu = pci_bus_to_afu(bus); /* Grab a reader lock on afu. */ - if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) + if (afu == NULL || !cxl_afu_configured_get(afu)) return PCIBIOS_DEVICE_NOT_FOUND; rc = cxl_pcie_config_info(bus, devfn, afu, &record); @@ -132,7 +142,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, } out: - up_read(&afu->configured_rwsem); + cxl_afu_configured_put(afu); return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } @@ -144,7 +154,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, afu = pci_bus_to_afu(bus); /* Grab a reader lock on afu. */ - if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) + if (afu == NULL || !cxl_afu_configured_get(afu)) return PCIBIOS_DEVICE_NOT_FOUND; rc = cxl_pcie_config_info(bus, devfn, afu, &record); @@ -166,7 +176,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, } out: - up_read(&afu->configured_rwsem); + cxl_afu_configured_put(afu); return rc ? 
PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; } diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 051b14766ef9..764ff5df0dbc 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -19,7 +19,7 @@ #include <linux/log2.h> #include <linux/bitops.h> #include <linux/jiffies.h> -#include <linux/of.h> +#include <linux/property.h> #include <linux/acpi.h> #include <linux/i2c.h> #include <linux/nvmem-provider.h> @@ -562,26 +562,26 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) return 0; } -#ifdef CONFIG_OF -static void at24_get_ofdata(struct i2c_client *client, - struct at24_platform_data *chip) +static void at24_get_pdata(struct device *dev, struct at24_platform_data *chip) { - const __be32 *val; - struct device_node *node = client->dev.of_node; - - if (node) { - if (of_get_property(node, "read-only", NULL)) - chip->flags |= AT24_FLAG_READONLY; - val = of_get_property(node, "pagesize", NULL); - if (val) - chip->page_size = be32_to_cpup(val); + int err; + u32 val; + + if (device_property_present(dev, "read-only")) + chip->flags |= AT24_FLAG_READONLY; + + err = device_property_read_u32(dev, "pagesize", &val); + if (!err) { + chip->page_size = val; + } else { + /* + * This is slow, but we can't know all eeproms, so we better + * play safe. Specifying custom eeprom-types via platform_data + * is recommended anyhow. + */ + chip->page_size = 1; } } -#else -static void at24_get_ofdata(struct i2c_client *client, - struct at24_platform_data *chip) -{ } -#endif /* CONFIG_OF */ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -613,15 +613,8 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) chip.byte_len = BIT(magic & AT24_BITMASK(AT24_SIZE_BYTELEN)); magic >>= AT24_SIZE_BYTELEN; chip.flags = magic & AT24_BITMASK(AT24_SIZE_FLAGS); - /* - * This is slow, but we can't know all eeproms, so we better - * play safe. Specifying custom eeprom-types via platform_data - * is recommended anyhow. 
- */ - chip.page_size = 1; - /* update chipdata if OF is present */ - at24_get_ofdata(client, &chip); + at24_get_pdata(&client->dev, &chip); chip.setup = NULL; chip.context = NULL; diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 3d1d55157e5f..2fad790db3bf 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/capability.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/mutex.h> diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index cb290b8ca0c8..dd4617764f14 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -29,7 +29,7 @@ #include <linux/pci.h> #include <linux/string.h> #include <linux/fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/atomic.h> diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c index 232034f5da48..5c7dd26db716 100644 --- a/drivers/misc/ibmasm/r_heartbeat.c +++ b/drivers/misc/ibmasm/r_heartbeat.c @@ -20,7 +20,7 @@ * */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "ibmasm.h" #include "dot_command.h" diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 99635dd9dbac..fc7efedbc4be 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -103,6 +103,8 @@ #include <linux/delay.h> #include <linux/kthread.h> #include <linux/module.h> +#include <linux/sched/task.h> + #include <asm/sections.h> #define v1printk(a...) do { \ diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index fb8705fc3aca..e389b0b5278d 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/types.h> diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c index 0f1581664c1c..ffb6aeac07b3 100644 --- a/drivers/misc/lkdtm_heap.c +++ b/drivers/misc/lkdtm_heap.c @@ -4,6 +4,7 @@ */ #include "lkdtm.h" #include <linux/slab.h> +#include <linux/sched.h> /* * This tries to stay within the next largest power-of-2 kmalloc cache diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 1dd611423d8b..df6ac985fbb5 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -5,6 +5,7 @@ #include "lkdtm.h" #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/sched/task_stack.h> #include <linux/mman.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index cb3e9e0ca049..df5f78ae3d25 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -16,7 +16,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 68fe37b5bc52..d3e3372424d6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -14,7 +14,7 @@ * */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c 
index 9d0b7050c79a..bf816449cd40 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -26,7 +26,7 @@ #include <linux/init.h> #include <linux/ioctl.h> #include <linux/cdev.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uuid.h> #include <linux/compat.h> #include <linux/jiffies.h> diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c index be37890abb93..77b16ca66846 100644 --- a/drivers/misc/mic/bus/mic_bus.c +++ b/drivers/misc/mic/bus/mic_bus.c @@ -143,7 +143,7 @@ static void mbus_release_dev(struct device *d) } struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va) { @@ -158,7 +158,7 @@ mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, mbdev->dev.parent = pdev; mbdev->id.device = id; mbdev->id.vendor = MBUS_DEV_ANY_ID; - mbdev->dev.archdata.dma_ops = dma_ops; + mbdev->dev.dma_ops = dma_ops; mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask; dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64)); mbdev->dev.release = mbus_release_dev; diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c index ff6e01c25810..a444db5f61fe 100644 --- a/drivers/misc/mic/bus/scif_bus.c +++ b/drivers/misc/mic/bus/scif_bus.c @@ -138,7 +138,7 @@ static void scif_release_dev(struct device *d) } struct scif_hw_dev * -scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct mic_mw *mmio, struct mic_mw *aper, void *dp, void __iomem *rdp, struct dma_chan **chan, int num_chan, @@ -154,7 +154,7 @@ scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, sdev->dev.parent = pdev; sdev->id.device = id; sdev->id.vendor = SCIF_DEV_ANY_ID; - sdev->dev.archdata.dma_ops = dma_ops; + sdev->dev.dma_ops = dma_ops; sdev->dev.release = scif_release_dev; sdev->hw_ops = hw_ops; sdev->dnode = dnode; diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h index 94f29ac608b6..ff59568219ad 100644 --- a/drivers/misc/mic/bus/scif_bus.h +++ b/drivers/misc/mic/bus/scif_bus.h @@ -113,7 +113,7 @@ int scif_register_driver(struct scif_driver *driver); void scif_unregister_driver(struct scif_driver *driver); struct scif_hw_dev * scif_register_device(struct device *pdev, int id, - struct dma_map_ops *dma_ops, + const struct dma_map_ops *dma_ops, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct mic_mw *mmio, struct mic_mw *aper, void *dp, void __iomem *rdp, diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c index 303da222f5b6..fd7f2a6049f8 100644 --- a/drivers/misc/mic/bus/vop_bus.c +++ b/drivers/misc/mic/bus/vop_bus.c @@ -154,7 +154,7 @@ vop_register_device(struct device *pdev, int id, vdev->dev.parent = pdev; vdev->id.device = id; vdev->id.vendor = VOP_DEV_ANY_ID; - vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; + vdev->dev.dma_ops = dma_ops; vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); vdev->dev.release = vop_release_dev; diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 5696df4326b5..85f7d09cc65f 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c 
@@ -19,6 +19,8 @@ * */ #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include "cosm_main.h" /* diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index 03e98bf1ac15..aa530fcceaa9 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -22,6 +22,8 @@ #include <linux/delay.h> #include <linux/reboot.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include "../cosm/cosm_main.h" #define COSM_SCIF_MAX_RETRIES 10 diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 9599d732aff3..c327985c9523 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -245,7 +245,7 @@ static void __mic_dma_unmap_sg(struct device *dev, dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir); } -static struct dma_map_ops __mic_dma_ops = { +static const struct dma_map_ops __mic_dma_ops = { .alloc = __mic_dma_alloc, .free = __mic_dma_free, .map_page = __mic_dma_map_page, @@ -344,7 +344,7 @@ mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, mic_unmap_single(mdev, dma_addr, size); } -static struct dma_map_ops mic_dma_ops = { +static const struct dma_map_ops mic_dma_ops = { .map_page = mic_dma_map_page, .unmap_page = mic_dma_unmap_page, }; diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h index a08f0b600a9e..0e5eff9ad080 100644 --- a/drivers/misc/mic/scif/scif_main.h +++ b/drivers/misc/mic/scif/scif_main.h @@ -18,7 +18,7 @@ #ifndef SCIF_MAIN_H #define SCIF_MAIN_H -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/pci.h> #include <linux/miscdevice.h> #include <linux/dmaengine.h> diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index f806a4471eb9..329727e00e97 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -17,6 +17,9 @@ */ #include <linux/dma_remapping.h> #include <linux/pagemap.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> + #include "scif_main.h" #include "scif_map.h" diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 1a2b67f3183d..c2e29d7f0de8 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -374,7 +374,7 @@ unmap: static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], struct irq_affinity *desc) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c index c344483fa7d6..2cde80c7bb93 100644 --- a/drivers/misc/vexpress-syscfg.c +++ b/drivers/misc/vexpress-syscfg.c @@ -16,7 +16,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/vexpress.h> diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index f866a4baecb5..21d0fa592145 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/slab.h> #include "vmci_queue_pair.h" @@ -303,7 +304,7 @@ int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) 
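The mic_bus, scif_bus, vop_bus and mic_boot hunks in this stretch constify the DMA method tables and hang them off dev->dma_ops rather than dev->archdata.dma_ops. The underlying idiom is the const ops table: a structure of function pointers that is never written after build time can be declared const, so it lands in read-only data and stray writes become compile errors. A small self-contained illustration with invented names (these are not kernel symbols):

#include <stdio.h>

struct io_ops {
        int (*map)(int page);
        void (*unmap)(int handle);
};

static int demo_map(int page)
{
        return page + 1000;
}

static void demo_unmap(int handle)
{
        (void)handle;
}

/* const: the table lives in .rodata; assigning to demo_ops.map won't compile. */
static const struct io_ops demo_ops = {
        .map   = demo_map,
        .unmap = demo_unmap,
};

struct fake_device {
        const struct io_ops *ops;   /* matches the const-qualified registration */
};

int main(void)
{
        struct fake_device dev = { .ops = &demo_ops };
        int handle = dev.ops->map(42);

        dev.ops->unmap(handle);
        printf("mapped handle: %d\n", handle);
        return 0;
}

Passing the pointer as const all the way through the registration helpers, as the scif and vop changes do, is what allows the tables themselves to be declared const.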
vmci_dg_size = VMCI_DG_SIZE(dg); if (vmci_dg_size > VMCI_MAX_DG_SIZE) { - pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size); + pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size); return VMCI_ERROR_INVALID_ARGS; } diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 8449516d6ac6..84258a48029d 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/rculist.h> #include "vmci_driver.h" #include "vmci_event.h" diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index ec090105eb4b..8a16a26e9658 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/init.h> diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index f84a4275ca29..498c0854305f 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -2928,7 +2928,7 @@ int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); /* - * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the comsumer. + * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. * @qpair: Pointer to the queue pair struct. * @consumer_tail: Reference used for storing consumer tail index. * @producer_head: Reference used for storing the producer head index. diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 9a53a30de445..1ab6e8737a5f 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -17,6 +17,7 @@ #include <linux/hash.h> #include <linux/types.h> #include <linux/rculist.h> +#include <linux/completion.h> #include "vmci_resource.h" #include "vmci_driver.h" diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index d29faf2addfe..6d4b72080d51 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <linux/kthread.h> #include <linux/export.h> #include <linux/wait.h> diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c index 2b7fc3764803..00750c9d3514 100644 --- a/drivers/mmc/host/mmci_qcom_dml.c +++ b/drivers/mmc/host/mmci_qcom_dml.c @@ -170,7 +170,7 @@ int dml_hw_init(struct mmci_host *host, struct device_node *np) writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT), base + DML_PIPE_ID); - /* Make sure dml intialization is finished */ + /* Make sure dml initialization is finished */ mb(); return 0; diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 82bd00af5cc3..268aae45b514 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c @@ -75,18 +75,18 @@ static char module_name[] = "lart"; /* blob */ #define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM -#define BLOB_START 0x00000000 -#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) +#define PART_BLOB_START 0x00000000 +#define PART_BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) /* kernel */ #define NUM_KERNEL_BLOCKS 7 -#define KERNEL_START (BLOB_START + BLOB_LEN) -#define KERNEL_LEN 
(NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) +#define PART_KERNEL_START (PART_BLOB_START + PART_BLOB_LEN) +#define PART_KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) /* initial ramdisk */ #define NUM_INITRD_BLOCKS 24 -#define INITRD_START (KERNEL_START + KERNEL_LEN) -#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) +#define PART_INITRD_START (PART_KERNEL_START + PART_KERNEL_LEN) +#define PART_INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) /* * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet @@ -587,20 +587,20 @@ static struct mtd_partition lart_partitions[] = { /* blob */ { .name = "blob", - .offset = BLOB_START, - .size = BLOB_LEN, + .offset = PART_BLOB_START, + .size = PART_BLOB_LEN, }, /* kernel */ { .name = "kernel", - .offset = KERNEL_START, /* MTDPART_OFS_APPEND */ - .size = KERNEL_LEN, + .offset = PART_KERNEL_START, /* MTDPART_OFS_APPEND */ + .size = PART_KERNEL_LEN, }, /* initial ramdisk / file system */ { .name = "file system", - .offset = INITRD_START, /* MTDPART_OFS_APPEND */ - .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ + .offset = PART_INITRD_START, /* MTDPART_OFS_APPEND */ + .size = PART_INITRD_LEN, /* MTDPART_SIZ_FULL */ } }; #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 6c062b8251d2..d52139635b67 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -20,6 +20,7 @@ */ #include <linux/clk.h> #include <linux/slab.h> +#include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/partitions.h> diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 1492c12906f6..b0524f8accb6 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -36,6 +36,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/nmi.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h index 4b7bee17c924..04afd0e7074f 100644 --- a/drivers/mtd/tests/mtd_test.h +++ b/drivers/mtd/tests/mtd_test.h @@ -1,5 +1,5 @@ #include <linux/mtd/mtd.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> static inline int mtdtest_relax(void) { diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 85d54f37e28f..77513195f50e 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1159,7 +1159,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) if (err) return ERR_PTR(err); - err = vfs_getattr(&path, &stat); + err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (err) return ERR_PTR(err); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 88b1897aeb40..d4b2e8744498 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -314,7 +314,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) if (error) return ERR_PTR(error); - error = vfs_getattr(&path, &stat); + error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (error) return ERR_PTR(error); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 6ea963e3b89a..62ee439d5882 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -123,7 +123,7 @@ static int __init arcnet_init(void) arc_proto_map[count] = arc_proto_default; if 
(BUGLVL(D_DURING)) - pr_info("struct sizes: %Zd %Zd %Zd %Zd %Zd\n", + pr_info("struct sizes: %zd %zd %zd %zd %zd\n", sizeof(struct arc_hardware), sizeof(struct arc_rfc1201), sizeof(struct arc_rfc1051), diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 577e57cad1dc..1bcbb8913e17 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -16,6 +16,8 @@ #include <linux/rcupdate.h> #include <linux/ctype.h> #include <linux/inet.h> +#include <linux/sched/signal.h> + #include <net/bonding.h> static int bond_option_active_slave_set(struct bonding *bond, diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index e23c3ed737de..770623a0cc01 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -24,7 +24,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index b306210b02b7..bc0eb47eccee 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -679,7 +679,8 @@ static int cfv_probe(struct virtio_device *vdev) goto err; /* Get the TX virtio ring. This is a "guest side vring". */ - err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); + err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, + NULL); if (err) goto err; diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 4063215c9b54..aac58ce6e371 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -17,7 +17,7 @@ */ #include <linux/firmware.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <asm/div64.h> #include <asm/io.h> diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a81731303730..a9ac58c351a0 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1206,7 +1206,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp) /* reserve 2 bytes for RXDWA padding */ skb_reserve(new_skb, NET_IP_ALIGN); /* Invalidate the data cache of skb->data range when it is write back - * cache. It will prevent overwritting the new data from DMA + * cache. 
It will prevent overwriting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, (unsigned long)new_skb->end); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d0d0d12b531f..e536301acfde 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -293,36 +293,29 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - bool mss_index_found = false; - int mss_index; + int mss_index = -EBUSY; int i; spin_lock(&pdata->mss_lock); /* Reuse the slot if MSS matches */ - for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) { + for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (pdata->mss[i] == mss) { pdata->mss_refcnt[i]++; mss_index = i; - mss_index_found = true; } } /* Overwrite the slot with ref_count = 0 */ - for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) { + for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (!pdata->mss_refcnt[i]) { pdata->mss_refcnt[i]++; pdata->mac_ops->set_mss(pdata, mss, i); pdata->mss[i] = mss; mss_index = i; - mss_index_found = true; } } - /* No slots with ref_count = 0 available, return busy */ - if (!mss_index_found) - mss_index = -EBUSY; - spin_unlock(&pdata->mss_lock); return mss_index; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 0ee6e208aa07..50d88d3e03b6 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -817,7 +817,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev) rx_pause_en = 1; tx_pause_en = 1; } else if (!priv->pause_auto) { - /* pause setting overrided by user */ + /* pause setting overridden by user */ rx_pause_en = priv->pause_rx; tx_pause_en = priv->pause_tx; } else { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 05356efdbf93..b209b7f6093e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6957,7 +6957,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) * hence its link is expected to be down * - SECOND_PHY means that first phy should not be able * to link up by itself (using configuration) - * - DEFAULT should be overriden during initialiazation + * - DEFAULT should be overridden during initialization */ DP(NETIF_MSG_LINK, "Invalid link indication" "mpc=0x%x. 
DISABLING LINK !!!\n", diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a448177990fe..30d1eb9ebec9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -20,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 016d481c6476..30606b11b128 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1622,7 +1622,7 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size) } } - netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", bp->dev->mtu, bp->rx_buffer_size); } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 8cd389148166..aa36e9ae7676 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -23,6 +23,8 @@ #ifndef _OCTEON_MAIN_H_ #define _OCTEON_MAIN_H_ +#include <linux/sched/signal.h> + #if BITS_PER_LONG == 32 #define CVM_CAST64(v) ((long long)(v)) #elif BITS_PER_LONG == 64 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index acc231293e4d..f6e739da7bb7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1416,7 +1416,7 @@ static unsigned int xdigit2int(unsigned char c) * <pattern data>[/<pattern mask>][@<anchor>] * * Up to 2 filter patterns can be specified. If 2 are supplied the first one - * must be anchored at 0. An omited mask is taken as a mask of 1s, an omitted + * must be anchored at 0. An omitted mask is taken as a mask of 1s, an omitted * anchor is taken as 0. 
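The xgene_enet_setup_mss() rework a few hunks back drops the separate mss_index_found flag: the index starts out as -EBUSY and both scan loops run only while it is still negative, so the no-free-slot case needs no trailing branch. A standalone sketch of the same shape, with the slot count and values invented for the example:

#include <errno.h>
#include <stdio.h>

#define NUM_SLOTS 4

static unsigned int slot_val[NUM_SLOTS];
static unsigned int slot_refcnt[NUM_SLOTS];

/* Return the slot index now holding 'val'; -EBUSY when every slot is taken. */
static int setup_slot(unsigned int val)
{
        int idx = -EBUSY;
        int i;

        /* Reuse a slot that already holds this value. */
        for (i = 0; idx < 0 && i < NUM_SLOTS; i++) {
                if (slot_refcnt[i] && slot_val[i] == val) {
                        slot_refcnt[i]++;
                        idx = i;
                }
        }

        /* Otherwise claim a slot nobody references. */
        for (i = 0; idx < 0 && i < NUM_SLOTS; i++) {
                if (!slot_refcnt[i]) {
                        slot_refcnt[i]++;
                        slot_val[i] = val;
                        idx = i;
                }
        }

        return idx;
}

int main(void)
{
        int a = setup_slot(1400);
        int b = setup_slot(9000);
        int c = setup_slot(1400);   /* matches the first slot again */

        printf("%d %d %d\n", a, b, c);   /* prints: 0 1 0 */
        return 0;
}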
*/ static ssize_t mps_trc_write(struct file *file, const char __user *buf, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5043b64805f0..8098c93cd16e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1364,6 +1364,10 @@ struct cpl_tx_data { #define TX_FORCE_S 13 #define TX_FORCE_V(x) ((x) << TX_FORCE_S) +#define T6_TX_FORCE_S 20 +#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) +#define T6_TX_FORCE_F T6_TX_FORCE_V(1U) + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h index e995a1a3840a..a91ad766cef0 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -59,7 +59,7 @@ struct cxgbi_pagepod_hdr { #define PPOD_PAGES_MAX 4 struct cxgbi_pagepod { struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; + __be64 addr[PPOD_PAGES_MAX + 1]; }; /* ddp tag format diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index cbbf8648307a..78460c52b7c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -847,9 +847,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) wr32(hw, reg_idx, reg); i40e_flush(hw); } - /* reset some of the state varibles keeping - * track of the resources - */ + /* reset some of the state variables keeping track of the resources */ vf->num_queue_pairs = 0; vf->vf_states = 0; clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 2788a5409023..68812d783f33 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -294,7 +294,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) u32 i, i2ccmd = 0; u16 phy_data_swapped; - /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { hw_dbg("PHY I2C Address %d is out of range.\n", hw->phy.addr); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 30535e6b68f0..c8ac46049f34 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1449,7 +1449,7 @@ do { \ * @atr_input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * - * This function serves two main purposes. First it applys the input_mask + * This function serves two main purposes. First it applies the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. This way it will be available for diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index e7b81a305469..024788549c25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -89,10 +89,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) } } +#define MLX4_EN_WRAP_AROUND_SEC 10UL +/* By scheduling the overflow check every 5 seconds, we have a reasonably + * good chance we wont miss a wrap around. 
+ * TOTO: Use a timer instead of a work queue to increase the guarantee. + */ +#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2) + void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) { bool timeout = time_is_before_jiffies(mdev->last_overflow_check + - mdev->overflow_period); + MLX4_EN_OVERFLOW_PERIOD); unsigned long flags; if (timeout) { @@ -237,7 +244,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { .enable = mlx4_en_phc_enable, }; -#define MLX4_EN_WRAP_AROUND_SEC 10ULL /* This function calculates the max shift that enables the user range * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register. @@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; unsigned long flags; - u64 ns, zero = 0; /* mlx4_en_init_timestamp is called for each netdev. * mdev->ptp_clock is common for all ports, skip initialization if @@ -282,13 +287,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) ktime_to_ns(ktime_get_real())); write_sequnlock_irqrestore(&mdev->clock_lock, flags); - /* Calculate period in seconds to call the overflow watchdog - to make - * sure counter is checked at least once every wrap around. - */ - ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero); - do_div(ns, NSEC_PER_SEC / 2 / HZ); - mdev->overflow_period = ns; - /* Configure the PHC */ mdev->ptp_clock_info = mlx4_en_ptp_clock_info; snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp"); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 4941b692e947..3629ce11a68b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -430,7 +430,6 @@ struct mlx4_en_dev { seqlock_t clock_lock; struct timecounter clock; unsigned long last_overflow_check; - unsigned long overflow_period; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; struct notifier_block nb; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index ee38c18c2d2d..ee1c78abab0b 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -1251,10 +1251,10 @@ struct ksz_port_info { * @tx_size: Transmit data size. Used for TX optimization. * The maximum is defined by MAX_TX_HELD_SIZE. * @perm_addr: Permanent MAC address. - * @override_addr: Overrided MAC address. + * @override_addr: Overridden MAC address. * @address: Additional MAC address entries. * @addr_list_size: Additional MAC address list size. - * @mac_override: Indication of MAC address overrided. + * @mac_override: Indication of MAC address overridden. * @promiscuous: Counter to keep track of promiscuous mode set. * @all_multi: Counter to keep track of all multicast mode set. * @multi_list: Multicast address entries. @@ -4042,7 +4042,7 @@ static int empty_addr(u8 *addr) * @hw: The hardware instance. * * This routine programs the MAC address of the hardware when the address is - * overrided. + * overridden. */ static void hw_set_addr(struct ksz_hw *hw) { @@ -7043,7 +7043,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) if (macaddr[0] != ':') get_mac_addr(hw_priv, macaddr, MAIN_PORT); - /* Read MAC address and initialize override address if not overrided. */ + /* Read MAC address and initialize override address if not overridden. */ hw_read_addr(hw); /* Multiple device interfaces mode requires a second MAC address. 
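The mlx4 clock hunk above replaces the per-device overflow_period, previously derived from the cycle counter at init time, with the compile-time constant MLX4_EN_OVERFLOW_PERIOD: given a 10-second wrap-around budget, scheduling the check every MLX4_EN_WRAP_AROUND_SEC * HZ / 2 jiffies puts at least one check inside every wrap window. A tiny standalone check of that arithmetic (HZ is fixed at 250 here purely for illustration; the kernel value depends on CONFIG_HZ):

#include <stdio.h>

#define HZ 250UL                        /* example only; CONFIG_HZ dependent */
#define MLX4_EN_WRAP_AROUND_SEC 10UL
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)

int main(void)
{
        /* 10 * 250 / 2 = 1250 jiffies, i.e. a check every 5 seconds */
        printf("period = %lu jiffies (%lu s)\n",
               MLX4_EN_OVERFLOW_PERIOD, MLX4_EN_OVERFLOW_PERIOD / HZ);
        return 0;
}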
*/ diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index c5c1d0e0c16f..118723ea681a 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5397,7 +5397,7 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, * s2io_nic structure. * @regs : pointer to the structure with parameters given by ethtool for * dumping the registers. - * @reg_space: The input argumnet into which all the registers are dumped. + * @reg_space: The input argument into which all the registers are dumped. * Description: * Dumps the entire register space of xFrame NIC into the user given * buffer area. diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index db55e6d89cf4..0452848d1316 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -119,7 +119,7 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, * @dev: device pointer. * @regs: pointer to the structure with parameters given by ethtool for * dumping the registers. - * @reg_space: The input argumnet into which all the registers are dumped. + * @reg_space: The input argument into which all the registers are dumped. * * Dumps the vpath register space of Titan NIC into the user given * buffer area. diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 61a9cd5be497..00c17fa6545b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -688,7 +688,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, #define OOO_LB_TC 9 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, + u32 min_pf_rate); void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d6c5a8165b5f..e2a081ceaf52 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3198,7 +3198,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) } /* API to configure WFQ from mcp link change */ -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, u32 min_pf_rate) { int i; @@ -3212,8 +3213,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - __qed_configure_vp_wfq_on_link_change(p_hwfn, - p_hwfn->p_dpc_ptt, + __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, min_pf_rate); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 314022df3469..87fde205149f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -679,7 +679,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, /* Min bandwidth configuration */ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); - qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate); + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt, + p_link->min_pf_rate); p_link->an = !!(status & 
LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 29ed785f1dc2..253c2bbe1e4e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3014,8 +3014,7 @@ cleanup: ack_vfs[vfid / 32] |= BIT((vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); - p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= - ~(1ULL << (rel_vf_id % 64)); + p_vf->vf_mbx.b_pending_msg = false; } return rc; @@ -3128,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx = &p_vf->vf_mbx; /* qed_iov_process_mbx_request */ - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); + if (!mbx->b_pending_msg) { + DP_NOTICE(p_hwfn, + "VF[%02x]: Trying to process mailbox message when none is pending\n", + p_vf->abs_vf_id); + return; + } + mbx->b_pending_msg = false; mbx->first_tlv = mbx->req_virt->first_tlv; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Processing mailbox message [type %04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + /* check if tlv type is known */ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && !p_vf->b_malicious) { @@ -3219,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, } } -static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) +void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) { - u64 add_bit = 1ULL << (vfid % 64); + int i; - p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; -} + memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); -static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, - u64 *events) -{ - u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; + qed_for_each_vf(p_hwfn, i) { + struct qed_vf_info *p_vf; - memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); - memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); + p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; + if (p_vf->vf_mbx.b_pending_msg) + events[i / 64] |= 1ULL << (i % 64); + } } static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, @@ -3266,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; /* Mark the event and schedule the workqueue */ - qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id); + p_vf->vf_mbx.b_pending_msg = true; qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); return 0; @@ -4030,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) return; } - qed_iov_pf_get_and_clear_pending_events(hwfn, events); + qed_iov_pf_get_pending_events(hwfn, events); DP_VERBOSE(hwfn, QED_MSG_IOV, "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index fc08cc2da6a7..a89605821522 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -140,6 +140,9 @@ struct qed_iov_vf_mbx { /* Address in VF where a pending message is located */ dma_addr_t pending_req; + /* Message from VF awaits handling */ + bool b_pending_msg; + u8 *offset; /* saved VF request header */ @@ -232,7 +235,6 @@ struct qed_vf_info { */ struct qed_pf_iov { struct qed_vf_info vfs_array[MAX_NUM_VFS]; - u64 pending_events[QED_VF_ARRAY_LENGTH]; u64 
pending_flr[QED_VF_ARRAY_LENGTH]; /* Allocate message address continuosuly and split to each VF */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 99b187bfdd55..718bf58a7da6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -178,7 +178,7 @@ const u32 qlcnic_83xx_reg_tbl[] = { 0x3540, /* Device state, DRV_REG1 */ 0x3544, /* Driver state, DRV_REG2 */ 0x3548, /* Driver scratch, DRV_REG3 */ - 0x354C, /* Device partiton info, DRV_REG4 */ + 0x354C, /* Device partition info, DRV_REG4 */ 0x3524, /* Driver IDC ver, DRV_REG5 */ 0x3550, /* FW_VER_MAJOR */ 0x3554, /* FW_VER_MINOR */ diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index c6ff0cc5ef18..93c713c1f627 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -16,6 +16,8 @@ #include <linux/i2c.h> #include <linux/mii.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 47ced8a898ca..91fb54fd03d9 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -10832,7 +10832,7 @@ /***********************************/ /* MC_CMD_GET_LICENSED_V3_FEATURE_STATES - * Query the state of an one or more licensed features. (Note that the actual + * Query the state of one or more licensed features. (Note that the actual * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE * operation or a reboot of the MC.) Used for V3 licensing (Medford) */ diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 19a458716f1a..1b6f6171d078 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -176,7 +176,7 @@ struct sis900_private { u32 msg_enable; - unsigned int cur_rx, dirty_rx; /* producer/comsumer pointers for Tx/Rx ring */ + unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */ unsigned int cur_tx, dirty_tx; /* The saved address of a sent/receive-in-place packet buffer */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 144fe84e8a53..04d9245b7149 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -416,7 +416,7 @@ struct stmmac_dma_ops { /* Configure the AXI Bus Mode Register */ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ - void (*dump_regs) (void __iomem *ioaddr); + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); /* Set tx/rx threshold in the csr6 register * An invalid value enables the store-and-forward mode */ void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, @@ -456,7 +456,7 @@ struct stmmac_ops { /* Enable RX Queues */ void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); /* Dump MAC registers */ - void (*dump_regs)(struct mac_device_info *hw); + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); /* Handle extra events on specific interrupts hw dependent */ int (*host_irq_status)(struct mac_device_info *hw, struct stmmac_extra_stats *x); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 91c8926b7479..19b9b3087099 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -92,17 +92,13 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) return !!(value & GMAC_CONTROL_IPC); } -static void dwmac1000_dump_regs(struct mac_device_info *hw) +static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; int i; - pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr); - for (i = 0; i < 55; i++) { - int offset = i * 4; - pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, - offset, readl(ioaddr + offset)); - } + for (i = 0; i < 55; i++) + reg_space[i] = readl(ioaddr + i * 4); } static void dwmac1000_set_umac_addr(struct mac_device_info *hw, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index fbaec0ffd9ef..d3654a447046 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -201,18 +201,14 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, writel(csr6, ioaddr + DMA_CONTROL); } -static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) +static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - pr_info(" DMA registers\n"); - for (i = 0; i < 22; i++) { - if ((i < 9) || (i > 17)) { - int offset = i * 4; - pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, - (DMA_BUS_MODE + offset), - readl(ioaddr + DMA_BUS_MODE + offset)); - } - } + + for (i = 0; i < 22; i++) + if ((i < 9) || (i > 17)) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); } static void dwmac1000_get_hw_feature(void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 8ab518997b1b..e370ccec6176 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -40,28 +40,18 @@ static void dwmac100_core_init(struct mac_device_info *hw, int mtu) #endif } -static void dwmac100_dump_mac_regs(struct mac_device_info *hw) +static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; - pr_info("\t----------------------------------------------\n" - "\t DWMAC 100 CSR (base addr = 0x%p)\n" - "\t----------------------------------------------\n", ioaddr); - pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, - readl(ioaddr + MAC_CONTROL)); - pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, - readl(ioaddr + MAC_ADDR_HIGH)); - pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, - readl(ioaddr + MAC_ADDR_LOW)); - pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", - MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); - pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", - MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); - pr_info("\tflow control (offset 0x%x): 0x%08x\n", - MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); - pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, - readl(ioaddr + MAC_VLAN1)); - pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, - readl(ioaddr + MAC_VLAN2)); + + reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL); + reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH); + reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW); + reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH); + reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW); + 
reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL); + reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1); + reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2); } static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index d40e91e8fc7b..eef2f222ce9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -66,19 +66,18 @@ static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, writel(csr6, ioaddr + DMA_CONTROL); } -static void dwmac100_dump_dma_regs(void __iomem *ioaddr) +static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - pr_debug("DWMAC 100 DMA CSR\n"); for (i = 0; i < 9; i++) - pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, - (DMA_BUS_MODE + i * 4), - readl(ioaddr + DMA_BUS_MODE + i * 4)); + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); - pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", - DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), - DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); + reg_space[DMA_CUR_TX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[DMA_CUR_RX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_RX_BUF_ADDR); } /* DMA controller has two counters to track the number of the missed frames. */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 202216cd6789..1e79e6529c4a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -70,19 +70,13 @@ static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) writel(value, ioaddr + GMAC_RXQ_CTRL0); } -static void dwmac4_dump_regs(struct mac_device_info *hw) +static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; int i; - pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr); - - for (i = 0; i < GMAC_REG_NUM; i++) { - int offset = i * 4; - - pr_debug("\tReg No. 
%d (offset 0x%x): 0x%08x\n", i, - offset, readl(ioaddr + offset)); - } + for (i = 0; i < GMAC_REG_NUM; i++) + reg_space[i] = readl(ioaddr + i * 4); } static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 377d1b44d4f2..f97b0d5d9987 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -127,53 +127,51 @@ static void dwmac4_dma_init(void __iomem *ioaddr, dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); } -static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel) +static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, + u32 *reg_space) { - pr_debug(" Channel %d\n", channel); - pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0, - readl(ioaddr + DMA_CHAN_CONTROL(channel))); - pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4, - readl(ioaddr + DMA_CHAN_TX_CONTROL(channel))); - pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8, - readl(ioaddr + DMA_CHAN_RX_CONTROL(channel))); - pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14, - readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel))); - pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c, - readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel))); - pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20, - readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel))); - pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28, - readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel))); - pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c, - readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel))); - pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30, - readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel))); - pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34, - readl(ioaddr + DMA_CHAN_INTR_ENA(channel))); - pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38, - readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel))); - pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c, - readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel))); - pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44, - readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel))); - pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c, - readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel))); - pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54, - readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel))); - pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c, - readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel))); - pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60, - readl(ioaddr + DMA_CHAN_STATUS(channel))); + reg_space[DMA_CHAN_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_CONTROL(channel)); + reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); + reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); + reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)); + reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)); + 
reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)); + reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)); + reg_space[DMA_CHAN_INTR_ENA(channel) / 4] = + readl(ioaddr + DMA_CHAN_INTR_ENA(channel)); + reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)); + reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)); + reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); + reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_STATUS(channel)); } -static void dwmac4_dump_dma_regs(void __iomem *ioaddr) +static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - pr_debug(" GMAC4 DMA registers\n"); - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) - _dwmac4_dump_dma_regs(ioaddr, i); + _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5ff6bc4eb8f1..85d64114e159 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -435,32 +435,14 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) static void stmmac_ethtool_gregs(struct net_device *dev, struct ethtool_regs *regs, void *space) { - int i; u32 *reg_space = (u32 *) space; struct stmmac_priv *priv = netdev_priv(dev); memset(reg_space, 0x0, REG_SPACE_SIZE); - if (priv->plat->has_gmac || priv->plat->has_gmac4) { - /* MAC registers */ - for (i = 0; i < 55; i++) - reg_space[i] = readl(priv->ioaddr + (i * 4)); - /* DMA registers */ - for (i = 0; i < 22; i++) - reg_space[i + 55] = - readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); - } else { - /* MAC registers */ - for (i = 0; i < 12; i++) - reg_space[i] = readl(priv->ioaddr + (i * 4)); - /* DMA registers */ - for (i = 0; i < 9; i++) - reg_space[i + 12] = - readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); - reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); - reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); - } + priv->hw->mac->dump_regs(priv->hw, reg_space); + priv->hw->dma->dump_regs(priv->ioaddr, reg_space); } static void diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3cbe09682afe..4498a3861aa3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1729,11 +1729,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); - /* Dump DMA/MAC registers */ - if (netif_msg_hw(priv)) { - priv->hw->mac->dump_regs(priv->hw); - priv->hw->dma->dump_regs(priv->ioaddr); - } priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index bda0c6413450..89698741682f 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1330,7 +1330,7 @@ static int __init 
gtp_init(void) if (err < 0) goto unreg_genl_family; - pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n", + pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 6e8f616be48e..1dba16bc7f8d 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -24,6 +24,7 @@ #include <linux/dma/pxa-dma.h> #include <linux/gpio.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 42da094b68dd..7ee514879531 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -40,6 +40,7 @@ #include <linux/moduleparam.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/ktime.h> #include <linux/types.h> #include <linux/time.h> diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a4bfc10b61dd..da85057680d6 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d6f7838455dd..1be69d8bc909 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -146,7 +146,7 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) */ int phy_aneg_done(struct phy_device *phydev) { - if (phydev->drv->aneg_done) + if (phydev->drv && phydev->drv->aneg_done) return phydev->drv->aneg_done(phydev); return genphy_aneg_done(phydev); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a411b43a69eb..f9c0e62716ea 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/list.h> diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 08db4d687533..1da31dc47f86 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -66,7 +66,7 @@ #include <linux/uaccess.h> #include <linux/bitops.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 35b55a2fa1a1..4d4173d25dd0 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -8,7 +8,7 @@ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 30863e378925..dc1b1dd9157c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -44,6 +44,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e7b516342678..4f2e8141dbe2 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -52,7 +52,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> diff 
--git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index 3e37724d30ae..8aefb282c862 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -343,7 +343,7 @@ static const struct driver_info kalmia_info = { static const struct usb_device_id products[] = { /* The unswitched USB ID, to get the module auto loaded: */ { USB_DEVICE(0x04e8, 0x689a) }, - /* The stick swithed into modem (by e.g. usb_modeswitch): */ + /* The stick switched into modem (by e.g. usb_modeswitch): */ { USB_DEVICE(0x04e8, 0x6889), .driver_info = (unsigned long) &kalmia_info, }, { /* EMPTY == end of list */} }; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 24d5272cdce5..805674550683 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 4f4f71b2966b..c5b21138b7eb 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -383,7 +383,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) /* REVISIT: peripheral "alignment" request is ignored ... */ dev_dbg(&intf->dev, - "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n", + "hard mtu %u (%u from dev), rx buflen %zu, align %d\n", dev->hard_mtu, tmp, dev->rx_urb_size, 1 << le32_to_cpu(u.init_c->packet_alignment)); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index d9440bc022f2..ac69f28d92d2 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -379,7 +379,7 @@ static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) u32 expected_length; if (datalen < sizeof(struct lsi_umts_single)) { - netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", + netdev_err(dev->net, "%s: Data length %d, exp >= %zu\n", __func__, datalen, sizeof(struct lsi_umts_single)); return -1; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bf95016f442a..66fd3139be60 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2080,7 +2080,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 556953f53437..b7911994112a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2035,7 +2035,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, const struct iphdr *old_iph = ip_hdr(skb); union vxlan_addr *dst; union vxlan_addr remote_ip, local_ip; - union vxlan_addr *src; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; @@ -2062,7 +2061,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = (rdst->remote_vni) ? : default_vni; - src = &vxlan->cfg.saddr; + local_ip = vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; md->gbp = skb->mark; ttl = vxlan->cfg.ttl; @@ -2095,7 +2094,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst = &remote_ip; dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; vni = tunnel_id_to_key32(info->key.tun_id); - src = &local_ip; dst_cache = &info->dst_cache; if (info->options_len) md = ip_tunnel_info_opts(info); @@ -2115,7 +2113,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rt = vxlan_get_route(vxlan, dev, sock4, skb, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, - &src->sin.sin_addr.s_addr, + &local_ip.sin.sin_addr.s_addr, dst_port, src_port, dst_cache, info); if (IS_ERR(rt)) { @@ -2142,7 +2140,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto tx_error; - udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, src->sin.sin_addr.s_addr, + udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) @@ -2152,7 +2150,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, dev, sock6, skb, rdst ? rdst->remote_ifindex : 0, tos, label, &dst->sin6.sin6_addr, - &src->sin6.sin6_addr, + &local_ip.sin6.sin6_addr, dst_port, src_port, dst_cache, info); if (IS_ERR(ndst)) { @@ -2180,7 +2178,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, - &src->sin6.sin6_addr, + &local_ip.sin6.sin6_addr, &dst->sin6.sin6_addr, tos, ttl, label, src_port, dst_port, !udp_sum); #endif @@ -2675,7 +2673,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) if (data[IFLA_VXLAN_ID]) { __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); - if (id >= VXLAN_VID_MASK) + if (id >= VXLAN_N_VID) return -ERANGE; } diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 087eb266601f..4ca71bca39ac 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -78,7 +78,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fs.h> diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index e74664b84925..502c346aa790 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -237,7 +237,7 @@ void __i2400mu_bm_notif_cb(struct urb *urb) * * @i2400m: device descriptor * @urb: urb to use - * @completion: completion varible to complete when done + * @completion: completion variable to complete when done * * Data is always read to i2400m->bm_ack_buf */ diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index b7fe0af4cb24..363b30a549c2 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -20,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/inetdevice.h> #include <linux/export.h> +#include <linux/sched/signal.h> #include "core.h" #include "cfg80211.h" diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 815efe9fd208..5214dd7a3936 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -59,13 +59,13 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = { /* * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm. * With OFDM for single stream you just add up all antenna inputs, you're - * only interested in what you get after FFT. Signal aligment is also not + * only interested in what you get after FFT. 
Signal alignment is also not * required for OFDM because any phase difference adds up in the frequency * domain. * * MRC requires extra work for use with CCK. You need to align the antenna * signals from the different antenna before you can add the signals together. - * You need aligment of signals as CCK is in time domain, so addition can cancel + * You need alignment of signals as CCK is in time domain, so addition can cancel * your signal completely if phase is 180 degrees (think of adding sine waves). * You also need to remove noise before the addition and this is where ANI * MRC CCK comes into play. One of the antenna inputs may be stronger but diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index e97ab2b91663..cdafebb9c936 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -36,7 +36,7 @@ #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/workqueue.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/slab.h> diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 10098b7586f3..944b83cfc519 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4874,7 +4874,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, kfree(af_params); } else { brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control); - brcmf_dbg_hex_dump(true, buf, len, "payload, len=%Zu\n", len); + brcmf_dbg_hex_dump(true, buf, len, "payload, len=%zu\n", len); } exit: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c5744b45ec8f..65689469c5a1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -22,7 +22,7 @@ #include <linux/pci_ids.h> #include <linux/netdevice.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 356aba9d3d53..f922859acf40 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -1238,7 +1238,7 @@ static int ipw2100_get_hw_features(struct ipw2100_priv *priv) } /* - * Start firmware execution after power on and intialization + * Start firmware execution after power on and initialization * The sequence is: * 1. Release ARC * 2. Wait for f/w initialization completes; @@ -1277,7 +1277,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv) /* Release ARC - clear reset bit */ write_register(priv->net_dev, IPW_REG_RESET_REG, 0); - /* wait for f/w intialization complete */ + /* wait for f/w initialization complete */ IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); i = 5000; do { @@ -5652,7 +5652,7 @@ static void shim__set_security(struct net_device *dev, /* As a temporary work around to enable WPA until we figure out why * wpa_supplicant toggles the security capability of the driver, which - * forces a disassocation with force_update... + * forces a disassociation with force_update... 
* * if (force_update || !(priv->status & STATUS_ASSOCIATED))*/ if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ef9af8a29cad..5ef3c5cc47c5 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3974,7 +3974,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) return; } - IPW_DEBUG_ASSOC("Disassocation attempt from %pM " + IPW_DEBUG_ASSOC("Disassociation attempt from %pM " "on channel %d.\n", priv->assoc_request.bssid, priv->assoc_request.channel); @@ -5196,7 +5196,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv) * Move all used packet from rx_used to rx_free, allocating a new SKB for each. * Also restock the Rx queue via ipw_rx_queue_restock. * - * This is called as a scheduled work item (except for during intialization) + * This is called as a scheduled work item (except for during initialization) */ static void ipw_rx_queue_replenish(void *data) { diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index a91d170a614b..2781f5728d07 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4855,39 +4855,39 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) */ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); - D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size); - D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size); - D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size); - D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size); - D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size); + D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size); + D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size); + D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size); + D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size); + D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size); /* Verify that uCode images will fit in card's SRAM */ if (pieces.inst_size > il->hw_params.max_inst_size) { - IL_ERR("uCode instr len %Zd too large to fit in\n", + IL_ERR("uCode instr len %zd too large to fit in\n", pieces.inst_size); goto try_again; } if (pieces.data_size > il->hw_params.max_data_size) { - IL_ERR("uCode data len %Zd too large to fit in\n", + IL_ERR("uCode data len %zd too large to fit in\n", pieces.data_size); goto try_again; } if (pieces.init_size > il->hw_params.max_inst_size) { - IL_ERR("uCode init instr len %Zd too large to fit in\n", + IL_ERR("uCode init instr len %zd too large to fit in\n", pieces.init_size); goto try_again; } if (pieces.init_data_size > il->hw_params.max_data_size) { - IL_ERR("uCode init data len %Zd too large to fit in\n", + IL_ERR("uCode init data len %zd too large to fit in\n", pieces.init_data_size); goto try_again; } if (pieces.boot_size > il->hw_params.max_bsm_size) { - IL_ERR("uCode boot instr len %Zd too large to fit in\n", + IL_ERR("uCode boot instr len %zd too large to fit in\n", pieces.boot_size); goto try_again; } @@ -4938,7 +4938,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) /* Copy images into buffers for card's bus-master reads ... 
*/ /* Runtime instructions (first block of data in file) */ - D_INFO("Copying (but not loading) uCode instr len %Zd\n", + D_INFO("Copying (but not loading) uCode instr len %zd\n", pieces.inst_size); memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size); @@ -4949,28 +4949,28 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) * Runtime data * NOTE: Copy into backup buffer will be done in il_up() */ - D_INFO("Copying (but not loading) uCode data len %Zd\n", + D_INFO("Copying (but not loading) uCode data len %zd\n", pieces.data_size); memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size); memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size); /* Initialization instructions */ if (pieces.init_size) { - D_INFO("Copying (but not loading) init instr len %Zd\n", + D_INFO("Copying (but not loading) init instr len %zd\n", pieces.init_size); memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size); } /* Initialization data */ if (pieces.init_data_size) { - D_INFO("Copying (but not loading) init data len %Zd\n", + D_INFO("Copying (but not loading) init data len %zd\n", pieces.init_data_size); memcpy(il->ucode_init_data.v_addr, pieces.init_data, pieces.init_data_size); } /* Bootstrap instructions */ - D_INFO("Copying (but not loading) boot instr len %Zd\n", + D_INFO("Copying (but not loading) boot instr len %zd\n", pieces.boot_size); memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 0e0293d42b5d..be466a074c1d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1141,21 +1141,21 @@ static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, const struct iwl_cfg *cfg) { - IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n", + IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n", + IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); - IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n", + IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n", + IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); /* Verify that uCode images will fit in card's SRAM. 
*/ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { - IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", + IWL_ERR(drv, "uCode instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); return -1; @@ -1163,7 +1163,7 @@ static int validate_sec_sizes(struct iwl_drv *drv, if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { - IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", + IWL_ERR(drv, "uCode data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; @@ -1171,7 +1171,7 @@ static int validate_sec_sizes(struct iwl_drv *drv, if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { - IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n", + IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); return -1; @@ -1179,7 +1179,7 @@ static int validate_sec_sizes(struct iwl_drv *drv, if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { - IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n", + IWL_ERR(drv, "uCode init data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 544ef7adde7d..04dfd040a650 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -43,7 +43,7 @@ #include <linux/delay.h> #include <linux/random.h> #include <linux/wait.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index a5656bc0e6aa..b2c6b065b542 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2,7 +2,7 @@ #include <linux/slab.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/module.h> diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index abdd0cf710bf..fac28bd8fbee 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, return; spin_lock_irqsave(&priv->ack_status_lock, flags); - ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id); - if (ack_skb) - idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); + ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); spin_unlock_irqrestore(&priv->ack_status_lock, flags); if (ack_skb) { diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 28c2f6fae3e6..e4ff3b973850 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -673,8 +673,8 @@ void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac, spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } -/* This function update non-tdls peer ralist tx_pause while - * tdls channel swithing +/* This function updates 
non-tdls peer ralist tx_pause while + * tdls channel switching */ void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, u8 *mac, u8 tx_pause) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index 1922e78ad6bd..89a0a28b8b20 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -455,7 +455,7 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen, u8 i = 0; do { - /* 8 - Byte aligment */ + /* 8 - Byte alignment */ len = H2C_TX_CMD_HDR_LEN + N_BYTE_ALIGMENT(pcmd_len[i], 8); /* Buffer length is not enough */ @@ -504,7 +504,7 @@ static u32 _rtl92s_get_h2c_cmdlen(u32 h2cbufferlen, u32 cmd_num, u32 *pcmd_len) u8 i = 0; do { - /* 8 - Byte aligment */ + /* 8 - Byte alignment */ len = H2C_TX_CMD_HDR_LEN + N_BYTE_ALIGMENT(pcmd_len[i], 8); /* Buffer length is not enough */ diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index ef5d394f185b..cc8deecea8cb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -516,7 +516,7 @@ err: /** * rsi_disconnect() - This function performs the reverse of the probe function, - * it deintialize the driver structure. + * it deinitialize the driver structure. * @pfunction: Pointer to the USB interface structure. * * Return: None. diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 5bdf7a03e3dd..d1aa3eee0e81 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -178,7 +178,7 @@ static struct wlcore_conf wl18xx_conf = { .sg = { .params = { [WL18XX_CONF_SG_PARAM_0] = 0, - /* Configuartion Parameters */ + /* Configuration Parameters */ [WL18XX_CONF_SG_ANTENNA_CONFIGURATION] = 0, [WL18XX_CONF_SG_ZIGBEE_COEX] = 0, [WL18XX_CONF_SG_TIME_SYNC] = 0, diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index d0b7734030ef..58898b99d3f7 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -544,7 +544,7 @@ static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) return 0; } -/* vif-specific intialization */ +/* vif-specific initialization */ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index a2d326760a72..829b26cd4549 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -31,6 +31,7 @@ #include "common.h" #include <linux/kthread.h> +#include <linux/sched/task.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index d9c55830b2b2..a966c6a85ea8 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -487,7 +487,7 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code, /* * pn533_send_cmd_direct_async * - * The function sends a piority cmd directly to the chip omiting the cmd + * The function sends a piority cmd directly to the chip omitting the cmd * queue. It's intended to be used by chaining mechanism of received responses * where the host has to request every single chunk of data before scheduling * next cmd from the queue. 
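The %Zd/%Zu format strings rewritten to %zd/%zu in the hunks above all follow one rule: size_t and ssize_t values take the standard C99 "z" length modifier, while the capital "%Z" form is a non-standard leftover the kernel is phasing out. As an illustration only, a minimal hypothetical module (not part of this series, names invented) showing the intended usage:

#include <linux/module.h>
#include <linux/kernel.h>

struct demo_ctx {
	u32 id;
	u64 cookie;
};

static int __init zfmt_demo_init(void)
{
	/* sizeof() evaluates to size_t, so print it with %zu (or %zd for ssize_t) */
	pr_info("demo ctx size %zu bytes\n", sizeof(struct demo_ctx));
	return 0;
}

static void __exit zfmt_demo_exit(void)
{
}

module_init(zfmt_demo_init);
module_exit(zfmt_demo_exit);
MODULE_LICENSE("GPL");
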
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 25ec4e585220..9b3b57fef446 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2344,6 +2344,53 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_kill_queues); +void nvme_unfreeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_unfreeze_queue(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_unfreeze); + +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) { + timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); + if (timeout <= 0) + break; + } + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + +void nvme_wait_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_wait(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze); + +void nvme_start_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_start(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_start_freeze); + void nvme_stop_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a3da1e90b99d..2aa20e3e5675 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -294,6 +294,10 @@ void nvme_queue_async_events(struct nvme_ctrl *ctrl); void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); +void nvme_unfreeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 57a1af52b06e..26a5fd05fe88 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1038,9 +1038,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth) + int depth, int node) { - struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); + struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, + node); if (!nvmeq) return NULL; @@ -1217,7 +1218,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, + dev_to_node(dev->dev)); if (!nvmeq) return -ENOMEM; } @@ -1309,7 +1311,9 @@ static int nvme_create_io_queues(struct nvme_dev *dev) int ret = 0; for (i = dev->queue_count; i <= dev->max_qid; i++) { - if (!nvme_alloc_queue(dev, i, dev->q_depth)) { + /* vector == qid - 1, match nvme_create_queue */ + if (!nvme_alloc_queue(dev, i, dev->q_depth, + pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { ret = -ENOMEM; break; } @@ -1671,21 +1675,34 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { int i, queues; - u32 csts = -1; + bool dead = true; + struct pci_dev *pdev = to_pci_dev(dev->dev); del_timer_sync(&dev->watchdog_timer); mutex_lock(&dev->shutdown_lock); - if (pci_is_enabled(to_pci_dev(dev->dev))) { - nvme_stop_queues(&dev->ctrl); - csts = readl(dev->bar + NVME_REG_CSTS); + if (pci_is_enabled(pdev)) { + u32 csts = readl(dev->bar + NVME_REG_CSTS); + + if (dev->ctrl.state == NVME_CTRL_LIVE) + nvme_start_freeze(&dev->ctrl); + dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || + pdev->error_state != pci_channel_io_normal); } + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. + */ + if (!dead && shutdown) + nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); + nvme_stop_queues(&dev->ctrl); + queues = dev->online_queues - 1; for (i = dev->queue_count - 1; i > 0; i--) nvme_suspend_queue(dev->queues[i]); - if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { + if (dead) { /* A device might become IO incapable very soon during * probe, before the admin queue is configured. Thus, * queue_count can be 0 here. @@ -1700,6 +1717,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. + */ + if (shutdown) + nvme_start_queues(&dev->ctrl); mutex_unlock(&dev->shutdown_lock); } @@ -1822,7 +1847,9 @@ static void nvme_reset_work(struct work_struct *work) nvme_remove_namespaces(&dev->ctrl); } else { nvme_start_queues(&dev->ctrl); + nvme_wait_freeze(&dev->ctrl); nvme_dev_add(dev); + nvme_unfreeze(&dev->ctrl); } if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 49b2121af689..779f516e7a4e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1051,7 +1051,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, * sequencer is not allocated in our driver's tagset and it's * triggered to be freed by blk_cleanup_queue(). So we need to * always mark it as signaled to ensure that the "wr_cqe", which is - * embeded in request's payload, is not freed when __ib_process_cq() + * embedded in request's payload, is not freed when __ib_process_cq() * calls wr_cqe->done(). 
*/ if ((++queue->sig_count % 32) == 0 || flush) @@ -1251,7 +1251,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) dev = nvme_rdma_find_get_device(queue->cm_id); if (!dev) { - dev_err(queue->cm_id->device->dma_device, + dev_err(queue->cm_id->device->dev.parent, "no client data found!\n"); return -ECONNREFUSED; } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 94e524fea568..a7bcff45f437 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -13,6 +13,8 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> +#include <linux/rculist.h> + #include <generated/utsrelease.h> #include <asm/unaligned.h> #include "nvmet.h" diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 5267ce20c12d..11b0a0a5f661 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -14,6 +14,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/random.h> +#include <linux/rculist.h> + #include "nvmet.h" static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 642478d35e99..ac27f3d3fbb4 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -31,6 +31,8 @@ #include <linux/fs.h> #include <linux/oprofile.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/gfp.h> #include "oprofile_stats.h" diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 0581461c3a67..eda2633a393d 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -23,6 +23,8 @@ #include <linux/oprofile.h> #include <linux/errno.h> +#include <asm/ptrace.h> + #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 67935fbbbcab..32888f2bd1a9 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -14,7 +14,7 @@ #include <linux/vmalloc.h> #include <linux/oprofile.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/dcookies.h> #include <linux/fs.h> diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index b0558e2451b5..e32ca2ef9e54 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1011,7 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); } -static struct dma_map_ops ccio_ops = { +static const struct dma_map_ops ccio_ops = { .dma_supported = ccio_dma_supported, .alloc = ccio_alloc, .free = ccio_free, diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c index ef31b77404ef..e2a3112f1c98 100644 --- a/drivers/parisc/power.c +++ b/drivers/parisc/power.c @@ -39,7 +39,7 @@ #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/reboot.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/pm.h> diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 151b86b6d2e2..33385e574433 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1069,7 +1069,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, } -static struct dma_map_ops sba_ops = { +static const struct dma_map_ops sba_ops = { .dma_supported = sba_dma_supported, .alloc = 
sba_alloc, .free = sba_free, diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index d998d0ed2bec..46eb15fb57ff 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -23,7 +23,7 @@ #include <linux/parport.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <asm/current.h> #include <linux/uaccess.h> diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index f9fd4b33a546..74cc6dd982d2 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/timer.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index c0e7d21c88c2..a959224d011b 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -17,7 +17,7 @@ #include <linux/module.h> #include <linux/parport.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #undef DEBUG /* undef me for production */ @@ -307,7 +307,7 @@ size_t parport_ieee1284_read_byte (struct parport *port, if (parport_read_status (port) & PARPORT_STATUS_ERROR) { end_of_data: DPRINTK (KERN_DEBUG - "%s: No more byte data (%Zd bytes)\n", + "%s: No more byte data (%zd bytes)\n", port->name, count); /* Go to reverse idle phase. */ diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index 30e981be14c2..dcbeeb220dda 100644 --- a/drivers/parport/parport_ip32.c +++ b/drivers/parport/parport_ip32.c @@ -102,7 +102,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/parport.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 78530d1714dc..9d42dfe65d44 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -44,7 +44,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> @@ -902,7 +902,7 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port, * ****************************************** */ -/* GCC is not inlining extern inline function later overwriten to non-inline, +/* GCC is not inlining extern inline function later overwritten to non-inline, so we use outlined_ variants here. 
*/ static const struct parport_operations parport_pc_ops = { .write_data = parport_pc_write_data, diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 3308427ed9f7..bc090daa850a 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -27,7 +27,7 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/device.h> diff --git a/drivers/pci/access.c b/drivers/pci/access.c index b9dd37c8c9ce..8b7382705bf2 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -1,7 +1,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/wait.h> diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 18ef1a93c10a..e27ad2a3bd33 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -282,7 +282,7 @@ static struct device *to_vmd_dev(struct device *dev) return &vmd->dev->dev; } -static struct dma_map_ops *vmd_dma_ops(struct device *dev) +static const struct dma_map_ops *vmd_dma_ops(struct device *dev) { return get_dma_ops(to_vmd_dev(dev)); } diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 7ec8a8f72c69..95f689f53920 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 9103a7b9f3b9..48c8a066a6b7 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -32,7 +32,7 @@ #include <asm/io.h> /* for read? and write? 
functions */ #include <linux/delay.h> /* for delays */ #include <linux/mutex.h> -#include <linux/sched.h> /* for signal_pending() */ +#include <linux/sched/signal.h> /* for signal_pending() */ #define MY_NAME "cpqphp" diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 37d70b5ad22f..06109d40c4ac 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -33,7 +33,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/delay.h> -#include <linux/sched.h> /* signal_pending() */ +#include <linux/sched/signal.h> /* signal_pending() */ #include <linux/pcieport_if.h> #include <linux/mutex.h> #include <linux/workqueue.h> diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index d2961ef39a3a..7c203198b582 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn); -static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) +static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, + bool disable_device) { struct pci_dev *pdev = php_slot->pdev; + int irq = php_slot->irq; u16 ctrl; if (php_slot->irq > 0) { @@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) php_slot->wq = NULL; } - if (pdev->msix_enabled) - pci_disable_msix(pdev); - else if (pdev->msi_enabled) - pci_disable_msi(pdev); + if (disable_device || irq > 0) { + if (pdev->msix_enabled) + pci_disable_msix(pdev); + else if (pdev->msi_enabled) + pci_disable_msi(pdev); + + pci_disable_device(pdev); + } } static void pnv_php_free_slot(struct kref *kref) @@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref) struct pnv_php_slot, kref); WARN_ON(!list_empty(&php_slot->children)); - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, false); kfree(php_slot->name); kfree(php_slot); } @@ -76,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref) static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot) { - if (WARN_ON(!php_slot)) + if (!php_slot) return; kref_put(&php_slot->kref, pnv_php_free_slot); @@ -430,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) if (ret) return ret; - /* Proceed if there have nothing behind the slot */ - if (presence == OPAL_PCI_SLOT_EMPTY) + /* + * Proceed if there have nothing behind the slot. However, + * we should leave the slot in registered state at the + * beginning. Otherwise, the PCI devices inserted afterwards + * won't be probed and populated. 
+ */ + if (presence == OPAL_PCI_SLOT_EMPTY) { + if (!php_slot->power_state_check) { + php_slot->power_state_check = true; + + return 0; + } + goto scan; + } /* * If the power supply to the slot is off, we can't detect @@ -705,10 +723,15 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) if (sts & PCI_EXP_SLTSTA_DLLSC) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); - } else if (sts & PCI_EXP_SLTSTA_PDC) { + } else if (!(php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) && + (sts & PCI_EXP_SLTSTA_PDC)) { ret = pnv_pci_get_presence_state(php_slot->id, &presence); - if (!ret) + if (ret) { + dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n", + php_slot->name, ret, sts); return IRQ_HANDLED; + } + added = !!(presence == OPAL_PCI_SLOT_PRESENT); } else { return IRQ_NONE; @@ -752,6 +775,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) { struct pci_dev *pdev = php_slot->pdev; + u32 broken_pdc = 0; u16 sts, ctrl; int ret; @@ -759,29 +783,44 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); if (!php_slot->wq) { dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, true); return; } + /* Check PDC (Presence Detection Change) is broken or not */ + ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc", + &broken_pdc); + if (!ret && broken_pdc) + php_slot->flags |= PNV_PHP_FLAG_BROKEN_PDC; + /* Clear pending interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); - sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) + sts |= PCI_EXP_SLTSTA_DLLSC; + else + sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); /* Request the interrupt */ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, php_slot->name, php_slot); if (ret) { - pnv_php_disable_irq(php_slot); + pnv_php_disable_irq(php_slot, true); dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); return; } /* Enable the interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); - ctrl |= (PCI_EXP_SLTCTL_HPIE | - PCI_EXP_SLTCTL_PDCE | - PCI_EXP_SLTCTL_DLLSCE); + if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) { + ctrl &= ~PCI_EXP_SLTCTL_PDCE; + ctrl |= (PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_DLLSCE); + } else { + ctrl |= (PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_PDCE | + PCI_EXP_SLTCTL_DLLSCE); + } pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); /* The interrupt is initialized successfully when @irq is valid */ @@ -793,6 +832,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot) struct pci_dev *pdev = php_slot->pdev; int irq, ret; + /* + * The MSI/MSIx interrupt might have been occupied by other + * drivers. Don't populate the surprise hotplug capability + * in that case. 
+ */ + if (pci_dev_msi_enabled(pdev)) + return; + ret = pci_enable_device(pdev); if (ret) { dev_warn(&pdev->dev, "Error %d enabling device\n", ret); diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 4da8fc601467..70c7ea6af034 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -33,7 +33,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/delay.h> -#include <linux/sched.h> /* signal_pending(), struct timer_list */ +#include <linux/sched/signal.h> /* signal_pending(), struct timer_list */ #include <linux/mutex.h> #include <linux/workqueue.h> diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 980eaf588281..d571bc330686 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1298,6 +1298,22 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) } EXPORT_SYMBOL(pci_irq_get_affinity); +/** + * pci_irq_get_node - return the numa node of a particular msi vector + * @pdev: PCI device to operate on + * @vec: device-relative interrupt vector index (0-based). + */ +int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + const struct cpumask *mask; + + mask = pci_irq_get_affinity(pdev, vec); + if (mask) + return local_memory_node(cpu_to_node(cpumask_first(mask))); + return dev_to_node(&pdev->dev); +} +EXPORT_SYMBOL(pci_irq_get_node); + struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { return to_pci_dev(desc->dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ca77d235867f..f754453fe754 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3630,7 +3630,7 @@ static int __init pci_apply_final_quirks(void) fs_initcall_sync(pci_apply_final_quirks); /* - * Followings are device-specific reset methods which can be used to + * Following are device-specific reset methods which can be used to * reset a single function if other methods (e.g. FLR, PM D0->D3) are * not available. */ diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 6d9335865880..9612b84bc3e0 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -20,6 +20,7 @@ #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/sched/clock.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/irqdesc.h> diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index 8968dd7aebed..e8c4e4f934a6 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -70,7 +70,7 @@ config PINCTRL_CYGNUS_MUX The Broadcom Cygnus IOMUX driver supports group based IOMUX configuration, with the exception that certain individual pins - can be overrided to GPIO function + can be overridden to GPIO function config PINCTRL_NSP_GPIO bool "Broadcom NSP GPIO (with PINCONF) driver" diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 49a594855f98..4bc88eb52712 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -92,9 +92,8 @@ config ASUS_LAPTOP If you have an ACPI-compatible ASUS laptop, say Y or M here. config DELL_SMBIOS - tristate "Dell SMBIOS Support" - depends on DCDBAS - default n + tristate + select DCDBAS ---help--- This module provides common functions for kernel modules using Dell SMBIOS. 
@@ -103,16 +102,15 @@ config DELL_SMBIOS config DELL_LAPTOP tristate "Dell Laptop Extras" - depends on DELL_SMBIOS depends on DMI depends on BACKLIGHT_CLASS_DEVICE depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL || RFKILL = n depends on SERIO_I8042 + select DELL_SMBIOS select POWER_SUPPLY select LEDS_CLASS select NEW_LEDS - default n ---help--- This driver adds support for rfkill and backlight control to Dell laptops (except for some models covered by the Compal driver). @@ -123,7 +121,7 @@ config DELL_WMI depends on DMI depends on INPUT depends on ACPI_VIDEO || ACPI_VIDEO = n - depends on DELL_SMBIOS + select DELL_SMBIOS select INPUT_SPARSEKMAP ---help--- Say Y here if you want to support WMI-based hotkeys on Dell laptops. @@ -1069,4 +1067,30 @@ config MLX_CPLD_PLATFORM This driver handles hot-plug events for the power suppliers, power cables and fans on the wide range Mellanox IB and Ethernet systems. +config INTEL_TURBO_MAX_3 + bool "Intel Turbo Boost Max Technology 3.0 enumeration driver" + depends on X86_64 && SCHED_MC_PRIO + ---help--- + This driver reads maximum performance ratio of each CPU and set up + the scheduler priority metrics. In this way scheduler can prefer + CPU with higher performance to schedule tasks. + This driver is only required when the system is not using Hardware + P-States (HWP). In HWP mode, priority can be read from ACPI tables. + +config SILEAD_DMI + bool "Tablets with Silead touchscreens" + depends on ACPI && DMI && I2C=y && INPUT + ---help--- + Certain ACPI based tablets with Silead touchscreens do not have + enough data in ACPI tables for the touchscreen driver to handle + the touchscreen properly, as OEMs expected the data to be baked + into the tablet model specific version of the driver shipped + with the OS-image for the device. This option supplies the missing + information. Enable this for x86 tablets with Silead touchscreens. 
+ endif # X86_PLATFORM_DEVICES + +config PMC_ATOM + def_bool y + depends on PCI + select COMMON_CLK diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index b2f52a7690af..299d0f9e40f7 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o +obj-$(CONFIG_SILEAD_DMI) += silead_dmi.o obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o @@ -73,5 +74,7 @@ obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ intel_telemetry_pltdrv.o \ intel_telemetry_debugfs.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o +obj-$(CONFIG_PMC_ATOM) += pmc_atom.o obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o +obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index a66192f692e3..dac0fbe87460 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -128,6 +128,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} }, + {KE_KEY, 0x86, {KEY_WLAN} }, {KE_END, 0} }; @@ -150,15 +151,30 @@ struct event_return_value { #define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ #define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */ -struct lm_input_params { +/* Hotkey Customized Setting and Acer Application Status. + * Set Device Default Value and Report Acer Application Status. + * When Acer Application starts, it will run this method to inform + * BIOS/EC that Acer Application is on. + * App Status + * Bit[0]: Launch Manager Status + * Bit[1]: ePM Status + * Bit[2]: Device Control Status + * Bit[3]: Acer Power Button Utility Status + * Bit[4]: RF Button Status + * Bit[5]: ODD PM Status + * Bit[6]: Device Default Value Control + * Bit[7]: Hall Sensor Application Status + */ +struct func_input_params { u8 function_num; /* Function Number */ u16 commun_devices; /* Communication type devices default status */ u16 devices; /* Other type devices default status */ - u8 lm_status; /* Launch Manager Status */ - u16 reserved; + u8 app_status; /* Acer Device Status. LM, ePM, RF Button... 
*/ + u8 app_mask; /* Bit mask to app_status */ + u8 reserved; } __attribute__((packed)); -struct lm_return_value { +struct func_return_value { u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 reserved; @@ -1769,13 +1785,13 @@ static void acer_wmi_notify(u32 value, void *context) } static acpi_status __init -wmid3_set_lm_mode(struct lm_input_params *params, - struct lm_return_value *return_value) +wmid3_set_function_mode(struct func_input_params *params, + struct func_return_value *return_value) { acpi_status status; union acpi_object *obj; - struct acpi_buffer input = { sizeof(struct lm_input_params), params }; + struct acpi_buffer input = { sizeof(struct func_input_params), params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output); @@ -1796,7 +1812,7 @@ wmid3_set_lm_mode(struct lm_input_params *params, return AE_ERROR; } - *return_value = *((struct lm_return_value *)obj->buffer.pointer); + *return_value = *((struct func_return_value *)obj->buffer.pointer); kfree(obj); return status; @@ -1804,16 +1820,17 @@ wmid3_set_lm_mode(struct lm_input_params *params, static int __init acer_wmi_enable_ec_raw(void) { - struct lm_return_value return_value; + struct func_return_value return_value; acpi_status status; - struct lm_input_params params = { + struct func_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, - .lm_status = 0x00, /* Launch Manager Deactive */ + .app_status = 0x00, /* Launch Manager Deactive */ + .app_mask = 0x01, }; - status = wmid3_set_lm_mode(¶ms, &return_value); + status = wmid3_set_function_mode(¶ms, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n", @@ -1827,16 +1844,17 @@ static int __init acer_wmi_enable_ec_raw(void) static int __init acer_wmi_enable_lm(void) { - struct lm_return_value return_value; + struct func_return_value return_value; acpi_status status; - struct lm_input_params params = { + struct func_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, - .lm_status = 0x01, /* Launch Manager Active */ + .app_status = 0x01, /* Launch Manager Active */ + .app_mask = 0x01, }; - status = wmid3_set_lm_mode(¶ms, &return_value); + status = wmid3_set_function_mode(¶ms, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n", @@ -1846,11 +1864,46 @@ static int __init acer_wmi_enable_lm(void) return status; } +static int __init acer_wmi_enable_rf_button(void) +{ + struct func_return_value return_value; + acpi_status status; + struct func_input_params params = { + .function_num = 0x1, + .commun_devices = 0xFFFF, + .devices = 0xFFFF, + .app_status = 0x10, /* RF Button Active */ + .app_mask = 0x10, + }; + + status = wmid3_set_function_mode(¶ms, &return_value); + + if (return_value.error_code || return_value.ec_return_value) + pr_warn("Enabling RF Button failed: 0x%x - 0x%x\n", + return_value.error_code, + return_value.ec_return_value); + + return status; +} + +#define ACER_WMID_ACCEL_HID "BST0001" + static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level, void *ctx, void **retval) { + struct acpi_device *dev; + + if (!strcmp(ctx, "SENR")) { + if (acpi_bus_get_device(ah, &dev)) + return AE_OK; + if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev))) + return AE_OK; + } else + return AE_OK; + *(acpi_handle *)retval = ah; - 
return AE_OK; + + return AE_CTRL_TERMINATE; } static int __init acer_wmi_get_handle(const char *name, const char *prop, @@ -1877,7 +1930,7 @@ static int __init acer_wmi_accel_setup(void) { int err; - err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle); + err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle); if (err) return err; @@ -2216,6 +2269,9 @@ static int __init acer_wmi_init(void) interface->capability &= ~ACER_CAP_BRIGHTNESS; if (wmi_has_guid(WMID_GUID3)) { + if (ACPI_FAILURE(acer_wmi_enable_rf_button())) + pr_warn("Cannot enable RF Button Driver\n"); + if (ec_raw_mode) { if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { pr_err("Cannot enable EC raw mode\n"); @@ -2233,10 +2289,11 @@ static int __init acer_wmi_init(void) err = acer_wmi_input_setup(); if (err) return err; + err = acer_wmi_accel_setup(); + if (err) + return err; } - acer_wmi_accel_setup(); - err = platform_driver_register(&acer_platform_driver); if (err) { pr_err("Unable to register platform driver\n"); diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 005629447b0c..d6b34923fb4e 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -21,7 +21,6 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/dmi.h> -#include <linux/acpi.h> #include <linux/leds.h> #define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492" diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index 9f31bc1a47d0..f3796164329e 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -17,19 +17,41 @@ #include <linux/pci_ids.h> #include <linux/leds.h> -#define ASUS_WIRELESS_LED_STATUS 0x2 -#define ASUS_WIRELESS_LED_OFF 0x4 -#define ASUS_WIRELESS_LED_ON 0x5 +struct hswc_params { + u8 on; + u8 off; + u8 status; +}; struct asus_wireless_data { struct input_dev *idev; struct acpi_device *adev; + const struct hswc_params *hswc_params; struct workqueue_struct *wq; struct work_struct led_work; struct led_classdev led; int led_state; }; +static const struct hswc_params atk4001_id_params = { + .on = 0x0, + .off = 0x1, + .status = 0x2, +}; + +static const struct hswc_params atk4002_id_params = { + .on = 0x5, + .off = 0x4, + .status = 0x2, +}; + +static const struct acpi_device_id device_ids[] = { + {"ATK4001", (kernel_ulong_t)&atk4001_id_params}, + {"ATK4002", (kernel_ulong_t)&atk4002_id_params}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, device_ids); + static u64 asus_wireless_method(acpi_handle handle, const char *method, int param) { @@ -61,8 +83,8 @@ static enum led_brightness led_state_get(struct led_classdev *led) data = container_of(led, struct asus_wireless_data, led); s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC", - ASUS_WIRELESS_LED_STATUS); - if (s == ASUS_WIRELESS_LED_ON) + data->hswc_params->status); + if (s == data->hswc_params->on) return LED_FULL; return LED_OFF; } @@ -76,14 +98,13 @@ static void led_state_update(struct work_struct *work) data->led_state); } -static void led_state_set(struct led_classdev *led, - enum led_brightness value) +static void led_state_set(struct led_classdev *led, enum led_brightness value) { struct asus_wireless_data *data; data = container_of(led, struct asus_wireless_data, led); - data->led_state = value == LED_OFF ? ASUS_WIRELESS_LED_OFF : - ASUS_WIRELESS_LED_ON; + data->led_state = value == LED_OFF ? 
data->hswc_params->off : + data->hswc_params->on; queue_work(data->wq, &data->led_work); } @@ -104,12 +125,14 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event) static int asus_wireless_add(struct acpi_device *adev) { struct asus_wireless_data *data; + const struct acpi_device_id *id; int err; data = devm_kzalloc(&adev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; adev->driver_data = data; + data->adev = adev; data->idev = devm_input_allocate_device(&adev->dev); if (!data->idev) @@ -124,7 +147,16 @@ static int asus_wireless_add(struct acpi_device *adev) if (err) return err; - data->adev = adev; + for (id = device_ids; id->id[0]; id++) { + if (!strcmp((char *) id->id, acpi_device_hid(adev))) { + data->hswc_params = + (const struct hswc_params *)id->driver_data; + break; + } + } + if (!data->hswc_params) + return 0; + data->wq = create_singlethread_workqueue("asus_wireless_workqueue"); if (!data->wq) return -ENOMEM; @@ -137,6 +169,7 @@ static int asus_wireless_add(struct acpi_device *adev) err = devm_led_classdev_register(&adev->dev, &data->led); if (err) destroy_workqueue(data->wq); + return err; } @@ -149,13 +182,6 @@ static int asus_wireless_remove(struct acpi_device *adev) return 0; } -static const struct acpi_device_id device_ids[] = { - {"ATK4001", 0}, - {"ATK4002", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, device_ids); - static struct acpi_driver asus_wireless_driver = { .name = "Asus Wireless Radio Control Driver", .class = "hotkey", diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 14392a01ab36..f57dd282a002 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -106,6 +106,12 @@ static const struct dmi_system_id dell_device_table[] __initconst = { }, }, { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/ + }, + }, + { .ident = "Dell Computer Corporation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 82d67715ce76..2b218b1d13e5 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -202,6 +202,7 @@ static int radio_led_set(struct led_classdev *cdev, static struct led_classdev radio_led = { .name = "fujitsu::radio_led", + .default_trigger = "rfkill-any", .brightness_get = radio_led_get, .brightness_set_blocking = radio_led_set }; @@ -270,15 +271,20 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) static int logolamp_set(struct led_classdev *cdev, enum led_brightness brightness) { - if (brightness >= LED_FULL) { - call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); - return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON); - } else if (brightness >= LED_HALF) { - call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); - return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF); - } else { - return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF); - } + int poweron = FUNC_LED_ON, always = FUNC_LED_ON; + int ret; + + if (brightness < LED_HALF) + poweron = FUNC_LED_OFF; + + if (brightness < LED_FULL) + always = FUNC_LED_OFF; + + ret = call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron); + if (ret < 0) + return ret; + + return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always); } static int kblamps_set(struct led_classdev *cdev, @@ -313,17 +319,17 @@ static int 
eco_led_set(struct led_classdev *cdev, static enum led_brightness logolamp_get(struct led_classdev *cdev) { - enum led_brightness brightness = LED_OFF; - int poweron, always; - - poweron = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0); - if (poweron == FUNC_LED_ON) { - brightness = LED_HALF; - always = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0); - if (always == FUNC_LED_ON) - brightness = LED_FULL; - } - return brightness; + int ret; + + ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0); + if (ret == FUNC_LED_ON) + return LED_FULL; + + ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0); + if (ret == FUNC_LED_ON) + return LED_HALF; + + return LED_OFF; } static enum led_brightness kblamps_get(struct led_classdev *cdev) @@ -1029,107 +1035,117 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) return 0; } +static void acpi_fujitsu_hotkey_press(int keycode) +{ + struct input_dev *input = fujitsu_hotkey->input; + int status; + + status = kfifo_in_locked(&fujitsu_hotkey->fifo, + (unsigned char *)&keycode, sizeof(keycode), + &fujitsu_hotkey->fifo_lock); + if (status != sizeof(keycode)) { + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Could not push keycode [0x%x]\n", keycode); + return; + } + input_report_key(input, keycode, 1); + input_sync(input); + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Push keycode into ringbuffer [%d]\n", keycode); +} + +static void acpi_fujitsu_hotkey_release(void) +{ + struct input_dev *input = fujitsu_hotkey->input; + int keycode, status; + + while (true) { + status = kfifo_out_locked(&fujitsu_hotkey->fifo, + (unsigned char *)&keycode, + sizeof(keycode), + &fujitsu_hotkey->fifo_lock); + if (status != sizeof(keycode)) + return; + input_report_key(input, keycode, 0); + input_sync(input); + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Pop keycode from ringbuffer [%d]\n", keycode); + } +} + static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) { struct input_dev *input; - int keycode, keycode_r; + int keycode; unsigned int irb = 1; - int i, status; + int i; input = fujitsu_hotkey->input; + if (event != ACPI_FUJITSU_NOTIFY_CODE1) { + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unsupported event [0x%x]\n", event); + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + return; + } + if (fujitsu_hotkey->rfkill_supported) fujitsu_hotkey->rfkill_state = call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); - switch (event) { - case ACPI_FUJITSU_NOTIFY_CODE1: - i = 0; - while ((irb = - call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 - && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { - switch (irb & 0x4ff) { - case KEY1_CODE: - keycode = fujitsu->keycode1; - break; - case KEY2_CODE: - keycode = fujitsu->keycode2; - break; - case KEY3_CODE: - keycode = fujitsu->keycode3; - break; - case KEY4_CODE: - keycode = fujitsu->keycode4; - break; - case KEY5_CODE: - keycode = fujitsu->keycode5; - break; - case 0: - keycode = 0; - break; - default: - vdbg_printk(FUJLAPTOP_DBG_WARN, - "Unknown GIRB result [%x]\n", irb); - keycode = -1; - break; - } - if (keycode > 0) { - vdbg_printk(FUJLAPTOP_DBG_TRACE, - "Push keycode into ringbuffer [%d]\n", - keycode); - status = kfifo_in_locked(&fujitsu_hotkey->fifo, - (unsigned char *)&keycode, - sizeof(keycode), - &fujitsu_hotkey->fifo_lock); - if (status != sizeof(keycode)) { - vdbg_printk(FUJLAPTOP_DBG_WARN, - "Could not push keycode [0x%x]\n", - keycode); - } else { - input_report_key(input, keycode, 1); - input_sync(input); - } - } 
else if (keycode == 0) { - while ((status = - kfifo_out_locked( - &fujitsu_hotkey->fifo, - (unsigned char *) &keycode_r, - sizeof(keycode_r), - &fujitsu_hotkey->fifo_lock)) - == sizeof(keycode_r)) { - input_report_key(input, keycode_r, 0); - input_sync(input); - vdbg_printk(FUJLAPTOP_DBG_TRACE, - "Pop keycode from ringbuffer [%d]\n", - keycode_r); - } - } + i = 0; + while ((irb = + call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 + && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { + switch (irb & 0x4ff) { + case KEY1_CODE: + keycode = fujitsu->keycode1; + break; + case KEY2_CODE: + keycode = fujitsu->keycode2; + break; + case KEY3_CODE: + keycode = fujitsu->keycode3; + break; + case KEY4_CODE: + keycode = fujitsu->keycode4; + break; + case KEY5_CODE: + keycode = fujitsu->keycode5; + break; + case 0: + keycode = 0; + break; + default: + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unknown GIRB result [%x]\n", irb); + keycode = -1; + break; } - /* On some models (first seen on the Skylake-based Lifebook - * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is - * handled in software; its state is queried using FUNC_RFKILL - */ - if ((fujitsu_hotkey->rfkill_supported & BIT(26)) && - (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) { - keycode = KEY_TOUCHPAD_TOGGLE; - input_report_key(input, keycode, 1); - input_sync(input); - input_report_key(input, keycode, 0); - input_sync(input); - } + if (keycode > 0) + acpi_fujitsu_hotkey_press(keycode); + else if (keycode == 0) + acpi_fujitsu_hotkey_release(); + } - break; - default: - keycode = KEY_UNKNOWN; - vdbg_printk(FUJLAPTOP_DBG_WARN, - "Unsupported event [0x%x]\n", event); + /* On some models (first seen on the Skylake-based Lifebook + * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is + * handled in software; its state is queried using FUNC_RFKILL + */ + if ((fujitsu_hotkey->rfkill_supported & BIT(26)) && + (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) { + keycode = KEY_TOUCHPAD_TOGGLE; input_report_key(input, keycode, 1); input_sync(input); input_report_key(input, keycode, 0); input_sync(input); - break; } + } /* Initialization */ diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 09356684c32f..493d8910a74e 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -251,6 +251,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap), AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted), AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted), + AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted), { NULL, } /* Laptop models without axis info (yet): * "NC6910" "HP Compaq 6910" diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index cb3ab2b212b1..bcf438f38781 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -1,5 +1,5 @@ /* - * Intel HID event driver for Windows 8 + * Intel HID event & 5 button array driver * * Copyright (C) 2015 Alex Hung <alex.hung@canonical.com> * Copyright (C) 2015 Andrew Lutomirski <luto@kernel.org> @@ -57,8 +57,24 @@ static const struct key_entry intel_hid_keymap[] = { { KE_END }, }; +/* 5 button array notification value. 
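The reworked Fujitsu notify path above splits the old inline loop into acpi_fujitsu_hotkey_press() and acpi_fujitsu_hotkey_release(): each press is queued into a kfifo so the matching release events can be replayed later. A minimal sketch of that queue-then-replay pattern, using the typed kfifo helpers rather than the driver's byte-oriented calls; the example_ names and the locally defined fifo are invented for illustration.

#include <linux/input.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(example_fifo, int, 16);	/* element count must be a power of two */
static DEFINE_SPINLOCK(example_fifo_lock);

static void example_press(struct input_dev *input, int keycode)
{
	/* Remember the keycode so a release can be emitted for it later */
	if (kfifo_in_spinlocked(&example_fifo, &keycode, 1, &example_fifo_lock) != 1)
		return;

	input_report_key(input, keycode, 1);
	input_sync(input);
}

static void example_release_all(struct input_dev *input)
{
	int keycode;

	/* Drain the fifo, emitting a release for every queued press */
	while (kfifo_out_spinlocked(&example_fifo, &keycode, 1, &example_fifo_lock) == 1) {
		input_report_key(input, keycode, 0);
		input_sync(input);
	}
}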
*/ +static const struct key_entry intel_array_keymap[] = { + { KE_KEY, 0xC2, { KEY_LEFTMETA } }, /* Press */ + { KE_IGNORE, 0xC3, { KEY_LEFTMETA } }, /* Release */ + { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* Press */ + { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* Release */ + { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* Press */ + { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* Release */ + { KE_SW, 0xC8, { .sw = { SW_ROTATE_LOCK, 1 } } }, /* Press */ + { KE_SW, 0xC9, { .sw = { SW_ROTATE_LOCK, 0 } } }, /* Release */ + { KE_KEY, 0xCE, { KEY_POWER } }, /* Press */ + { KE_IGNORE, 0xCF, { KEY_POWER } }, /* Release */ + { KE_END }, +}; + struct intel_hid_priv { struct input_dev *input_dev; + struct input_dev *array; }; static int intel_hid_set_enable(struct device *device, int enable) @@ -78,15 +94,43 @@ static int intel_hid_set_enable(struct device *device, int enable) return 0; } +static void intel_button_array_enable(struct device *device, bool enable) +{ + struct intel_hid_priv *priv = dev_get_drvdata(device); + acpi_handle handle = ACPI_HANDLE(device); + unsigned long long button_cap; + acpi_status status; + + if (!priv->array) + return; + + /* Query supported platform features */ + status = acpi_evaluate_integer(handle, "BTNC", NULL, &button_cap); + if (ACPI_FAILURE(status)) { + dev_warn(device, "failed to get button capability\n"); + return; + } + + /* Enable|disable features - power button is always enabled */ + status = acpi_execute_simple_method(handle, "BTNE", + enable ? button_cap : 1); + if (ACPI_FAILURE(status)) + dev_warn(device, "failed to set button capability\n"); +} + static int intel_hid_pl_suspend_handler(struct device *device) { intel_hid_set_enable(device, 0); + intel_button_array_enable(device, false); + return 0; } static int intel_hid_pl_resume_handler(struct device *device) { intel_hid_set_enable(device, 1); + intel_button_array_enable(device, true); + return 0; } @@ -126,6 +170,27 @@ err_free_device: return ret; } +static int intel_button_array_input_setup(struct platform_device *device) +{ + struct intel_hid_priv *priv = dev_get_drvdata(&device->dev); + int ret; + + /* Setup input device for 5 button array */ + priv->array = devm_input_allocate_device(&device->dev); + if (!priv->array) + return -ENOMEM; + + ret = sparse_keymap_setup(priv->array, intel_array_keymap, NULL); + if (ret) + return ret; + + priv->array->dev.parent = &device->dev; + priv->array->name = "Intel HID 5 button array"; + priv->array->id.bustype = BUS_HOST; + + return input_register_device(priv->array); +} + static void intel_hid_input_destroy(struct platform_device *device) { struct intel_hid_priv *priv = dev_get_drvdata(&device->dev); @@ -140,10 +205,11 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) unsigned long long ev_index; acpi_status status; - /* The platform spec only defines one event code: 0xC0. 
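intel_button_array_input_setup() above binds intel_array_keymap to a second input device with sparse_keymap_setup(), so the notify handler can translate a raw notification value in one call. A small usage sketch, assuming the array device has been registered as above; the example_ names are invented.

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/printk.h>

/* Forward one 5-button-array notification (for example 0xC4, volume-up press)
 * to the input core. The "true" argument auto-releases KE_KEY entries, while
 * the KE_SW rotate-lock entries carry their press/release state in the keymap.
 */
static void example_report_array_event(struct input_dev *array, unsigned int event)
{
	if (!sparse_keymap_report_event(array, event, 1, true))
		pr_debug("unhandled button array event 0x%x\n", event);
}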
*/ + /* 0xC0 is for HID events, other values are for 5 button array */ if (event != 0xc0) { - dev_warn(&device->dev, "received unknown event (0x%x)\n", - event); + if (!priv->array || + !sparse_keymap_report_event(priv->array, event, 1, true)) + dev_info(&device->dev, "unknown event 0x%x\n", event); return; } @@ -161,8 +227,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) static int intel_hid_probe(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); + unsigned long long event_cap, mode; struct intel_hid_priv *priv; - unsigned long long mode; acpi_status status; int err; @@ -193,6 +259,15 @@ static int intel_hid_probe(struct platform_device *device) return err; } + /* Setup 5 button array */ + status = acpi_evaluate_integer(handle, "HEBC", NULL, &event_cap); + if (ACPI_SUCCESS(status) && (event_cap & 0x20000)) { + dev_info(&device->dev, "platform supports 5 button array\n"); + err = intel_button_array_input_setup(device); + if (err) + pr_err("Failed to setup Intel 5 button array hotkeys\n"); + } + status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler, @@ -206,6 +281,16 @@ static int intel_hid_probe(struct platform_device *device) if (err) goto err_remove_notify; + if (priv->array) { + intel_button_array_enable(&device->dev, true); + + /* Call button load method to enable HID power button */ + status = acpi_evaluate_object(handle, "BTNL", NULL, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&device->dev, + "failed to enable HID power button\n"); + } + return 0; err_remove_notify: @@ -224,6 +309,7 @@ static int intel_hid_remove(struct platform_device *device) acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); intel_hid_input_destroy(device); intel_hid_set_enable(&device->dev, 0); + intel_button_array_enable(&device->dev, false); /* * Even if we failed to shut off the event stream, we can still diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 55663b3d7282..58dcee562d64 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -68,6 +68,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/tick.h> diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 361770568ad0..871cfa682519 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -1,7 +1,10 @@ /* - * Power button driver for Medfield. + * Power button driver for Intel MID platforms. * - * Copyright (C) 2010 Intel Corp + * Copyright (C) 2010,2017 Intel Corp + * + * Author: Hong Liu <hong.liu@intel.com> + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,20 +14,20 @@ * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
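The Medfield-only power button driver is generalized below into an Intel MID driver that selects its register layout from a CPU model table: x86_match_cpu() returns the matching x86_cpu_id entry and the per-SoC data hangs off its driver_data field. A minimal sketch of that pattern; the example_ structures and the register address values are hypothetical, not the driver's.

#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

struct example_pb_data {
	unsigned short pbstat_addr;
};

static struct example_pb_data example_penwell_data = { .pbstat_addr = 0x3f };
static struct example_pb_data example_merrifield_data = { .pbstat_addr = 0x27 };

#define EXAMPLE_ICPU(model, data) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&data }

static const struct x86_cpu_id example_pb_ids[] = {
	EXAMPLE_ICPU(INTEL_FAM6_ATOM_PENWELL, example_penwell_data),
	EXAMPLE_ICPU(INTEL_FAM6_ATOM_MERRIFIELD, example_merrifield_data),
	{}
};

static struct example_pb_data *example_get_pb_data(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_pb_ids);

	/* NULL on every other CPU model, so the probe can bail out early */
	return id ? (struct example_pb_data *)id->driver_data : NULL;
}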
*/ -#include <linux/module.h> #include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/platform_device.h> #include <linux/input.h> +#include <linux/interrupt.h> #include <linux/mfd/intel_msic.h> +#include <linux/module.h> +#include <linux/platform_device.h> #include <linux/pm_wakeirq.h> +#include <linux/slab.h> + +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> +#include <asm/intel_scu_ipc.h> #define DRIVER_NAME "msic_power_btn" @@ -36,37 +39,113 @@ */ #define MSIC_PWRBTNM (1 << 0) -static irqreturn_t mfld_pb_isr(int irq, void *dev_id) +/* Intel Tangier */ +#define BCOVE_PB_LEVEL (1 << 4) /* 1 - release, 0 - press */ + +/* Basin Cove PMIC */ +#define BCOVE_PBIRQ 0x02 +#define BCOVE_IRQLVL1MSK 0x0c +#define BCOVE_PBIRQMASK 0x0d +#define BCOVE_PBSTATUS 0x27 + +struct mid_pb_ddata { + struct device *dev; + int irq; + struct input_dev *input; + unsigned short mirqlvl1_addr; + unsigned short pbstat_addr; + u8 pbstat_mask; + int (*setup)(struct mid_pb_ddata *ddata); +}; + +static int mid_pbstat(struct mid_pb_ddata *ddata, int *value) { - struct input_dev *input = dev_id; + struct input_dev *input = ddata->input; int ret; u8 pbstat; - ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat); + ret = intel_scu_ipc_ioread8(ddata->pbstat_addr, &pbstat); + if (ret) + return ret; + dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat); + *value = !(pbstat & ddata->pbstat_mask); + return 0; +} + +static int mid_irq_ack(struct mid_pb_ddata *ddata) +{ + return intel_scu_ipc_update_register(ddata->mirqlvl1_addr, 0, MSIC_PWRBTNM); +} + +static int mrfld_setup(struct mid_pb_ddata *ddata) +{ + /* Unmask the PBIRQ and MPBIRQ on Tangier */ + intel_scu_ipc_update_register(BCOVE_PBIRQ, 0, MSIC_PWRBTNM); + intel_scu_ipc_update_register(BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM); + + return 0; +} + +static irqreturn_t mid_pb_isr(int irq, void *dev_id) +{ + struct mid_pb_ddata *ddata = dev_id; + struct input_dev *input = ddata->input; + int value = 0; + int ret; + + ret = mid_pbstat(ddata, &value); if (ret < 0) { - dev_err(input->dev.parent, "Read error %d while reading" - " MSIC_PB_STATUS\n", ret); + dev_err(input->dev.parent, + "Read error %d while reading MSIC_PB_STATUS\n", ret); } else { - input_event(input, EV_KEY, KEY_POWER, - !(pbstat & MSIC_PB_LEVEL)); + input_event(input, EV_KEY, KEY_POWER, value); input_sync(input); } + mid_irq_ack(ddata); return IRQ_HANDLED; } -static int mfld_pb_probe(struct platform_device *pdev) +static struct mid_pb_ddata mfld_ddata = { + .mirqlvl1_addr = INTEL_MSIC_IRQLVL1MSK, + .pbstat_addr = INTEL_MSIC_PBSTATUS, + .pbstat_mask = MSIC_PB_LEVEL, +}; + +static struct mid_pb_ddata mrfld_ddata = { + .mirqlvl1_addr = BCOVE_IRQLVL1MSK, + .pbstat_addr = BCOVE_PBSTATUS, + .pbstat_mask = BCOVE_PB_LEVEL, + .setup = mrfld_setup, +}; + +#define ICPU(model, ddata) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } + +static const struct x86_cpu_id mid_pb_cpu_ids[] = { + ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata), + ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata), + {} +}; + +static int mid_pb_probe(struct platform_device *pdev) { + const struct x86_cpu_id *id; + struct mid_pb_ddata *ddata; struct input_dev *input; int irq = platform_get_irq(pdev, 0); int error; + id = x86_match_cpu(mid_pb_cpu_ids); + if (!id) + return -ENODEV; + if (irq < 0) return -EINVAL; - input = input_allocate_device(); + input = devm_input_allocate_device(&pdev->dev); if (!input) return -ENOMEM; @@ -77,25 +156,36 @@ static int mfld_pb_probe(struct 
platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT, - DRIVER_NAME, input); - if (error) { - dev_err(&pdev->dev, "Unable to request irq %d for mfld power" - "button\n", irq); - goto err_free_input; + ddata = (struct mid_pb_ddata *)id->driver_data; + if (!ddata) + return -ENODATA; + + ddata->dev = &pdev->dev; + ddata->irq = irq; + ddata->input = input; + + if (ddata->setup) { + error = ddata->setup(ddata); + if (error) + return error; } - device_init_wakeup(&pdev->dev, true); - dev_pm_set_wake_irq(&pdev->dev, irq); + error = devm_request_threaded_irq(&pdev->dev, irq, NULL, mid_pb_isr, + IRQF_ONESHOT, DRIVER_NAME, ddata); + if (error) { + dev_err(&pdev->dev, + "Unable to request irq %d for MID power button\n", irq); + return error; + } error = input_register_device(input); if (error) { - dev_err(&pdev->dev, "Unable to register input dev, error " - "%d\n", error); - goto err_free_irq; + dev_err(&pdev->dev, + "Unable to register input dev, error %d\n", error); + return error; } - platform_set_drvdata(pdev, input); + platform_set_drvdata(pdev, ddata); /* * SCU firmware might send power button interrupts to IA core before @@ -107,46 +197,39 @@ static int mfld_pb_probe(struct platform_device *pdev) * initialization. The race happens rarely. So we needn't worry * about it. */ - error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM); + error = mid_irq_ack(ddata); if (error) { - dev_err(&pdev->dev, "Unable to clear power button interrupt, " - "error: %d\n", error); - goto err_free_irq; + dev_err(&pdev->dev, + "Unable to clear power button interrupt, error: %d\n", + error); + return error; } - return 0; + device_init_wakeup(&pdev->dev, true); + dev_pm_set_wake_irq(&pdev->dev, irq); -err_free_irq: - free_irq(irq, input); -err_free_input: - input_free_device(input); - return error; + return 0; } -static int mfld_pb_remove(struct platform_device *pdev) +static int mid_pb_remove(struct platform_device *pdev) { - struct input_dev *input = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - dev_pm_clear_wake_irq(&pdev->dev); device_init_wakeup(&pdev->dev, false); - free_irq(irq, input); - input_unregister_device(input); return 0; } -static struct platform_driver mfld_pb_driver = { +static struct platform_driver mid_pb_driver = { .driver = { .name = DRIVER_NAME, }, - .probe = mfld_pb_probe, - .remove = mfld_pb_remove, + .probe = mid_pb_probe, + .remove = mid_pb_remove, }; -module_platform_driver(mfld_pb_driver); +module_platform_driver(mid_pb_driver); MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>"); -MODULE_DESCRIPTION("Intel Medfield Power Button Driver"); +MODULE_DESCRIPTION("Intel MID Power Button Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 0df3c9d37509..008a76903cbf 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c @@ -549,9 +549,9 @@ static int mid_thermal_remove(struct platform_device *pdev) static const struct platform_device_id therm_id_table[] = { { DRIVER_NAME, 1 }, - { "msic_thermal", 1 }, { } }; +MODULE_DEVICE_TABLE(platform, therm_id_table); static struct platform_driver mid_thermal_driver = { .driver = { diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index b130b8c9b9d7..914bcd2edbde 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ 
b/drivers/platform/x86/intel_pmc_core.c @@ -188,8 +188,7 @@ static int pmc_core_check_read_lock_bit(void) u32 value; value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET); - return test_bit(SPT_PMC_READ_DISABLE_BIT, - (unsigned long *)&value); + return value & BIT(SPT_PMC_READ_DISABLE_BIT); } #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -238,8 +237,7 @@ static int pmc_core_mtpmc_link_status(void) u32 value; value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET); - return test_bit(SPT_PMC_MSG_FULL_STS_BIT, - (unsigned long *)&value); + return value & BIT(SPT_PMC_MSG_FULL_STS_BIT); } static int pmc_core_send_msg(u32 *addr_xram) diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 0bf51d574fa9..0651d47b8eeb 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -32,7 +32,10 @@ #include <linux/notifier.h> #include <linux/suspend.h> #include <linux/acpi.h> +#include <linux/io-64-nonatomic-lo-hi.h> + #include <asm/intel_pmc_ipc.h> + #include <linux/platform_data/itco_wdt.h> /* @@ -54,6 +57,18 @@ #define IPC_WRITE_BUFFER 0x80 #define IPC_READ_BUFFER 0x90 +/* PMC Global Control Registers */ +#define GCR_TELEM_DEEP_S0IX_OFFSET 0x1078 +#define GCR_TELEM_SHLW_S0IX_OFFSET 0x1080 + +/* Residency with clock rate at 19.2MHz to usecs */ +#define S0IX_RESIDENCY_IN_USECS(d, s) \ +({ \ + u64 result = 10ull * ((d) + (s)); \ + do_div(result, 192); \ + result; \ +}) + /* * 16-byte buffer for sending data associated with IPC command. */ @@ -68,7 +83,7 @@ #define PLAT_RESOURCE_IPC_INDEX 0 #define PLAT_RESOURCE_IPC_SIZE 0x1000 #define PLAT_RESOURCE_GCR_OFFSET 0x1008 -#define PLAT_RESOURCE_GCR_SIZE 0x4 +#define PLAT_RESOURCE_GCR_SIZE 0x1000 #define PLAT_RESOURCE_BIOS_DATA_INDEX 1 #define PLAT_RESOURCE_BIOS_IFACE_INDEX 2 #define PLAT_RESOURCE_TELEM_SSRAM_INDEX 3 @@ -97,8 +112,6 @@ #define TCO_PMC_OFFSET 0x8 #define TCO_PMC_SIZE 0x4 -static const int iTCO_version = 3; - static struct intel_pmc_ipc_dev { struct device *dev; void __iomem *ipc_base; @@ -115,6 +128,7 @@ static struct intel_pmc_ipc_dev { /* gcr */ resource_size_t gcr_base; int gcr_size; + bool has_gcr_regs; /* punit */ struct platform_device *punit_dev; @@ -180,6 +194,11 @@ static inline u32 ipc_data_readl(u32 offset) return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } +static inline u64 gcr_data_readq(u32 offset) +{ + return readq(ipcdev.ipc_base + offset); +} + static int intel_pmc_ipc_check_status(void) { int status; @@ -389,6 +408,7 @@ static void ipc_pci_remove(struct pci_dev *pdev) static const struct pci_device_id ipc_pci_ids[] = { {PCI_VDEVICE(INTEL, 0x0a94), 0}, {PCI_VDEVICE(INTEL, 0x1a94), 0}, + {PCI_VDEVICE(INTEL, 0x5a94), 0}, { 0,} }; MODULE_DEVICE_TABLE(pci, ipc_pci_ids); @@ -712,7 +732,8 @@ static int ipc_plat_get_res(struct platform_device *pdev) dev_err(&pdev->dev, "Failed to get ipc resource\n"); return -ENXIO; } - size = PLAT_RESOURCE_IPC_SIZE; + size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; + if (!request_mem_region(res->start, size, pdev->name)) { dev_err(&pdev->dev, "Failed to request ipc resource\n"); return -EBUSY; @@ -748,6 +769,28 @@ static int ipc_plat_get_res(struct platform_device *pdev) return 0; } +/** + * intel_pmc_s0ix_counter_read() - Read S0ix residency. + * @data: Out param that contains current S0ix residency count. + * + * Return: an error code or 0 on success. 
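The S0ix telemetry counters read through the GCR window tick at 19.2 MHz, so S0IX_RESIDENCY_IN_USECS() above converts the summed deep and shallow counts to microseconds as 10 * count / 192. A small worked check of that arithmetic with an invented helper name, using do_div() the same way the macro does:

#include <linux/types.h>
#include <asm/div64.h>

static u64 example_s0ix_usecs(u64 deep, u64 shlw)
{
	u64 usecs = 10ull * (deep + shlw);

	/* 19,200,000 ticks at 19.2 MHz -> 1,000,000 us, i.e. one second */
	do_div(usecs, 192);
	return usecs;
}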
+ */ +int intel_pmc_s0ix_counter_read(u64 *data) +{ + u64 deep, shlw; + + if (!ipcdev.has_gcr_regs) + return -EACCES; + + deep = gcr_data_readq(GCR_TELEM_DEEP_S0IX_OFFSET); + shlw = gcr_data_readq(GCR_TELEM_SHLW_S0IX_OFFSET); + + *data = S0IX_RESIDENCY_IN_USECS(deep, shlw); + + return 0; +} +EXPORT_SYMBOL_GPL(intel_pmc_s0ix_counter_read); + #ifdef CONFIG_ACPI static const struct acpi_device_id ipc_acpi_ids[] = { { "INT34D2", 0}, @@ -797,6 +840,8 @@ static int ipc_plat_probe(struct platform_device *pdev) goto err_sys; } + ipcdev.has_gcr_regs = true; + return 0; err_sys: free_irq(ipcdev.irq, &ipcdev); @@ -808,8 +853,11 @@ err_device: iounmap(ipcdev.ipc_base); res = platform_get_resource(pdev, IORESOURCE_MEM, PLAT_RESOURCE_IPC_INDEX); - if (res) - release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE); + if (res) { + release_mem_region(res->start, + PLAT_RESOURCE_IPC_SIZE + + PLAT_RESOURCE_GCR_SIZE); + } return ret; } @@ -825,8 +873,11 @@ static int ipc_plat_remove(struct platform_device *pdev) iounmap(ipcdev.ipc_base); res = platform_get_resource(pdev, IORESOURCE_MEM, PLAT_RESOURCE_IPC_INDEX); - if (res) - release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE); + if (res) { + release_mem_region(res->start, + PLAT_RESOURCE_IPC_SIZE + + PLAT_RESOURCE_GCR_SIZE); + } ipcdev.dev = NULL; return 0; } diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c new file mode 100644 index 000000000000..4f60d8e32a0a --- /dev/null +++ b/drivers/platform/x86/intel_turbo_max_3.c @@ -0,0 +1,151 @@ +/* + * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver + * Copyright (c) 2017, Intel Corporation. + * All rights reserved. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/topology.h> +#include <linux/workqueue.h> +#include <linux/cpuhotplug.h> +#include <linux/cpufeature.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define MSR_OC_MAILBOX 0x150 +#define MSR_OC_MAILBOX_CMD_OFFSET 32 +#define MSR_OC_MAILBOX_RSP_OFFSET 32 +#define MSR_OC_MAILBOX_BUSY_BIT 63 +#define OC_MAILBOX_FC_CONTROL_CMD 0x1C + +/* + * Typical latency to get mail box response is ~3us, It takes +3 us to + * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz + * system. So for most of the time, the first mailbox read should have the + * response, but to avoid some boundary cases retry twice. 
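The OC mailbox constants defined above describe the word format used by the read loop that follows: the command byte is shifted up to bit 32, bit 63 is the OS-owned busy flag, a non-zero byte at bit 32 in the reply signals a command failure, and the favored-core priority comes back in the low byte. A brief sketch of just that packing and decoding, with invented example_ names and without the MSR access or retry loop itself:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

static u64 example_oc_request(u8 cmd)
{
	/* Command at bit 32, busy bit set to hand the request to firmware */
	return ((u64)cmd << 32) | BIT_ULL(63);
}

static int example_oc_priority(u64 reply)
{
	if (reply & BIT_ULL(63))
		return -EBUSY;		/* firmware has not answered yet */
	if ((reply >> 32) & 0xff)
		return -ENXIO;		/* mailbox reported an error */

	return reply & 0xff;		/* favored-core priority */
}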
+ */ +#define OC_MAILBOX_RETRY_COUNT 2 + +static int get_oc_core_priority(unsigned int cpu) +{ + u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD; + int ret, i; + + /* Issue favored core read command */ + value = cmd << MSR_OC_MAILBOX_CMD_OFFSET; + /* Set the busy bit to indicate OS is trying to issue command */ + value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT); + ret = wrmsrl_safe(MSR_OC_MAILBOX, value); + if (ret) { + pr_debug("cpu %d OC mailbox write failed\n", cpu); + return ret; + } + + for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) { + ret = rdmsrl_safe(MSR_OC_MAILBOX, &value); + if (ret) { + pr_debug("cpu %d OC mailbox read failed\n", cpu); + break; + } + + if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) { + pr_debug("cpu %d OC mailbox still processing\n", cpu); + ret = -EBUSY; + continue; + } + + if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) { + pr_debug("cpu %d OC mailbox cmd failed\n", cpu); + ret = -ENXIO; + break; + } + + ret = value & 0xff; + pr_debug("cpu %d max_ratio %d\n", cpu, ret); + break; + } + + return ret; +} + +/* + * The work item is needed to avoid CPU hotplug locking issues. The function + * itmt_legacy_set_priority() is called from CPU online callback, so can't + * call sched_set_itmt_support() from there as this function will aquire + * hotplug locks in its path. + */ +static void itmt_legacy_work_fn(struct work_struct *work) +{ + sched_set_itmt_support(); +} + +static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn); + +static int itmt_legacy_cpu_online(unsigned int cpu) +{ + static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; + int priority; + + priority = get_oc_core_priority(cpu); + if (priority < 0) + return 0; + + sched_set_itmt_core_prio(priority, cpu); + + /* Enable ITMT feature when a core with different priority is found */ + if (max_highest_perf <= min_highest_perf) { + if (priority > max_highest_perf) + max_highest_perf = priority; + + if (priority < min_highest_perf) + min_highest_perf = priority; + + if (max_highest_perf > min_highest_perf) + schedule_work(&sched_itmt_work); + } + + return 0; +} + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id itmt_legacy_cpu_ids[] = { + ICPU(INTEL_FAM6_BROADWELL_X), + {} +}; + +static int __init itmt_legacy_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + id = x86_match_cpu(itmt_legacy_cpu_ids); + if (!id) + return -ENODEV; + + if (boot_cpu_has(X86_FEATURE_HWP)) + return -ENODEV; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/turbo_max_3:online", + itmt_legacy_cpu_online, NULL); + if (ret < 0) + return ret; + + return 0; +} +late_initcall(itmt_legacy_init) diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 25f15df5c2d7..8f98c211b440 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -45,6 +45,10 @@ /* LPC bus IO offsets */ #define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000 #define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500 +#define MLXPLAT_CPLD_LPC_REG_AGGR_ADRR 0x253a +#define MLXPLAT_CPLD_LPC_REG_PSU_ADRR 0x2558 +#define MLXPLAT_CPLD_LPC_REG_PWR_ADRR 0x2564 +#define MLXPLAT_CPLD_LPC_REG_FAN_ADRR 0x2588 #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda @@ -56,6 +60,17 @@ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \ MLXPLAT_CPLD_LPC_PIO_OFFSET) +/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. 
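As the comment in the Turbo Boost Max 3.0 driver above notes, sched_set_itmt_support() takes CPU hotplug locks and therefore cannot be called directly from the hotplug online callback; the driver schedules a work item instead. A stripped-down sketch of that defer-from-callback pattern, with invented example_ names and the same includes the driver itself uses:

#include <linux/kernel.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

static void example_enable_itmt_fn(struct work_struct *work)
{
	/* Runs in process context, where taking the hotplug locks is safe */
	sched_set_itmt_support();
}

static DECLARE_WORK(example_enable_itmt_work, example_enable_itmt_fn);

static int example_cpu_online(unsigned int cpu)
{
	/* Hotplug callback: only schedule the work, never call it directly */
	schedule_work(&example_enable_itmt_work);
	return 0;
}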
*/ +#define MLXPLAT_CPLD_AGGR_PSU_MASK_DEF 0x08 +#define MLXPLAT_CPLD_AGGR_PWR_MASK_DEF 0x08 +#define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40 +#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \ + MLXPLAT_CPLD_AGGR_FAN_MASK_DEF) +#define MLXPLAT_CPLD_AGGR_MASK_MSN21XX 0x04 +#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) + /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 #define MLXPLAT_CPLD_CH2 10 @@ -123,7 +138,7 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; /* Platform hotplug devices */ -static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_psu[] = { +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_psu[] = { { .brdinfo = { I2C_BOARD_INFO("24c02", 0x51) }, .bus = 10, @@ -134,7 +149,7 @@ static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_psu[] = { }, }; -static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_pwr[] = { +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_pwr[] = { { .brdinfo = { I2C_BOARD_INFO("dps460", 0x59) }, .bus = 10, @@ -145,7 +160,7 @@ static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_pwr[] = { }, }; -static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_fan[] = { +static struct mlxcpld_hotplug_device mlxplat_mlxcpld_fan[] = { { .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) }, .bus = 11, @@ -166,38 +181,38 @@ static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_fan[] = { /* Platform hotplug default data */ static -struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_default_data = { - .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a), - .top_aggr_mask = 0x48, - .top_aggr_psu_mask = 0x08, - .psu_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x58), - .psu_mask = 0x03, - .psu_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_psu), - .psu = mlxplat_mlxcpld_hotplug_psu, - .top_aggr_pwr_mask = 0x08, - .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64), - .pwr_mask = 0x03, - .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr), - .pwr = mlxplat_mlxcpld_hotplug_pwr, - .top_aggr_fan_mask = 0x40, - .fan_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x88), - .fan_mask = 0x0f, - .fan_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_fan), - .fan = mlxplat_mlxcpld_hotplug_fan, +struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_default_data = { + .top_aggr_offset = MLXPLAT_CPLD_LPC_REG_AGGR_ADRR, + .top_aggr_mask = MLXPLAT_CPLD_AGGR_MASK_DEF, + .top_aggr_psu_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF, + .psu_reg_offset = MLXPLAT_CPLD_LPC_REG_PSU_ADRR, + .psu_mask = MLXPLAT_CPLD_PSU_MASK, + .psu_count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .psu = mlxplat_mlxcpld_psu, + .top_aggr_pwr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF, + .pwr_reg_offset = MLXPLAT_CPLD_LPC_REG_PWR_ADRR, + .pwr_mask = MLXPLAT_CPLD_PWR_MASK, + .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .pwr = mlxplat_mlxcpld_pwr, + .top_aggr_fan_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF, + .fan_reg_offset = MLXPLAT_CPLD_LPC_REG_FAN_ADRR, + .fan_mask = MLXPLAT_CPLD_FAN_MASK, + .fan_count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + .fan = mlxplat_mlxcpld_fan, }; /* Platform hotplug MSN21xx system family data */ static -struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_msn21xx_data = { - .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a), - .top_aggr_mask = 0x04, - .top_aggr_pwr_mask = 0x04, - .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64), - .pwr_mask = 0x03, - .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr), +struct 
mlxcpld_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = { + .top_aggr_offset = MLXPLAT_CPLD_LPC_REG_AGGR_ADRR, + .top_aggr_mask = MLXPLAT_CPLD_AGGR_MASK_MSN21XX, + .top_aggr_pwr_mask = MLXPLAT_CPLD_AGGR_MASK_MSN21XX, + .pwr_reg_offset = MLXPLAT_CPLD_LPC_REG_PWR_ADRR, + .pwr_mask = MLXPLAT_CPLD_PWR_MASK, + .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), }; -static struct resource mlxplat_mlxcpld_hotplug_resources[] = { +static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"), }; @@ -213,7 +228,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); } - mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_default_data; + mlxplat_hotplug = &mlxplat_mlxcpld_default_data; return 1; }; @@ -227,7 +242,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); } - mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_msn21xx_data; + mlxplat_hotplug = &mlxplat_mlxcpld_msn21xx_data; return 1; }; @@ -314,9 +329,10 @@ static int __init mlxplat_init(void) } priv->pdev_hotplug = platform_device_register_resndata( - &mlxplat_dev->dev, "mlxcpld-hotplug", -1, - mlxplat_mlxcpld_hotplug_resources, - ARRAY_SIZE(mlxplat_mlxcpld_hotplug_resources), + &mlxplat_dev->dev, "mlxcpld-hotplug", + PLATFORM_DEVID_NONE, + mlxplat_mlxcpld_resources, + ARRAY_SIZE(mlxplat_mlxcpld_resources), mlxplat_hotplug, sizeof(*mlxplat_hotplug)); if (IS_ERR(priv->pdev_hotplug)) { err = PTR_ERR(priv->pdev_hotplug); diff --git a/arch/x86/platform/atom/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 964ff4fc61f9..77bac859342d 100644 --- a/arch/x86/platform/atom/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -15,14 +15,15 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/debugfs.h> +#include <linux/device.h> #include <linux/init.h> +#include <linux/io.h> +#include <linux/platform_data/x86/clk-pmc-atom.h> +#include <linux/platform_data/x86/pmc_atom.h> +#include <linux/platform_device.h> #include <linux/pci.h> -#include <linux/device.h> -#include <linux/debugfs.h> #include <linux/seq_file.h> -#include <linux/io.h> - -#include <asm/pmc_atom.h> struct pmc_bit_map { const char *name; @@ -37,6 +38,11 @@ struct pmc_reg_map { const struct pmc_bit_map *pss; }; +struct pmc_data { + const struct pmc_reg_map *map; + const struct pmc_clk *clks; +}; + struct pmc_dev { u32 base_addr; void __iomem *regmap; @@ -50,6 +56,29 @@ struct pmc_dev { static struct pmc_dev pmc_device; static u32 acpi_base_addr; +static const struct pmc_clk byt_clks[] = { + { + .name = "xtal", + .freq = 25000000, + .parent_name = NULL, + }, + { + .name = "pll", + .freq = 19200000, + .parent_name = "xtal", + }, + {}, +}; + +static const struct pmc_clk cht_clks[] = { + { + .name = "xtal", + .freq = 19200000, + .parent_name = NULL, + }, + {}, +}; + static const struct pmc_bit_map d3_sts_0_map[] = { {"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA}, {"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1}, @@ -169,6 +198,16 @@ static const struct pmc_reg_map cht_reg_map = { .pss = cht_pss_map, }; +static const struct pmc_data byt_data = { + .map = &byt_reg_map, + .clks = byt_clks, +}; + +static const struct pmc_data cht_data = { + .map = &cht_reg_map, + .clks = cht_clks, +}; + static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset) { return readl(pmc->regmap + reg_offset); @@ -382,10 +421,37 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) } #endif /* 
CONFIG_DEBUG_FS */ +static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, + const struct pmc_data *pmc_data) +{ + struct platform_device *clkdev; + struct pmc_clk_data *clk_data; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + clk_data->base = pmc_regmap; /* offset is added by client */ + clk_data->clks = pmc_data->clks; + + clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", + PLATFORM_DEVID_NONE, + clk_data, sizeof(*clk_data)); + if (IS_ERR(clkdev)) { + kfree(clk_data); + return PTR_ERR(clkdev); + } + + kfree(clk_data); + + return 0; +} + static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) { struct pmc_dev *pmc = &pmc_device; - const struct pmc_reg_map *map = (struct pmc_reg_map *)ent->driver_data; + const struct pmc_data *data = (struct pmc_data *)ent->driver_data; + const struct pmc_reg_map *map = data->map; int ret; /* Obtain ACPI base address */ @@ -414,6 +480,12 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) dev_warn(&pdev->dev, "debugfs register failed\n"); + /* Register platform clocks - PMC_PLT_CLK [0..5] */ + ret = pmc_setup_clks(pdev, pmc->regmap, data); + if (ret) + dev_warn(&pdev->dev, "platform clocks register failed: %d\n", + ret); + pmc->init = true; return ret; } @@ -424,8 +496,8 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) * used by pci_match_id() call below. */ static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_reg_map }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_reg_map }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_data }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_data }, { 0, }, }; diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c new file mode 100644 index 000000000000..02e11fdbf375 --- /dev/null +++ b/drivers/platform/x86/silead_dmi.c @@ -0,0 +1,136 @@ +/* + * Silead touchscreen driver DMI based configuration code + * + * Copyright (c) 2017 Red Hat Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
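pmc_setup_clks() above can free clk_data immediately after registering the child device because platform_device_register_data() copies the passed platform data into the new device instead of keeping the caller's pointer. A hedged sketch of that register-then-free pattern with made-up names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_pdata {
	int channels;
};

static int example_register_child(struct device *parent)
{
	struct example_pdata *pdata;
	struct platform_device *child;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	pdata->channels = 6;

	/* The platform data is duplicated into the child device... */
	child = platform_device_register_data(parent, "example-child",
					      PLATFORM_DEVID_NONE,
					      pdata, sizeof(*pdata));

	/* ...so the local copy can be freed whether or not registration worked */
	kfree(pdata);

	return PTR_ERR_OR_ZERO(child);
}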
+ * + * Red Hat authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/dmi.h> +#include <linux/i2c.h> +#include <linux/notifier.h> +#include <linux/property.h> +#include <linux/string.h> + +struct silead_ts_dmi_data { + const char *acpi_name; + struct property_entry *properties; +}; + +static struct property_entry cube_iwork8_air_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1660), + PROPERTY_ENTRY_U32("touchscreen-size-y", 900), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct silead_ts_dmi_data cube_iwork8_air_data = { + .acpi_name = "MSSL1680:00", + .properties = cube_iwork8_air_props, +}; + +static struct property_entry jumper_ezpad_mini3_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1700), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1150), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct silead_ts_dmi_data jumper_ezpad_mini3_data = { + .acpi_name = "MSSL1680:00", + .properties = jumper_ezpad_mini3_props, +}; + +static const struct dmi_system_id silead_ts_dmi_table[] = { + { + /* CUBE iwork8 Air */ + .driver_data = (void *)&cube_iwork8_air_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "cube"), + DMI_MATCH(DMI_PRODUCT_NAME, "i1-TF"), + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + }, + }, + { + /* Jumper EZpad mini3 */ + .driver_data = (void *)&jumper_ezpad_mini3_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + /* jumperx.T87.KFBNEEA02 with the version-nr dropped */ + DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"), + }, + }, + { }, +}; + +static void silead_ts_dmi_add_props(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + const struct dmi_system_id *dmi_id; + const struct silead_ts_dmi_data *ts_data; + int error; + + dmi_id = dmi_first_match(silead_ts_dmi_table); + if (!dmi_id) + return; + + ts_data = dmi_id->driver_data; + if (has_acpi_companion(dev) && + !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) { + error = device_add_properties(dev, ts_data->properties); + if (error) + dev_err(dev, "failed to add properties: %d\n", error); + } +} + +static int silead_ts_dmi_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + silead_ts_dmi_add_props(dev); + break; + + default: + break; + } + + return 0; +} + +static struct notifier_block silead_ts_dmi_notifier = { + .notifier_call = silead_ts_dmi_notifier_call, +}; + +static int __init silead_ts_dmi_init(void) +{ + int error; + + error = bus_register_notifier(&i2c_bus_type, &silead_ts_dmi_notifier); + if (error) + pr_err("%s: failed to register i2c bus notifier: %d\n", + __func__, error); + + return error; +} + +/* + * We are registering out notifier after i2c core is initialized and i2c bus + * itself is ready (which happens at postcore initcall level), but before + * ACPI starts enumerating devices (at subsys initcall level). 
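The DMI table and i2c bus notifier above attach board-specific properties to the Silead touchscreen's i2c client as soon as it appears on the bus. Adding support for another machine only takes one more table entry; the sketch below reuses the includes and struct silead_ts_dmi_data from the file above, and the DMI strings, resolution and firmware name are invented placeholders, not a real board:

static struct property_entry example_board_props[] = {
	PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
	PROPERTY_ENTRY_U32("touchscreen-size-y", 1080),
	PROPERTY_ENTRY_STRING("firmware-name", "gsl0000-example-board.fw"),
	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
	{ }
};

static const struct silead_ts_dmi_data example_board_data = {
	.acpi_name = "MSSL1680:00",
	.properties = example_board_props,
};

/* The corresponding silead_ts_dmi_table[] entry would then look like this */
static const struct dmi_system_id example_dmi_entry __maybe_unused = {
	.driver_data = (void *)&example_board_data,
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
	},
};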
+ */ +arch_initcall(silead_ts_dmi_init); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index cacb43fb1df7..1d18b32628ec 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -163,6 +163,7 @@ enum tpacpi_hkey_event_t { TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */ TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */ TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */ + TP_HKEY_EV_KBD_LIGHT = 0x1012, /* Thinklight/kbd backlight */ TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */ TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */ TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */ @@ -372,11 +373,9 @@ enum led_status_t { TPACPI_LED_BLINK, }; -/* Special LED class that can defer work */ +/* tpacpi LED class */ struct tpacpi_led_classdev { struct led_classdev led_classdev; - struct work_struct work; - enum led_status_t new_state; int led; }; @@ -1959,7 +1958,7 @@ enum { /* Positions of some of the keys in hotkey masks */ TP_ACPI_HKEY_HIBERNATE_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF12, TP_ACPI_HKEY_BRGHTUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNHOME, TP_ACPI_HKEY_BRGHTDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNEND, - TP_ACPI_HKEY_THNKLGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP, + TP_ACPI_HKEY_KBD_LIGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP, TP_ACPI_HKEY_ZOOM_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNSPACE, TP_ACPI_HKEY_VOLUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP, TP_ACPI_HKEY_VOLDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, @@ -2344,7 +2343,7 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m) n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY); n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE); } - if (m & TP_ACPI_HKEY_THNKLGHT_MASK) { + if (m & TP_ACPI_HKEY_KBD_LIGHT_MASK) { d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT); n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT); } @@ -5084,18 +5083,27 @@ static struct ibm_struct video_driver_data = { * Keyboard backlight subdriver */ +static enum led_brightness kbdlight_brightness; +static DEFINE_MUTEX(kbdlight_mutex); + static int kbdlight_set_level(int level) { + int ret = 0; + if (!hkey_handle) return -ENXIO; + mutex_lock(&kbdlight_mutex); + if (!acpi_evalf(hkey_handle, NULL, "MLCS", "dd", level)) - return -EIO; + ret = -EIO; + else + kbdlight_brightness = level; - return 0; -} + mutex_unlock(&kbdlight_mutex); -static int kbdlight_set_level_and_update(int level); + return ret; +} static int kbdlight_get_level(void) { @@ -5158,24 +5166,10 @@ static bool kbdlight_is_supported(void) return status & BIT(9); } -static void kbdlight_set_worker(struct work_struct *work) -{ - struct tpacpi_led_classdev *data = - container_of(work, struct tpacpi_led_classdev, work); - - if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) - kbdlight_set_level_and_update(data->new_state); -} - -static void kbdlight_sysfs_set(struct led_classdev *led_cdev, +static int kbdlight_sysfs_set(struct led_classdev *led_cdev, enum led_brightness brightness) { - struct tpacpi_led_classdev *data = - container_of(led_cdev, - struct tpacpi_led_classdev, - led_classdev); - data->new_state = brightness; - queue_work(tpacpi_wq, &data->work); + return kbdlight_set_level(brightness); } static enum led_brightness kbdlight_sysfs_get(struct led_classdev *led_cdev) @@ -5193,7 +5187,8 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = { .led_classdev = { .name = "tpacpi::kbd_backlight", .max_brightness = 2, - .brightness_set = &kbdlight_sysfs_set, + .flags = 
LED_BRIGHT_HW_CHANGED, + .brightness_set_blocking = &kbdlight_sysfs_set, .brightness_get = &kbdlight_sysfs_get, } }; @@ -5205,7 +5200,6 @@ static int __init kbdlight_init(struct ibm_init_struct *iibm) vdbg_printk(TPACPI_DBG_INIT, "initializing kbdlight subdriver\n"); TPACPI_ACPIHANDLE_INIT(hkey); - INIT_WORK(&tpacpi_led_kbdlight.work, kbdlight_set_worker); if (!kbdlight_is_supported()) { tp_features.kbdlight = 0; @@ -5213,6 +5207,7 @@ static int __init kbdlight_init(struct ibm_init_struct *iibm) return 1; } + kbdlight_brightness = kbdlight_sysfs_get(NULL); tp_features.kbdlight = 1; rc = led_classdev_register(&tpacpi_pdev->dev, @@ -5222,6 +5217,8 @@ static int __init kbdlight_init(struct ibm_init_struct *iibm) return rc; } + tpacpi_hotkey_driver_mask_set(hotkey_driver_mask | + TP_ACPI_HKEY_KBD_LIGHT_MASK); return 0; } @@ -5229,7 +5226,6 @@ static void kbdlight_exit(void) { if (tp_features.kbdlight) led_classdev_unregister(&tpacpi_led_kbdlight.led_classdev); - flush_workqueue(tpacpi_wq); } static int kbdlight_set_level_and_update(int level) @@ -5358,25 +5354,11 @@ static int light_set_status(int status) return -ENXIO; } -static void light_set_status_worker(struct work_struct *work) -{ - struct tpacpi_led_classdev *data = - container_of(work, struct tpacpi_led_classdev, work); - - if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) - light_set_status((data->new_state != TPACPI_LED_OFF)); -} - -static void light_sysfs_set(struct led_classdev *led_cdev, +static int light_sysfs_set(struct led_classdev *led_cdev, enum led_brightness brightness) { - struct tpacpi_led_classdev *data = - container_of(led_cdev, - struct tpacpi_led_classdev, - led_classdev); - data->new_state = (brightness != LED_OFF) ? - TPACPI_LED_ON : TPACPI_LED_OFF; - queue_work(tpacpi_wq, &data->work); + return light_set_status((brightness != LED_OFF) ? 
+ TPACPI_LED_ON : TPACPI_LED_OFF); } static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev) @@ -5387,7 +5369,7 @@ static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev) static struct tpacpi_led_classdev tpacpi_led_thinklight = { .led_classdev = { .name = "tpacpi::thinklight", - .brightness_set = &light_sysfs_set, + .brightness_set_blocking = &light_sysfs_set, .brightness_get = &light_sysfs_get, } }; @@ -5403,7 +5385,6 @@ static int __init light_init(struct ibm_init_struct *iibm) TPACPI_ACPIHANDLE_INIT(lght); } TPACPI_ACPIHANDLE_INIT(cmos); - INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker); /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */ tp_features.light = (cmos_handle || lght_handle) && !ledb_handle; @@ -5437,7 +5418,6 @@ static int __init light_init(struct ibm_init_struct *iibm) static void light_exit(void) { led_classdev_unregister(&tpacpi_led_thinklight.led_classdev); - flush_workqueue(tpacpi_wq); } static int light_read(struct seq_file *m) @@ -5704,29 +5684,21 @@ static int led_set_status(const unsigned int led, return rc; } -static void led_set_status_worker(struct work_struct *work) -{ - struct tpacpi_led_classdev *data = - container_of(work, struct tpacpi_led_classdev, work); - - if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) - led_set_status(data->led, data->new_state); -} - -static void led_sysfs_set(struct led_classdev *led_cdev, +static int led_sysfs_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct tpacpi_led_classdev *data = container_of(led_cdev, struct tpacpi_led_classdev, led_classdev); + enum led_status_t new_state; if (brightness == LED_OFF) - data->new_state = TPACPI_LED_OFF; + new_state = TPACPI_LED_OFF; else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK) - data->new_state = TPACPI_LED_ON; + new_state = TPACPI_LED_ON; else - data->new_state = TPACPI_LED_BLINK; + new_state = TPACPI_LED_BLINK; - queue_work(tpacpi_wq, &data->work); + return led_set_status(data->led, new_state); } static int led_sysfs_blink_set(struct led_classdev *led_cdev, @@ -5743,10 +5715,7 @@ static int led_sysfs_blink_set(struct led_classdev *led_cdev, } else if ((*delay_on != 500) || (*delay_off != 500)) return -EINVAL; - data->new_state = TPACPI_LED_BLINK; - queue_work(tpacpi_wq, &data->work); - - return 0; + return led_set_status(data->led, TPACPI_LED_BLINK); } static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev) @@ -5775,7 +5744,6 @@ static void led_exit(void) led_classdev_unregister(&tpacpi_leds[i].led_classdev); } - flush_workqueue(tpacpi_wq); kfree(tpacpi_leds); } @@ -5789,7 +5757,7 @@ static int __init tpacpi_init_led(unsigned int led) if (!tpacpi_led_names[led]) return 0; - tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set; + tpacpi_leds[led].led_classdev.brightness_set_blocking = &led_sysfs_set; tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set; if (led_supported == TPACPI_LED_570) tpacpi_leds[led].led_classdev.brightness_get = @@ -5797,8 +5765,6 @@ static int __init tpacpi_init_led(unsigned int led) tpacpi_leds[led].led_classdev.name = tpacpi_led_names[led]; - INIT_WORK(&tpacpi_leds[led].work, led_set_status_worker); - rc = led_classdev_register(&tpacpi_pdev->dev, &tpacpi_leds[led].led_classdev); if (rc < 0) @@ -9169,6 +9135,24 @@ static void tpacpi_driver_event(const unsigned int hkey_event) volume_alsa_notify_change(); } } + if (tp_features.kbdlight && hkey_event == TP_HKEY_EV_KBD_LIGHT) { + enum led_brightness brightness; + + 
mutex_lock(&kbdlight_mutex); + + /* + * Check the brightness actually changed, setting the brightness + * through kbdlight_set_level() also triggers this event. + */ + brightness = kbdlight_sysfs_get(NULL); + if (kbdlight_brightness != brightness) { + kbdlight_brightness = brightness; + led_classdev_notify_brightness_hw_changed( + &tpacpi_led_kbdlight.led_classdev, brightness); + } + + mutex_unlock(&kbdlight_mutex); + } } static void hotkey_driver_event(const unsigned int scancode) diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c index fa0f19b975a6..974fd684bab2 100644 --- a/drivers/power/avs/smartreflex.c +++ b/drivers/power/avs/smartreflex.c @@ -195,7 +195,7 @@ static void sr_stop_vddautocomp(struct omap_sr *sr) } /* - * This function handles the intializations which have to be done + * This function handles the initializations which have to be done * only when both sr device and class driver regiter has * completed. This will be attempted to be called from both sr class * driver register and sr device intializtion API's. Only one call @@ -671,7 +671,7 @@ int sr_register_class(struct omap_sr_class_data *class_data) sr_class = class_data; /* - * Call into late init to do intializations that require + * Call into late init to do initializations that require * both sr driver and sr class driver to be initiallized. */ list_for_each_entry(sr_info, &sr_list, node) @@ -899,7 +899,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) list_add(&sr_info->node, &sr_list); /* - * Call into late init to do intializations that require + * Call into late init to do initializations that require * both sr driver and sr class driver to be initiallized. */ if (sr_class) { diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c index f2ab435954f6..73e496a72113 100644 --- a/drivers/ps3/ps3-sys-manager.c +++ b/drivers/ps3/ps3-sys-manager.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/workqueue.h> #include <linux/reboot.h> +#include <linux/sched/signal.h> #include <asm/firmware.h> #include <asm/lv1call.h> diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 2d0cfaa6d84c..42e37c20b361 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -76,7 +76,9 @@ config PWM_ATMEL_TCB config PWM_BCM_IPROC tristate "iProc PWM support" - depends on ARCH_BCM_IPROC + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on COMMON_CLK + default ARCH_BCM_IPROC help Generic PWM framework driver for Broadcom iProc PWM block. This block is used in Broadcom iProc SoC's. 
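Back in the thinkpad_acpi changes above, the keyboard backlight LED now sets LED_BRIGHT_HW_CHANGED and calls led_classdev_notify_brightness_hw_changed() when the firmware toggles the backlight on its own, so userspace sees a brightness_hw_changed notification in sysfs. A condensed sketch of that pairing, assuming CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is enabled; the example names and the trivial brightness_set_blocking stub are invented:

#include <linux/leds.h>

static int example_set_blocking(struct led_classdev *cdev,
				enum led_brightness value)
{
	return 0;	/* a real driver would program the hardware here */
}

static struct led_classdev example_kbd_led = {
	.name = "example::kbd_backlight",
	.max_brightness = 2,
	.flags = LED_BRIGHT_HW_CHANGED,
	.brightness_set_blocking = example_set_blocking,
};

static void example_firmware_changed_brightness(enum led_brightness new_level)
{
	/* Emits the brightness_hw_changed sysfs notification for this LED */
	led_classdev_notify_brightness_hw_changed(&example_kbd_led, new_level);
}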
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 172ef8245811..a0860b30bd93 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -137,9 +137,14 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; + /* check, whether the driver supports a third cell for flags */ if (pc->of_pwm_n_cells < 3) return ERR_PTR(-EINVAL); + /* flags in the third cell are optional */ + if (args->args_count < 2) + return ERR_PTR(-EINVAL); + if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); @@ -148,11 +153,10 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) return pwm; pwm->args.period = args->args[1]; + pwm->args.polarity = PWM_POLARITY_NORMAL; - if (args->args[2] & PWM_POLARITY_INVERTED) + if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED) pwm->args.polarity = PWM_POLARITY_INVERSED; - else - pwm->args.polarity = PWM_POLARITY_NORMAL; return pwm; } @@ -163,9 +167,14 @@ of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; + /* sanity check driver support */ if (pc->of_pwm_n_cells < 2) return ERR_PTR(-EINVAL); + /* all cells are required */ + if (args->args_count != pc->of_pwm_n_cells) + return ERR_PTR(-EINVAL); + if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); @@ -663,24 +672,17 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id) err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index, &args); if (err) { - pr_debug("%s(): can't parse \"pwms\" property\n", __func__); + pr_err("%s(): can't parse \"pwms\" property\n", __func__); return ERR_PTR(err); } pc = of_node_to_pwmchip(args.np); if (IS_ERR(pc)) { - pr_debug("%s(): PWM chip not found\n", __func__); + pr_err("%s(): PWM chip not found\n", __func__); pwm = ERR_CAST(pc); goto put; } - if (args.args_count != pc->of_pwm_n_cells) { - pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name, - args.np->full_name); - pwm = ERR_PTR(-EINVAL); - goto put; - } - pwm = pc->of_xlate(pc, &args); if (IS_ERR(pwm)) goto put; @@ -757,12 +759,13 @@ void pwm_remove_table(struct pwm_lookup *table, size_t num) */ struct pwm_device *pwm_get(struct device *dev, const char *con_id) { - struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); const char *dev_id = dev ? dev_name(dev) : NULL; - struct pwm_chip *chip = NULL; + struct pwm_device *pwm; + struct pwm_chip *chip; unsigned int best = 0; struct pwm_lookup *p, *chosen = NULL; unsigned int match; + int err; /* look up via DT first */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) @@ -817,24 +820,35 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) } } - if (!chosen) { - pwm = ERR_PTR(-ENODEV); - goto out; - } + mutex_unlock(&pwm_lookup_lock); + + if (!chosen) + return ERR_PTR(-ENODEV); chip = pwmchip_find_by_name(chosen->provider); + + /* + * If the lookup entry specifies a module, load the module and retry + * the PWM chip lookup. This can be used to work around driver load + * ordering issues if driver's can't be made to properly support the + * deferred probe mechanism. 
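pwm_get() below now retries the chip lookup after request_module() when a board's lookup entry names the providing driver module, which works around load-ordering problems on non-DT boards. A hedged sketch of such a lookup table, with made-up provider, consumer and module names:

#include <linux/kernel.h>
#include <linux/pwm.h>

static struct pwm_lookup example_pwm_lookup[] = {
	{
		.provider = "example-pwmchip.0",
		.index = 0,
		.dev_id = "example-backlight",
		.con_id = "backlight",
		.period = 5000000,	/* 5 ms, i.e. 200 Hz */
		.polarity = PWM_POLARITY_NORMAL,
		.module = "pwm-example",	/* loaded on demand by pwm_get() */
	},
};

static void example_register_pwm_lookup(void)
{
	pwm_add_table(example_pwm_lookup, ARRAY_SIZE(example_pwm_lookup));
}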
+ */ + if (!chip && chosen->module) { + err = request_module(chosen->module); + if (err == 0) + chip = pwmchip_find_by_name(chosen->provider); + } + if (!chip) - goto out; + return ERR_PTR(-EPROBE_DEFER); pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id); if (IS_ERR(pwm)) - goto out; + return pwm; pwm->args.period = chosen->period; pwm->args.polarity = chosen->polarity; -out: - mutex_unlock(&pwm_lookup_lock); return pwm; } EXPORT_SYMBOL_GPL(pwm_get); @@ -960,18 +974,6 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm) } EXPORT_SYMBOL_GPL(devm_pwm_put); -/** - * pwm_can_sleep() - report whether PWM access will sleep - * @pwm: PWM device - * - * Returns: True if accessing the PWM can sleep, false otherwise. - */ -bool pwm_can_sleep(struct pwm_device *pwm) -{ - return true; -} -EXPORT_SYMBOL_GPL(pwm_can_sleep); - #ifdef CONFIG_DEBUG_FS static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) { diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index 14fc011faa32..999187277ea5 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -270,7 +270,6 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) chip->chip.npwm = 1; chip->chip.of_xlate = of_pwm_xlate_with_flags; chip->chip.of_pwm_n_cells = 3; - chip->chip.can_sleep = 1; ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED); if (ret) { diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index e6b8b1b7e6ba..67a7023be5c2 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -385,7 +385,6 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; - atmel_pwm->chip.can_sleep = true; atmel_pwm->config = data->config; atmel_pwm->updated_pwms = 0; mutex_init(&atmel_pwm->isr_lock); diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index c63418322023..09a95aeb3a70 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -276,7 +276,6 @@ static int kona_pwmc_probe(struct platform_device *pdev) kp->chip.npwm = 6; kp->chip.of_xlate = of_pwm_xlate_with_flags; kp->chip.of_pwm_n_cells = 3; - kp->chip.can_sleep = true; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); kp->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 01339c152ab0..771859aca4be 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -206,7 +206,6 @@ static int berlin_pwm_probe(struct platform_device *pdev) pwm->chip.ops = &berlin_pwm_ops; pwm->chip.base = -1; pwm->chip.npwm = 4; - pwm->chip.can_sleep = true; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c index 7631ef194de7..d2ed0a2a18e8 100644 --- a/drivers/pwm/pwm-bfin.c +++ b/drivers/pwm/pwm-bfin.c @@ -103,7 +103,7 @@ static void bfin_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) disable_gptimer(priv->pin); } -static struct pwm_ops bfin_pwm_ops = { +static const struct pwm_ops bfin_pwm_ops = { .request = bfin_pwm_request, .free = bfin_pwm_free, .config = bfin_pwm_config, diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c index 5d5adee16886..8063cffa1c96 100644 --- a/drivers/pwm/pwm-brcmstb.c +++ b/drivers/pwm/pwm-brcmstb.c @@ -270,7 +270,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev) p->chip.ops = &brcmstb_pwm_ops; p->chip.base = -1; p->chip.npwm = 2; - p->chip.can_sleep = true; res 
= platform_get_resource(pdev, IORESOURCE_MEM, 0); p->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index fad968eb75f6..557b4ea16796 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -446,7 +446,6 @@ static int fsl_pwm_probe(struct platform_device *pdev) fpc->chip.of_pwm_n_cells = 3; fpc->chip.base = -1; fpc->chip.npwm = 8; - fpc->chip.can_sleep = true; ret = pwmchip_add(&fpc->chip); if (ret < 0) { diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index d600fd5cd4ba..2ba5c3a398ff 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -38,6 +38,7 @@ #define MX3_PWMCR_DOZEEN (1 << 24) #define MX3_PWMCR_WAITEN (1 << 23) #define MX3_PWMCR_DBGEN (1 << 22) +#define MX3_PWMCR_POUTC (1 << 18) #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) #define MX3_PWMCR_CLKSRC_IPG (1 << 16) #define MX3_PWMCR_SWR (1 << 3) @@ -49,15 +50,10 @@ struct imx_chip { struct clk *clk_per; - struct clk *clk_ipg; void __iomem *mmio_base; struct pwm_chip chip; - - int (*config)(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns); - void (*set_enable)(struct pwm_chip *chip, bool enable); }; #define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) @@ -91,176 +87,170 @@ static int imx_pwm_config_v1(struct pwm_chip *chip, return 0; } -static void imx_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) +static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm) { struct imx_chip *imx = to_imx_chip(chip); u32 val; + int ret; - val = readl(imx->mmio_base + MX1_PWMC); - - if (enable) - val |= MX1_PWMC_EN; - else - val &= ~MX1_PWMC_EN; + ret = clk_prepare_enable(imx->clk_per); + if (ret < 0) + return ret; + val = readl(imx->mmio_base + MX1_PWMC); + val |= MX1_PWMC_EN; writel(val, imx->mmio_base + MX1_PWMC); -} - -static int imx_pwm_config_v2(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns) -{ - struct imx_chip *imx = to_imx_chip(chip); - struct device *dev = chip->dev; - unsigned long long c; - unsigned long period_cycles, duty_cycles, prescale; - unsigned int period_ms; - bool enable = pwm_is_enabled(pwm); - int wait_count = 0, fifoav; - u32 cr, sr; - - /* - * i.MX PWMv2 has a 4-word sample FIFO. - * In order to avoid FIFO overflow issue, we do software reset - * to clear all sample FIFO if the controller is disabled or - * wait for a full PWM cycle to get a relinquished FIFO slot - * when the controller is enabled and the FIFO is fully loaded. 
- */ - if (enable) { - sr = readl(imx->mmio_base + MX3_PWMSR); - fifoav = sr & MX3_PWMSR_FIFOAV_MASK; - if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { - period_ms = DIV_ROUND_UP(pwm_get_period(pwm), - NSEC_PER_MSEC); - msleep(period_ms); - - sr = readl(imx->mmio_base + MX3_PWMSR); - if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK)) - dev_warn(dev, "there is no free FIFO slot\n"); - } - } else { - writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR); - do { - usleep_range(200, 1000); - cr = readl(imx->mmio_base + MX3_PWMCR); - } while ((cr & MX3_PWMCR_SWR) && - (wait_count++ < MX3_PWM_SWR_LOOP)); - - if (cr & MX3_PWMCR_SWR) - dev_warn(dev, "software reset timeout\n"); - } - - c = clk_get_rate(imx->clk_per); - c = c * period_ns; - do_div(c, 1000000000); - period_cycles = c; - - prescale = period_cycles / 0x10000 + 1; - - period_cycles /= prescale; - c = (unsigned long long)period_cycles * duty_ns; - do_div(c, period_ns); - duty_cycles = c; - - /* - * according to imx pwm RM, the real period value should be - * PERIOD value in PWMPR plus 2. - */ - if (period_cycles > 2) - period_cycles -= 2; - else - period_cycles = 0; - - writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); - writel(period_cycles, imx->mmio_base + MX3_PWMPR); - - cr = MX3_PWMCR_PRESCALER(prescale) | - MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | - MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH; - - if (enable) - cr |= MX3_PWMCR_EN; - - writel(cr, imx->mmio_base + MX3_PWMCR); return 0; } -static void imx_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) +static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm) { struct imx_chip *imx = to_imx_chip(chip); u32 val; - val = readl(imx->mmio_base + MX3_PWMCR); - - if (enable) - val |= MX3_PWMCR_EN; - else - val &= ~MX3_PWMCR_EN; + val = readl(imx->mmio_base + MX1_PWMC); + val &= ~MX1_PWMC_EN; + writel(val, imx->mmio_base + MX1_PWMC); - writel(val, imx->mmio_base + MX3_PWMCR); + clk_disable_unprepare(imx->clk_per); } -static int imx_pwm_config(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns) +static void imx_pwm_sw_reset(struct pwm_chip *chip) { struct imx_chip *imx = to_imx_chip(chip); - int ret; - - ret = clk_prepare_enable(imx->clk_ipg); - if (ret) - return ret; + struct device *dev = chip->dev; + int wait_count = 0; + u32 cr; + + writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR); + do { + usleep_range(200, 1000); + cr = readl(imx->mmio_base + MX3_PWMCR); + } while ((cr & MX3_PWMCR_SWR) && + (wait_count++ < MX3_PWM_SWR_LOOP)); + + if (cr & MX3_PWMCR_SWR) + dev_warn(dev, "software reset timeout\n"); +} - ret = imx->config(chip, pwm, duty_ns, period_ns); +static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, + struct pwm_device *pwm) +{ + struct imx_chip *imx = to_imx_chip(chip); + struct device *dev = chip->dev; + unsigned int period_ms; + int fifoav; + u32 sr; - clk_disable_unprepare(imx->clk_ipg); + sr = readl(imx->mmio_base + MX3_PWMSR); + fifoav = sr & MX3_PWMSR_FIFOAV_MASK; + if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { + period_ms = DIV_ROUND_UP(pwm_get_period(pwm), + NSEC_PER_MSEC); + msleep(period_ms); - return ret; + sr = readl(imx->mmio_base + MX3_PWMSR); + if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK)) + dev_warn(dev, "there is no free FIFO slot\n"); + } } -static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { + unsigned long period_cycles, duty_cycles, prescale; struct imx_chip *imx = to_imx_chip(chip); + struct pwm_state 
cstate; + unsigned long long c; int ret; + u32 cr; + + pwm_get_state(pwm, &cstate); + + if (state->enabled) { + c = clk_get_rate(imx->clk_per); + c *= state->period; + + do_div(c, 1000000000); + period_cycles = c; + + prescale = period_cycles / 0x10000 + 1; + + period_cycles /= prescale; + c = (unsigned long long)period_cycles * state->duty_cycle; + do_div(c, state->period); + duty_cycles = c; + + /* + * according to imx pwm RM, the real period value should be + * PERIOD value in PWMPR plus 2. + */ + if (period_cycles > 2) + period_cycles -= 2; + else + period_cycles = 0; + + /* + * Wait for a free FIFO slot if the PWM is already enabled, and + * flush the FIFO if the PWM was disabled and is about to be + * enabled. + */ + if (cstate.enabled) { + imx_pwm_wait_fifo_slot(chip, pwm); + } else { + ret = clk_prepare_enable(imx->clk_per); + if (ret) + return ret; + + imx_pwm_sw_reset(chip); + } - ret = clk_prepare_enable(imx->clk_per); - if (ret) - return ret; + writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); + writel(period_cycles, imx->mmio_base + MX3_PWMPR); - imx->set_enable(chip, true); + cr = MX3_PWMCR_PRESCALER(prescale) | + MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | + MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH | + MX3_PWMCR_EN; - return 0; -} + if (state->polarity == PWM_POLARITY_INVERSED) + cr |= MX3_PWMCR_POUTC; -static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct imx_chip *imx = to_imx_chip(chip); + writel(cr, imx->mmio_base + MX3_PWMCR); + } else if (cstate.enabled) { + writel(0, imx->mmio_base + MX3_PWMCR); - imx->set_enable(chip, false); + clk_disable_unprepare(imx->clk_per); + } - clk_disable_unprepare(imx->clk_per); + return 0; } -static struct pwm_ops imx_pwm_ops = { - .enable = imx_pwm_enable, - .disable = imx_pwm_disable, - .config = imx_pwm_config, +static const struct pwm_ops imx_pwm_ops_v1 = { + .enable = imx_pwm_enable_v1, + .disable = imx_pwm_disable_v1, + .config = imx_pwm_config_v1, + .owner = THIS_MODULE, +}; + +static const struct pwm_ops imx_pwm_ops_v2 = { + .apply = imx_pwm_apply_v2, .owner = THIS_MODULE, }; struct imx_pwm_data { - int (*config)(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns); - void (*set_enable)(struct pwm_chip *chip, bool enable); + bool polarity_supported; + const struct pwm_ops *ops; }; static struct imx_pwm_data imx_pwm_data_v1 = { - .config = imx_pwm_config_v1, - .set_enable = imx_pwm_set_enable_v1, + .ops = &imx_pwm_ops_v1, }; static struct imx_pwm_data imx_pwm_data_v2 = { - .config = imx_pwm_config_v2, - .set_enable = imx_pwm_set_enable_v2, + .polarity_supported = true, + .ops = &imx_pwm_ops_v2, }; static const struct of_device_id imx_pwm_dt_ids[] = { @@ -282,6 +272,8 @@ static int imx_pwm_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; + data = of_id->data; + imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); if (imx == NULL) return -ENOMEM; @@ -293,28 +285,22 @@ static int imx_pwm_probe(struct platform_device *pdev) return PTR_ERR(imx->clk_per); } - imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(imx->clk_ipg)) { - dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", - PTR_ERR(imx->clk_ipg)); - return PTR_ERR(imx->clk_ipg); - } - - imx->chip.ops = &imx_pwm_ops; + imx->chip.ops = data->ops; imx->chip.dev = &pdev->dev; imx->chip.base = -1; imx->chip.npwm = 1; - imx->chip.can_sleep = true; + + if (data->polarity_supported) { + dev_dbg(&pdev->dev, "PWM supports output inversion\n"); + imx->chip.of_xlate = of_pwm_xlate_with_flags; + 
imx->chip.of_pwm_n_cells = 3; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); - data = of_id->data; - imx->config = data->config; - imx->set_enable = data->set_enable; - ret = pwmchip_add(&imx->chip); if (ret < 0) return ret; diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index 872ea76a4f19..52584e9962ed 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -278,7 +278,6 @@ static int lp3943_pwm_probe(struct platform_device *pdev) lp3943_pwm->chip.dev = &pdev->dev; lp3943_pwm->chip.ops = &lp3943_pwm_ops; lp3943_pwm->chip.npwm = LP3943_NUM_PWMS; - lp3943_pwm->chip.can_sleep = true; platform_set_drvdata(pdev, lp3943_pwm); diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index 3622f093490e..053088b9b66e 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -17,6 +17,27 @@ #include "pwm-lpss.h" +/* BayTrail */ +static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { + .clk_rate = 25000000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Braswell */ +static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { + .clk_rate = 19200000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Broxton */ +static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, +}; + static int pwm_lpss_probe_pci(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -80,6 +101,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, + { PCI_VDEVICE(INTEL, 0x31c8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x5ac8), (unsigned long)&pwm_lpss_bxt_info}, { }, }; diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 54433fc6d1a4..b22b6fdadb9a 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -18,6 +18,27 @@ #include "pwm-lpss.h" +/* BayTrail */ +static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { + .clk_rate = 25000000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Braswell */ +static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { + .clk_rate = 19200000, + .npwm = 1, + .base_unit_bits = 16, +}; + +/* Broxton */ +static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, +}; + static int pwm_lpss_probe_platform(struct platform_device *pdev) { const struct pwm_lpss_boardinfo *info; diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 72c0bce5a75c..689d2c1cbead 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,30 +38,6 @@ struct pwm_lpss_chip { const struct pwm_lpss_boardinfo *info; }; -/* BayTrail */ -const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { - .clk_rate = 25000000, - .npwm = 1, - .base_unit_bits = 16, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_byt_info); - -/* Braswell */ -const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { - .clk_rate = 19200000, - .npwm = 1, - .base_unit_bits = 16, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info); - -/* Broxton */ -const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = 
{ - .clk_rate = 19200000, - .npwm = 4, - .base_unit_bits = 22, -}; -EXPORT_SYMBOL_GPL(pwm_lpss_bxt_info); - static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) { return container_of(chip, struct pwm_lpss_chip, chip); @@ -80,17 +57,42 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value) writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); } -static void pwm_lpss_update(struct pwm_device *pwm) +static int pwm_lpss_update(struct pwm_device *pwm) { + struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); + const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; + const unsigned int ms = 500 * USEC_PER_MSEC; + u32 val; + int err; + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); - /* Give it some time to propagate */ - usleep_range(10, 50); + + /* + * PWM Configuration register has SW_UPDATE bit that is set when a new + * configuration is written to the register. The bit is automatically + * cleared at the start of the next output cycle by the IP block. + * + * If one writes a new configuration to the register while it still has + * the bit enabled, PWM may freeze. That is, while one can still write + * to the register, it won't have an effect. Thus, we try to sleep long + * enough that the bit gets cleared and make sure the bit is not + * enabled while we update the configuration. + */ + err = readl_poll_timeout(addr, val, !(val & PWM_SW_UPDATE), 40, ms); + if (err) + dev_err(pwm->chip->dev, "PWM_SW_UPDATE was not cleared\n"); + + return err; } -static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static inline int pwm_lpss_is_updating(struct pwm_device *pwm) +{ + return (pwm_lpss_read(pwm) & PWM_SW_UPDATE) ? -EBUSY : 0; +} + +static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + int duty_ns, int period_ns) { - struct pwm_lpss_chip *lpwm = to_lpwm(chip); unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; @@ -102,62 +104,62 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm, * The equation is: * base_unit = round(base_unit_range * freq / c) */ - base_unit_range = BIT(lpwm->info->base_unit_bits); + base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; freq *= base_unit_range; base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); - if (duty_ns <= 0) - duty_ns = 1; on_time_div = 255ULL * duty_ns; do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - pm_runtime_get_sync(chip->dev); - ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; - ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); - base_unit &= (base_unit_range - 1); + ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); + base_unit &= base_unit_range; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; pwm_lpss_write(pwm, ctrl); - - /* - * If the PWM is already enabled we need to notify the hardware - * about the change by setting PWM_SW_UPDATE. - */ - if (pwm_is_enabled(pwm)) - pwm_lpss_update(pwm); - - pm_runtime_put(chip->dev); - - return 0; } -static int pwm_lpss_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { - pm_runtime_get_sync(chip->dev); + struct pwm_lpss_chip *lpwm = to_lpwm(chip); + int ret; - /* - * Hardware must first see PWM_SW_UPDATE before the PWM can be - * enabled. 
- */ - pwm_lpss_update(pwm); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); - return 0; -} + if (state->enabled) { + if (!pwm_is_enabled(pwm)) { + pm_runtime_get_sync(chip->dev); + ret = pwm_lpss_is_updating(pwm); + if (ret) { + pm_runtime_put(chip->dev); + return ret; + } + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); + ret = pwm_lpss_update(pwm); + if (ret) { + pm_runtime_put(chip->dev); + return ret; + } + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); + } else { + ret = pwm_lpss_is_updating(pwm); + if (ret) + return ret; + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); + return pwm_lpss_update(pwm); + } + } else if (pwm_is_enabled(pwm)) { + pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); + pm_runtime_put(chip->dev); + } -static void pwm_lpss_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); - pm_runtime_put(chip->dev); + return 0; } static const struct pwm_ops pwm_lpss_ops = { - .config = pwm_lpss_config, - .enable = pwm_lpss_enable, - .disable = pwm_lpss_disable, + .apply = pwm_lpss_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 04766e0d41aa..c94cd7c2695d 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -24,10 +24,6 @@ struct pwm_lpss_boardinfo { unsigned long base_unit_bits; }; -extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info; -extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info; -extern const struct pwm_lpss_boardinfo pwm_lpss_bxt_info; - struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, const struct pwm_lpss_boardinfo *info); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c index 9a596324ebef..a6017ad9926c 100644 --- a/drivers/pwm/pwm-mxs.c +++ b/drivers/pwm/pwm-mxs.c @@ -151,7 +151,7 @@ static int mxs_pwm_probe(struct platform_device *pdev) mxs->chip.dev = &pdev->dev; mxs->chip.ops = &mxs_pwm_ops; mxs->chip.base = -1; - mxs->chip.can_sleep = true; + ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm); if (ret < 0) { dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret); diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 117fccf7934a..0cfb3571a732 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -20,8 +20,10 @@ */ #include <linux/acpi.h> +#include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/pwm.h> @@ -65,7 +67,6 @@ #define PCA9685_MAXCHAN 0x10 #define LED_FULL (1 << 4) -#define MODE1_RESTART (1 << 7) #define MODE1_SLEEP (1 << 4) #define MODE2_INVRT (1 << 4) #define MODE2_OUTDRV (1 << 2) @@ -81,6 +82,10 @@ struct pca9685 { int active_cnt; int duty_ns; int period_ns; +#if IS_ENABLED(CONFIG_GPIOLIB) + struct mutex lock; + struct gpio_chip gpio; +#endif }; static inline struct pca9685 *to_pca(struct pwm_chip *chip) @@ -88,6 +93,151 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) return container_of(chip, struct pca9685, chip); } +#if IS_ENABLED(CONFIG_GPIOLIB) +static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm; + + mutex_lock(&pca->lock); + + pwm = &pca->chip.pwms[offset]; + + if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) { + mutex_unlock(&pca->lock); + return -EBUSY; + } + + 
pwm_set_chip_data(pwm, (void *)1); + + mutex_unlock(&pca->lock); + return 0; +} + +static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm; + + mutex_lock(&pca->lock); + pwm = &pca->chip.pwms[offset]; + pwm_set_chip_data(pwm, NULL); + mutex_unlock(&pca->lock); +} + +static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) +{ + bool is_gpio = false; + + mutex_lock(&pca->lock); + + if (pwm->hwpwm >= PCA9685_MAXCHAN) { + unsigned int i; + + /* + * Check if any of the GPIOs are requested and in that case + * prevent using the "all LEDs" channel. + */ + for (i = 0; i < pca->gpio.ngpio; i++) + if (gpiochip_is_requested(&pca->gpio, i)) { + is_gpio = true; + break; + } + } else if (pwm_get_chip_data(pwm)) { + is_gpio = true; + } + + mutex_unlock(&pca->lock); + return is_gpio; +} + +static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm = &pca->chip.pwms[offset]; + unsigned int value; + + regmap_read(pca->regmap, LED_N_ON_H(pwm->hwpwm), &value); + + return value & LED_FULL; +} + +static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset, + int value) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); + struct pwm_device *pwm = &pca->chip.pwms[offset]; + unsigned int on = value ? LED_FULL : 0; + + /* Clear both OFF registers */ + regmap_write(pca->regmap, LED_N_OFF_L(pwm->hwpwm), 0); + regmap_write(pca->regmap, LED_N_OFF_H(pwm->hwpwm), 0); + + /* Set the full ON bit */ + regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on); +} + +static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + /* Always out */ + return 0; +} + +static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio, + unsigned int offset) +{ + return -EINVAL; +} + +static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio, + unsigned int offset, int value) +{ + pca9685_pwm_gpio_set(gpio, offset, value); + + return 0; +} + +/* + * The PCA9685 has a bit for turning the PWM output full off or on. Some + * boards like Intel Galileo actually uses these as normal GPIOs so we + * expose a GPIO chip here which can exclusively take over the underlying + * PWM channel. 
+ */ +static int pca9685_pwm_gpio_probe(struct pca9685 *pca) +{ + struct device *dev = pca->chip.dev; + + mutex_init(&pca->lock); + + pca->gpio.label = dev_name(dev); + pca->gpio.parent = dev; + pca->gpio.request = pca9685_pwm_gpio_request; + pca->gpio.free = pca9685_pwm_gpio_free; + pca->gpio.get_direction = pca9685_pwm_gpio_get_direction; + pca->gpio.direction_input = pca9685_pwm_gpio_direction_input; + pca->gpio.direction_output = pca9685_pwm_gpio_direction_output; + pca->gpio.get = pca9685_pwm_gpio_get; + pca->gpio.set = pca9685_pwm_gpio_set; + pca->gpio.base = -1; + pca->gpio.ngpio = PCA9685_MAXCHAN; + pca->gpio.can_sleep = true; + + return devm_gpiochip_add_data(dev, &pca->gpio, pca); +} +#else +static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca, + struct pwm_device *pwm) +{ + return false; +} + +static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) +{ + return 0; +} +#endif + static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { @@ -117,16 +267,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, udelay(500); pca->period_ns = period_ns; - - /* - * If the duty cycle did not change, restart PWM with - * the same duty cycle to period ratio and return. - */ - if (duty_ns == pca->duty_ns) { - regmap_update_bits(pca->regmap, PCA9685_MODE1, - MODE1_RESTART, 0x1); - return 0; - } } else { dev_err(chip->dev, "prescaler not set: period out of bounds!\n"); @@ -264,6 +404,9 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct pca9685 *pca = to_pca(chip); + if (pca9685_pwm_is_gpio(pca, pwm)) + return -EBUSY; + if (pca->active_cnt++ == 0) return regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP, 0x0); @@ -343,9 +486,16 @@ static int pca9685_pwm_probe(struct i2c_client *client, pca->chip.dev = &client->dev; pca->chip.base = -1; - pca->chip.can_sleep = true; - return pwmchip_add(&pca->chip); + ret = pwmchip_add(&pca->chip); + if (ret < 0) + return ret; + + ret = pca9685_pwm_gpio_probe(pca); + if (ret < 0) + pwmchip_remove(&pca->chip); + + return ret; } static int pca9685_pwm_remove(struct i2c_client *client) diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index 58b709f29130..4143a46684d2 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -118,7 +118,7 @@ static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) clk_disable_unprepare(pc->clk); } -static struct pwm_ops pxa_pwm_ops = { +static const struct pwm_ops pxa_pwm_ops = { .config = pxa_pwm_config, .enable = pxa_pwm_enable, .disable = pxa_pwm_disable, diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index dd82dc840af9..2b7c31c9d1ab 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -635,7 +635,6 @@ skip_cpt: pc->chip.ops = &sti_pwm_ops; pc->chip.base = -1; pc->chip.npwm = pc->cdata->pwm_num_devs; - pc->chip.can_sleep = true; ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index b0803f6c64d9..1284ffa05921 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -340,7 +340,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev) pwm->chip.ops = &sun4i_pwm_ops; pwm->chip.base = -1; pwm->chip.npwm = pwm->data->npwm; - pwm->chip.can_sleep = true; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index b964470025c5..21eff991d0e3 100644 --- a/drivers/pwm/pwm-twl-led.c +++ 
b/drivers/pwm/pwm-twl-led.c @@ -303,7 +303,6 @@ static int twl_pwmled_probe(struct platform_device *pdev) twl->chip.dev = &pdev->dev; twl->chip.base = -1; - twl->chip.can_sleep = true; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index 7a993b056638..9de617b76680 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -323,7 +323,6 @@ static int twl_pwm_probe(struct platform_device *pdev) twl->chip.dev = &pdev->dev; twl->chip.base = -1; twl->chip.npwm = 2; - twl->chip.can_sleep = true; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index cdb58fd4619d..8141a4984126 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -184,7 +184,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip, return 0; } -static struct pwm_ops vt8500_pwm_ops = { +static const struct pwm_ops vt8500_pwm_ops = { .enable = vt8500_pwm_enable, .disable = vt8500_pwm_disable, .config = vt8500_pwm_config, diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9013a585507e..50b617af81bd 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -889,17 +889,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, goto err_req; } - down_read(¤t->mm->mmap_sem); - pinned = get_user_pages( + pinned = get_user_pages_unlocked( (unsigned long)xfer->loc_addr & PAGE_MASK, nr_pages, - dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0, - page_list, NULL); - up_read(¤t->mm->mmap_sem); + page_list, + dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0); if (pinned != nr_pages) { if (pinned < 0) { - rmcd_error("get_user_pages err=%ld", pinned); + rmcd_error("get_user_pages_unlocked err=%ld", + pinned); nr_pages = 0; } else rmcd_error("pinned %ld out of %ld pages", diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 364411fb7734..0142cc3f0c91 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -137,7 +137,8 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { int i, ret; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 3090b0d3072f..5e66e081027e 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -869,7 +869,7 @@ static int rpmsg_probe(struct virtio_device *vdev) init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ - err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 5dc673dc9487..ee1b0e9dde79 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1434,9 +1434,10 @@ config RTC_DRV_SUN4V based RTC on SUN4V systems. config RTC_DRV_SUN6I - tristate "Allwinner A31 RTC" - default MACH_SUN6I || MACH_SUN8I || COMPILE_TEST - depends on ARCH_SUNXI + bool "Allwinner A31 RTC" + default MACH_SUN6I || MACH_SUN8I + depends on COMMON_CLK + depends on ARCH_SUNXI || COMPILE_TEST help If you say Y here you will get support for the RTC found in some Allwinner SoCs like the A31 or the A64. 
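The rio_mport_cdev hunk above drops the explicit down_read()/up_read() of current->mm->mmap_sem and lets get_user_pages_unlocked() take and release the lock itself, passing FOLL_WRITE only for device-to-memory transfers. A minimal sketch of that calling convention is below; my_pin_user_buffer() and its parameters are illustrative assumptions, not part of the patch.

#include <linux/kernel.h>
#include <linux/mm.h>

/* Pin nr_pages user pages starting at uaddr; the caller later releases them. */
static long my_pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
			       struct page **pages, bool writable)
{
	long pinned;

	/* No explicit mmap_sem handling: the helper acquires and drops it. */
	pinned = get_user_pages_unlocked(uaddr & PAGE_MASK, nr_pages, pages,
					 writable ? FOLL_WRITE : 0);
	if (pinned < 0)
		return pinned;		/* e.g. -EFAULT on an unmapped address */

	if (pinned != nr_pages)
		pr_warn("pinned %ld of %lu pages\n", pinned, nr_pages);

	return pinned;
}

The caller remains responsible for dropping each pinned page with put_page() once the transfer has completed.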
@@ -1719,6 +1720,17 @@ config RTC_DRV_R7301 This driver can also be built as a module. If so, the module will be called rtc-r7301. +config RTC_DRV_STM32 + tristate "STM32 RTC" + select REGMAP_MMIO + depends on ARCH_STM32 || COMPILE_TEST + help + If you say yes here you get support for the STM32 On-Chip + Real Time Clock. + + This driver can also be built as a module, if so, the module + will be called "rtc-stm32". + comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index f13ab1c5c222..f07297b1460a 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -145,6 +145,7 @@ obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o +obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_ST_LPC) += rtc-st-lpc.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 9a3f2a6f512e..21f355c37eab 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/rtc.h> @@ -23,17 +24,48 @@ #define RTC_STATUS_ALARM1 BIT(0) #define RTC_STATUS_ALARM2 BIT(1) #define RTC_IRQ1_CONF 0x4 -#define RTC_IRQ1_AL_EN BIT(0) -#define RTC_IRQ1_FREQ_EN BIT(1) -#define RTC_IRQ1_FREQ_1HZ BIT(2) +#define RTC_IRQ2_CONF 0x8 +#define RTC_IRQ_AL_EN BIT(0) +#define RTC_IRQ_FREQ_EN BIT(1) +#define RTC_IRQ_FREQ_1HZ BIT(2) + #define RTC_TIME 0xC #define RTC_ALARM1 0x10 - -#define SOC_RTC_INTERRUPT 0x8 -#define SOC_RTC_ALARM1 BIT(0) -#define SOC_RTC_ALARM2 BIT(1) -#define SOC_RTC_ALARM1_MASK BIT(2) -#define SOC_RTC_ALARM2_MASK BIT(3) +#define RTC_ALARM2 0x14 + +/* Armada38x SoC registers */ +#define RTC_38X_BRIDGE_TIMING_CTL 0x0 +#define RTC_38X_PERIOD_OFFS 0 +#define RTC_38X_PERIOD_MASK (0x3FF << RTC_38X_PERIOD_OFFS) +#define RTC_38X_READ_DELAY_OFFS 26 +#define RTC_38X_READ_DELAY_MASK (0x1F << RTC_38X_READ_DELAY_OFFS) + +/* Armada 7K/8K registers */ +#define RTC_8K_BRIDGE_TIMING_CTL0 0x0 +#define RTC_8K_WRCLK_PERIOD_OFFS 0 +#define RTC_8K_WRCLK_PERIOD_MASK (0xFFFF << RTC_8K_WRCLK_PERIOD_OFFS) +#define RTC_8K_WRCLK_SETUP_OFFS 16 +#define RTC_8K_WRCLK_SETUP_MASK (0xFFFF << RTC_8K_WRCLK_SETUP_OFFS) +#define RTC_8K_BRIDGE_TIMING_CTL1 0x4 +#define RTC_8K_READ_DELAY_OFFS 0 +#define RTC_8K_READ_DELAY_MASK (0xFFFF << RTC_8K_READ_DELAY_OFFS) + +#define RTC_8K_ISR 0x10 +#define RTC_8K_IMR 0x14 +#define RTC_8K_ALARM2 BIT(0) + +#define SOC_RTC_INTERRUPT 0x8 +#define SOC_RTC_ALARM1 BIT(0) +#define SOC_RTC_ALARM2 BIT(1) +#define SOC_RTC_ALARM1_MASK BIT(2) +#define SOC_RTC_ALARM2_MASK BIT(3) + +#define SAMPLE_NR 100 + +struct value_to_freq { + u32 value; + u8 freq; +}; struct armada38x_rtc { struct rtc_device *rtc_dev; @@ -41,38 +73,153 @@ struct armada38x_rtc { void __iomem *regs_soc; spinlock_t lock; int irq; + struct value_to_freq *val_to_freq; + struct armada38x_rtc_data *data; +}; + +#define ALARM1 0 +#define ALARM2 1 + +#define ALARM_REG(base, alarm) ((base) + (alarm) * sizeof(u32)) + +struct armada38x_rtc_data { + /* Initialize the RTC-MBUS bridge timing */ + void (*update_mbus_timing)(struct armada38x_rtc *rtc); + u32 (*read_rtc_reg)(struct armada38x_rtc *rtc, u8 rtc_reg); + void (*clear_isr)(struct armada38x_rtc *rtc); + void 
(*unmask_interrupt)(struct armada38x_rtc *rtc); + u32 alarm; }; /* * According to the datasheet, the OS should wait 5us after every * register write to the RTC hard macro so that the required update * can occur without holding off the system bus + * According to errata RES-3124064, Write to any RTC register + * may fail. As a workaround, before writing to RTC + * register, issue a dummy write of 0x0 twice to RTC Status + * register. */ + static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset) { + writel(0, rtc->regs + RTC_STATUS); + writel(0, rtc->regs + RTC_STATUS); writel(val, rtc->regs + offset); udelay(5); } +/* Update RTC-MBUS bridge timing parameters */ +static void rtc_update_38x_mbus_timing_params(struct armada38x_rtc *rtc) +{ + u32 reg; + + reg = readl(rtc->regs_soc + RTC_38X_BRIDGE_TIMING_CTL); + reg &= ~RTC_38X_PERIOD_MASK; + reg |= 0x3FF << RTC_38X_PERIOD_OFFS; /* Maximum value */ + reg &= ~RTC_38X_READ_DELAY_MASK; + reg |= 0x1F << RTC_38X_READ_DELAY_OFFS; /* Maximum value */ + writel(reg, rtc->regs_soc + RTC_38X_BRIDGE_TIMING_CTL); +} + +static void rtc_update_8k_mbus_timing_params(struct armada38x_rtc *rtc) +{ + u32 reg; + + reg = readl(rtc->regs_soc + RTC_8K_BRIDGE_TIMING_CTL0); + reg &= ~RTC_8K_WRCLK_PERIOD_MASK; + reg |= 0x3FF << RTC_8K_WRCLK_PERIOD_OFFS; + reg &= ~RTC_8K_WRCLK_SETUP_MASK; + reg |= 0x29 << RTC_8K_WRCLK_SETUP_OFFS; + writel(reg, rtc->regs_soc + RTC_8K_BRIDGE_TIMING_CTL0); + + reg = readl(rtc->regs_soc + RTC_8K_BRIDGE_TIMING_CTL1); + reg &= ~RTC_8K_READ_DELAY_MASK; + reg |= 0x3F << RTC_8K_READ_DELAY_OFFS; + writel(reg, rtc->regs_soc + RTC_8K_BRIDGE_TIMING_CTL1); +} + +static u32 read_rtc_register(struct armada38x_rtc *rtc, u8 rtc_reg) +{ + return readl(rtc->regs + rtc_reg); +} + +static u32 read_rtc_register_38x_wa(struct armada38x_rtc *rtc, u8 rtc_reg) +{ + int i, index_max = 0, max = 0; + + for (i = 0; i < SAMPLE_NR; i++) { + rtc->val_to_freq[i].value = readl(rtc->regs + rtc_reg); + rtc->val_to_freq[i].freq = 0; + } + + for (i = 0; i < SAMPLE_NR; i++) { + int j = 0; + u32 value = rtc->val_to_freq[i].value; + + while (rtc->val_to_freq[j].freq) { + if (rtc->val_to_freq[j].value == value) { + rtc->val_to_freq[j].freq++; + break; + } + j++; + } + + if (!rtc->val_to_freq[j].freq) { + rtc->val_to_freq[j].value = value; + rtc->val_to_freq[j].freq = 1; + } + + if (rtc->val_to_freq[j].freq > max) { + index_max = j; + max = rtc->val_to_freq[j].freq; + } + + /* + * If a value already has half of the sample this is the most + * frequent one and we can stop the research right now + */ + if (max > SAMPLE_NR / 2) + break; + } + + return rtc->val_to_freq[index_max].value; +} + +static void armada38x_clear_isr(struct armada38x_rtc *rtc) +{ + u32 val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT); + + writel(val & ~SOC_RTC_ALARM1, rtc->regs_soc + SOC_RTC_INTERRUPT); +} + +static void armada38x_unmask_interrupt(struct armada38x_rtc *rtc) +{ + u32 val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT); + + writel(val | SOC_RTC_ALARM1_MASK, rtc->regs_soc + SOC_RTC_INTERRUPT); +} + +static void armada8k_clear_isr(struct armada38x_rtc *rtc) +{ + writel(RTC_8K_ALARM2, rtc->regs_soc + RTC_8K_ISR); +} + +static void armada8k_unmask_interrupt(struct armada38x_rtc *rtc) +{ + writel(RTC_8K_ALARM2, rtc->regs_soc + RTC_8K_IMR); +} + static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); - unsigned long time, time_check, flags; + unsigned long time, flags; spin_lock_irqsave(&rtc->lock, flags); - time = 
readl(rtc->regs + RTC_TIME); - /* - * WA for failing time set attempts. As stated in HW ERRATA if - * more than one second between two time reads is detected - * then read once again. - */ - time_check = readl(rtc->regs + RTC_TIME); - if ((time_check - time) > 1) - time_check = readl(rtc->regs + RTC_TIME); - + time = rtc->data->read_rtc_reg(rtc, RTC_TIME); spin_unlock_irqrestore(&rtc->lock, flags); - rtc_time_to_tm(time_check, tm); + rtc_time_to_tm(time, tm); return 0; } @@ -87,16 +234,9 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm) if (ret) goto out; - /* - * According to errata FE-3124064, Write to RTC TIME register - * may fail. As a workaround, after writing to RTC TIME - * register, issue a dummy write of 0x0 twice to RTC Status - * register. - */ + spin_lock_irqsave(&rtc->lock, flags); rtc_delayed_write(time, rtc, RTC_TIME); - rtc_delayed_write(0, rtc, RTC_STATUS); - rtc_delayed_write(0, rtc, RTC_STATUS); spin_unlock_irqrestore(&rtc->lock, flags); out: @@ -107,12 +247,14 @@ static int armada38x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); unsigned long time, flags; + u32 reg = ALARM_REG(RTC_ALARM1, rtc->data->alarm); + u32 reg_irq = ALARM_REG(RTC_IRQ1_CONF, rtc->data->alarm); u32 val; spin_lock_irqsave(&rtc->lock, flags); - time = readl(rtc->regs + RTC_ALARM1); - val = readl(rtc->regs + RTC_IRQ1_CONF) & RTC_IRQ1_AL_EN; + time = rtc->data->read_rtc_reg(rtc, reg); + val = rtc->data->read_rtc_reg(rtc, reg_irq) & RTC_IRQ_AL_EN; spin_unlock_irqrestore(&rtc->lock, flags); @@ -125,9 +267,10 @@ static int armada38x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int armada38x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); + u32 reg = ALARM_REG(RTC_ALARM1, rtc->data->alarm); + u32 reg_irq = ALARM_REG(RTC_IRQ1_CONF, rtc->data->alarm); unsigned long time, flags; int ret = 0; - u32 val; ret = rtc_tm_to_time(&alrm->time, &time); @@ -136,13 +279,11 @@ static int armada38x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) spin_lock_irqsave(&rtc->lock, flags); - rtc_delayed_write(time, rtc, RTC_ALARM1); + rtc_delayed_write(time, rtc, reg); if (alrm->enabled) { - rtc_delayed_write(RTC_IRQ1_AL_EN, rtc, RTC_IRQ1_CONF); - val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT); - writel(val | SOC_RTC_ALARM1_MASK, - rtc->regs_soc + SOC_RTC_INTERRUPT); + rtc_delayed_write(RTC_IRQ_AL_EN, rtc, reg_irq); + rtc->data->unmask_interrupt(rtc); } spin_unlock_irqrestore(&rtc->lock, flags); @@ -155,14 +296,15 @@ static int armada38x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); + u32 reg_irq = ALARM_REG(RTC_IRQ1_CONF, rtc->data->alarm); unsigned long flags; spin_lock_irqsave(&rtc->lock, flags); if (enabled) - rtc_delayed_write(RTC_IRQ1_AL_EN, rtc, RTC_IRQ1_CONF); + rtc_delayed_write(RTC_IRQ_AL_EN, rtc, reg_irq); else - rtc_delayed_write(0, rtc, RTC_IRQ1_CONF); + rtc_delayed_write(0, rtc, reg_irq); spin_unlock_irqrestore(&rtc->lock, flags); @@ -174,24 +316,23 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data) struct armada38x_rtc *rtc = data; u32 val; int event = RTC_IRQF | RTC_AF; + u32 reg_irq = ALARM_REG(RTC_IRQ1_CONF, rtc->data->alarm); dev_dbg(&rtc->rtc_dev->dev, "%s:irq(%d)\n", __func__, irq); spin_lock(&rtc->lock); - val = readl(rtc->regs_soc + SOC_RTC_INTERRUPT); - - writel(val & ~SOC_RTC_ALARM1, rtc->regs_soc + SOC_RTC_INTERRUPT); - val = 
readl(rtc->regs + RTC_IRQ1_CONF); - /* disable all the interrupts for alarm 1 */ - rtc_delayed_write(0, rtc, RTC_IRQ1_CONF); + rtc->data->clear_isr(rtc); + val = rtc->data->read_rtc_reg(rtc, reg_irq); + /* disable all the interrupts for alarm*/ + rtc_delayed_write(0, rtc, reg_irq); /* Ack the event */ - rtc_delayed_write(RTC_STATUS_ALARM1, rtc, RTC_STATUS); + rtc_delayed_write(1 << rtc->data->alarm, rtc, RTC_STATUS); spin_unlock(&rtc->lock); - if (val & RTC_IRQ1_FREQ_EN) { - if (val & RTC_IRQ1_FREQ_1HZ) + if (val & RTC_IRQ_FREQ_EN) { + if (val & RTC_IRQ_FREQ_1HZ) event |= RTC_UF; else event |= RTC_PF; @@ -202,7 +343,7 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data) return IRQ_HANDLED; } -static struct rtc_class_ops armada38x_rtc_ops = { +static const struct rtc_class_ops armada38x_rtc_ops = { .read_time = armada38x_rtc_read_time, .set_time = armada38x_rtc_set_time, .read_alarm = armada38x_rtc_read_alarm, @@ -210,17 +351,65 @@ static struct rtc_class_ops armada38x_rtc_ops = { .alarm_irq_enable = armada38x_rtc_alarm_irq_enable, }; +static const struct rtc_class_ops armada38x_rtc_ops_noirq = { + .read_time = armada38x_rtc_read_time, + .set_time = armada38x_rtc_set_time, + .read_alarm = armada38x_rtc_read_alarm, +}; + +static const struct armada38x_rtc_data armada38x_data = { + .update_mbus_timing = rtc_update_38x_mbus_timing_params, + .read_rtc_reg = read_rtc_register_38x_wa, + .clear_isr = armada38x_clear_isr, + .unmask_interrupt = armada38x_unmask_interrupt, + .alarm = ALARM1, +}; + +static const struct armada38x_rtc_data armada8k_data = { + .update_mbus_timing = rtc_update_8k_mbus_timing_params, + .read_rtc_reg = read_rtc_register, + .clear_isr = armada8k_clear_isr, + .unmask_interrupt = armada8k_unmask_interrupt, + .alarm = ALARM2, +}; + +#ifdef CONFIG_OF +static const struct of_device_id armada38x_rtc_of_match_table[] = { + { + .compatible = "marvell,armada-380-rtc", + .data = &armada38x_data, + }, + { + .compatible = "marvell,armada-8k-rtc", + .data = &armada8k_data, + }, + {} +}; +MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table); +#endif + static __init int armada38x_rtc_probe(struct platform_device *pdev) { + const struct rtc_class_ops *ops; struct resource *res; struct armada38x_rtc *rtc; + const struct of_device_id *match; int ret; + match = of_match_device(armada38x_rtc_of_match_table, &pdev->dev); + if (!match) + return -ENODEV; + rtc = devm_kzalloc(&pdev->dev, sizeof(struct armada38x_rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; + rtc->val_to_freq = devm_kcalloc(&pdev->dev, SAMPLE_NR, + sizeof(struct value_to_freq), GFP_KERNEL); + if (!rtc->val_to_freq) + return -ENOMEM; + spin_lock_init(&rtc->lock); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc"); @@ -242,19 +431,27 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) 0, pdev->name, rtc) < 0) { dev_warn(&pdev->dev, "Interrupt not available.\n"); rtc->irq = -1; + } + platform_set_drvdata(pdev, rtc); + + if (rtc->irq != -1) { + device_init_wakeup(&pdev->dev, 1); + ops = &armada38x_rtc_ops; + } else { /* * If there is no interrupt available then we can't * use the alarm */ - armada38x_rtc_ops.set_alarm = NULL; - armada38x_rtc_ops.alarm_irq_enable = NULL; + ops = &armada38x_rtc_ops_noirq; } - platform_set_drvdata(pdev, rtc); - if (rtc->irq != -1) - device_init_wakeup(&pdev->dev, 1); + rtc->data = (struct armada38x_rtc_data *)match->data; + + + /* Update RTC-MBUS bridge timing parameters */ + rtc->data->update_mbus_timing(rtc); rtc->rtc_dev = 
devm_rtc_device_register(&pdev->dev, pdev->name, - &armada38x_rtc_ops, THIS_MODULE); + ops, THIS_MODULE); if (IS_ERR(rtc->rtc_dev)) { ret = PTR_ERR(rtc->rtc_dev); dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); @@ -280,6 +477,9 @@ static int armada38x_rtc_resume(struct device *dev) if (device_may_wakeup(dev)) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); + /* Update RTC-MBUS bridge timing parameters */ + rtc->data->update_mbus_timing(rtc); + return disable_irq_wake(rtc->irq); } @@ -290,14 +490,6 @@ static int armada38x_rtc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(armada38x_rtc_pm_ops, armada38x_rtc_suspend, armada38x_rtc_resume); -#ifdef CONFIG_OF -static const struct of_device_id armada38x_rtc_of_match_table[] = { - { .compatible = "marvell,armada-380-rtc", }, - {} -}; -MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table); -#endif - static struct platform_driver armada38x_rtc_driver = { .driver = { .name = "armada38x-rtc", diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c index 84d6e026784d..2ba44ccb9c3a 100644 --- a/drivers/rtc/rtc-au1xxx.c +++ b/drivers/rtc/rtc-au1xxx.c @@ -56,7 +56,7 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) return 0; } -static struct rtc_class_ops au1xtoy_rtc_ops = { +static const struct rtc_class_ops au1xtoy_rtc_ops = { .read_time = au1xtoy_rtc_read_time, .set_time = au1xtoy_rtc_set_time, }; diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 535a5f9338d0..15344b7c07c5 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -333,7 +333,7 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq) #undef yesno } -static struct rtc_class_ops bfin_rtc_ops = { +static const struct rtc_class_ops bfin_rtc_ops = { .read_time = bfin_rtc_read_time, .set_time = bfin_rtc_set_time, .read_alarm = bfin_rtc_read_alarm, diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 397742446007..2b223935001f 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -34,6 +34,7 @@ #define BQ32K_CALIBRATION 0x07 /* CAL_CFG1, calibration and control */ #define BQ32K_TCH2 0x08 /* Trickle charge enable */ #define BQ32K_CFG2 0x09 /* Trickle charger control */ +#define BQ32K_TCFE BIT(6) /* Trickle charge FET bypass */ struct bq32k_regs { uint8_t seconds; @@ -188,6 +189,65 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node) return 0; } +static ssize_t bq32k_sysfs_show_tricklecharge_bypass(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int reg, error; + + error = bq32k_read(dev, ®, BQ32K_CFG2, 1); + if (error) + return error; + + return sprintf(buf, "%d\n", (reg & BQ32K_TCFE) ? 
1 : 0); +} + +static ssize_t bq32k_sysfs_store_tricklecharge_bypass(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int reg, enable, error; + + if (kstrtoint(buf, 0, &enable)) + return -EINVAL; + + error = bq32k_read(dev, ®, BQ32K_CFG2, 1); + if (error) + return error; + + if (enable) { + reg |= BQ32K_TCFE; + error = bq32k_write(dev, ®, BQ32K_CFG2, 1); + if (error) + return error; + + dev_info(dev, "Enabled trickle charge FET bypass.\n"); + } else { + reg &= ~BQ32K_TCFE; + error = bq32k_write(dev, ®, BQ32K_CFG2, 1); + if (error) + return error; + + dev_info(dev, "Disabled trickle charge FET bypass.\n"); + } + + return count; +} + +static DEVICE_ATTR(trickle_charge_bypass, 0644, + bq32k_sysfs_show_tricklecharge_bypass, + bq32k_sysfs_store_tricklecharge_bypass); + +static int bq32k_sysfs_register(struct device *dev) +{ + return device_create_file(dev, &dev_attr_trickle_charge_bypass); +} + +static void bq32k_sysfs_unregister(struct device *dev) +{ + device_remove_file(dev, &dev_attr_trickle_charge_bypass); +} + static int bq32k_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -224,11 +284,26 @@ static int bq32k_probe(struct i2c_client *client, if (IS_ERR(rtc)) return PTR_ERR(rtc); + error = bq32k_sysfs_register(&client->dev); + if (error) { + dev_err(&client->dev, + "Unable to create sysfs entries for rtc bq32000\n"); + return error; + } + + i2c_set_clientdata(client, rtc); return 0; } +static int bq32k_remove(struct i2c_client *client) +{ + bq32k_sysfs_unregister(&client->dev); + + return 0; +} + static const struct i2c_device_id bq32k_id[] = { { "bq32000", 0 }, { } @@ -240,6 +315,7 @@ static struct i2c_driver bq32k_driver = { .name = "bq32k", }, .probe = bq32k_probe, + .remove = bq32k_remove, .id_table = bq32k_id, }; diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index a6d9434addf6..6dc8f29697ab 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/rtc.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "rtc-core.h" static dev_t rtc_devt; diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c index 94067f8eeb10..f225cd873ff6 100644 --- a/drivers/rtc/rtc-dm355evm.c +++ b/drivers/rtc/rtc-dm355evm.c @@ -116,7 +116,7 @@ static int dm355evm_rtc_set_time(struct device *dev, struct rtc_time *tm) return 0; } -static struct rtc_class_ops dm355evm_rtc_ops = { +static const struct rtc_class_ops dm355evm_rtc_ops = { .read_time = dm355evm_rtc_read_time, .set_time = dm355evm_rtc_set_time, }; diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index b1f20d8c358f..9bb39a06b994 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -23,28 +23,28 @@ #include <linux/slab.h> #include <linux/regmap.h> -#define DS3232_REG_SECONDS 0x00 -#define DS3232_REG_MINUTES 0x01 -#define DS3232_REG_HOURS 0x02 -#define DS3232_REG_AMPM 0x02 -#define DS3232_REG_DAY 0x03 -#define DS3232_REG_DATE 0x04 -#define DS3232_REG_MONTH 0x05 -#define DS3232_REG_CENTURY 0x05 -#define DS3232_REG_YEAR 0x06 -#define DS3232_REG_ALARM1 0x07 /* Alarm 1 BASE */ -#define DS3232_REG_ALARM2 0x0B /* Alarm 2 BASE */ -#define DS3232_REG_CR 0x0E /* Control register */ -# define DS3232_REG_CR_nEOSC 0x80 -# define DS3232_REG_CR_INTCN 0x04 -# define DS3232_REG_CR_A2IE 0x02 -# define DS3232_REG_CR_A1IE 0x01 - -#define DS3232_REG_SR 0x0F /* control/status register */ -# define DS3232_REG_SR_OSF 0x80 -# define DS3232_REG_SR_BSY 0x04 -# define 
DS3232_REG_SR_A2F 0x02 -# define DS3232_REG_SR_A1F 0x01 +#define DS3232_REG_SECONDS 0x00 +#define DS3232_REG_MINUTES 0x01 +#define DS3232_REG_HOURS 0x02 +#define DS3232_REG_AMPM 0x02 +#define DS3232_REG_DAY 0x03 +#define DS3232_REG_DATE 0x04 +#define DS3232_REG_MONTH 0x05 +#define DS3232_REG_CENTURY 0x05 +#define DS3232_REG_YEAR 0x06 +#define DS3232_REG_ALARM1 0x07 /* Alarm 1 BASE */ +#define DS3232_REG_ALARM2 0x0B /* Alarm 2 BASE */ +#define DS3232_REG_CR 0x0E /* Control register */ +# define DS3232_REG_CR_nEOSC 0x80 +# define DS3232_REG_CR_INTCN 0x04 +# define DS3232_REG_CR_A2IE 0x02 +# define DS3232_REG_CR_A1IE 0x01 + +#define DS3232_REG_SR 0x0F /* control/status register */ +# define DS3232_REG_SR_OSF 0x80 +# define DS3232_REG_SR_BSY 0x04 +# define DS3232_REG_SR_A2F 0x02 +# define DS3232_REG_SR_A1F 0x01 struct ds3232 { struct device *dev; @@ -363,6 +363,9 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq, if (ret) return ret; + if (ds3232->irq > 0) + device_init_wakeup(dev, 1); + ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops, THIS_MODULE); if (IS_ERR(ds3232->rtc)) @@ -374,10 +377,10 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq, IRQF_SHARED | IRQF_ONESHOT, name, dev); if (ret) { + device_set_wakeup_capable(dev, 0); ds3232->irq = 0; dev_err(dev, "unable to request IRQ\n"); - } else - device_init_wakeup(dev, 1); + } } return 0; @@ -420,6 +423,7 @@ static int ds3232_i2c_probe(struct i2c_client *client, static const struct regmap_config config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0x13, }; regmap = devm_regmap_init_i2c(client, &config); @@ -479,6 +483,7 @@ static int ds3234_probe(struct spi_device *spi) static const struct regmap_config config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0x13, .write_flag_mask = 0x80, }; struct regmap *regmap; diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c index 688debc14348..ccf0dbadb62d 100644 --- a/drivers/rtc/rtc-gemini.c +++ b/drivers/rtc/rtc-gemini.c @@ -159,9 +159,16 @@ static int gemini_rtc_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id gemini_rtc_dt_match[] = { + { .compatible = "cortina,gemini-rtc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gemini_rtc_dt_match); + static struct platform_driver gemini_rtc_driver = { .driver = { .name = DRV_NAME, + .of_match_table = gemini_rtc_dt_match, }, .probe = gemini_rtc_probe, .remove = gemini_rtc_remove, diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 67b56b80dc70..6b54f6c24c5f 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -108,7 +108,6 @@ * @pdev: pionter to platform dev * @rtc: pointer to rtc struct * @ioaddr: IO registers pointer - * @irq: dryice normal interrupt * @clk: input reference clock * @dsr: copy of the DSR register * @irq_lock: interrupt enable register (DIER) lock @@ -120,7 +119,6 @@ struct imxdi_dev { struct platform_device *pdev; struct rtc_device *rtc; void __iomem *ioaddr; - int irq; struct clk *clk; u32 dsr; spinlock_t irq_lock; @@ -668,7 +666,7 @@ static int dryice_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) return 0; } -static struct rtc_class_ops dryice_rtc_ops = { +static const struct rtc_class_ops dryice_rtc_ops = { .read_time = dryice_rtc_read_time, .set_mmss = dryice_rtc_set_mmss, .alarm_irq_enable = dryice_rtc_alarm_irq_enable, @@ -677,9 +675,9 @@ static struct rtc_class_ops dryice_rtc_ops = { }; /* - * dryice "normal" interrupt handler + * interrupt handler for dryice 
"normal" and security violation interrupt */ -static irqreturn_t dryice_norm_irq(int irq, void *dev_id) +static irqreturn_t dryice_irq(int irq, void *dev_id) { struct imxdi_dev *imxdi = dev_id; u32 dsr, dier; @@ -765,6 +763,7 @@ static int __init dryice_rtc_probe(struct platform_device *pdev) { struct resource *res; struct imxdi_dev *imxdi; + int norm_irq, sec_irq; int rc; imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL); @@ -780,9 +779,16 @@ static int __init dryice_rtc_probe(struct platform_device *pdev) spin_lock_init(&imxdi->irq_lock); - imxdi->irq = platform_get_irq(pdev, 0); - if (imxdi->irq < 0) - return imxdi->irq; + norm_irq = platform_get_irq(pdev, 0); + if (norm_irq < 0) + return norm_irq; + + /* the 2nd irq is the security violation irq + * make this optional, don't break the device tree ABI + */ + sec_irq = platform_get_irq(pdev, 1); + if (sec_irq <= 0) + sec_irq = IRQ_NOTCONNECTED; init_waitqueue_head(&imxdi->write_wait); @@ -808,13 +814,20 @@ static int __init dryice_rtc_probe(struct platform_device *pdev) if (rc != 0) goto err; - rc = devm_request_irq(&pdev->dev, imxdi->irq, dryice_norm_irq, - IRQF_SHARED, pdev->name, imxdi); + rc = devm_request_irq(&pdev->dev, norm_irq, dryice_irq, + IRQF_SHARED, pdev->name, imxdi); if (rc) { dev_warn(&pdev->dev, "interrupt not available.\n"); goto err; } + rc = devm_request_irq(&pdev->dev, sec_irq, dryice_irq, + IRQF_SHARED, pdev->name, imxdi); + if (rc) { + dev_warn(&pdev->dev, "security violation interrupt not available.\n"); + /* this is not an error, see above */ + } + platform_set_drvdata(pdev, imxdi); imxdi->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &dryice_rtc_ops, THIS_MODULE); diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c index 22a9ec4f2b83..e04ca54f21e2 100644 --- a/drivers/rtc/rtc-ls1x.c +++ b/drivers/rtc/rtc-ls1x.c @@ -138,7 +138,7 @@ err: return ret; } -static struct rtc_class_ops ls1x_rtc_ops = { +static const struct rtc_class_ops ls1x_rtc_ops = { .read_time = ls1x_rtc_read_time, .set_time = ls1x_rtc_set_time, }; diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c index 0eeb5714c00f..02af045305dd 100644 --- a/drivers/rtc/rtc-m48t86.c +++ b/drivers/rtc/rtc-m48t86.c @@ -16,62 +16,88 @@ #include <linux/module.h> #include <linux/rtc.h> #include <linux/platform_device.h> -#include <linux/platform_data/rtc-m48t86.h> #include <linux/bcd.h> +#include <linux/io.h> -#define M48T86_REG_SEC 0x00 -#define M48T86_REG_SECALRM 0x01 -#define M48T86_REG_MIN 0x02 -#define M48T86_REG_MINALRM 0x03 -#define M48T86_REG_HOUR 0x04 -#define M48T86_REG_HOURALRM 0x05 -#define M48T86_REG_DOW 0x06 /* 1 = sunday */ -#define M48T86_REG_DOM 0x07 -#define M48T86_REG_MONTH 0x08 /* 1 - 12 */ -#define M48T86_REG_YEAR 0x09 /* 0 - 99 */ -#define M48T86_REG_A 0x0A -#define M48T86_REG_B 0x0B -#define M48T86_REG_C 0x0C -#define M48T86_REG_D 0x0D - -#define M48T86_REG_B_H24 (1 << 1) -#define M48T86_REG_B_DM (1 << 2) -#define M48T86_REG_B_SET (1 << 7) -#define M48T86_REG_D_VRT (1 << 7) +#define M48T86_SEC 0x00 +#define M48T86_SECALRM 0x01 +#define M48T86_MIN 0x02 +#define M48T86_MINALRM 0x03 +#define M48T86_HOUR 0x04 +#define M48T86_HOURALRM 0x05 +#define M48T86_DOW 0x06 /* 1 = sunday */ +#define M48T86_DOM 0x07 +#define M48T86_MONTH 0x08 /* 1 - 12 */ +#define M48T86_YEAR 0x09 /* 0 - 99 */ +#define M48T86_A 0x0a +#define M48T86_B 0x0b +#define M48T86_B_SET BIT(7) +#define M48T86_B_DM BIT(2) +#define M48T86_B_H24 BIT(1) +#define M48T86_C 0x0c +#define M48T86_D 0x0d +#define M48T86_D_VRT BIT(7) +#define 
M48T86_NVRAM(x) (0x0e + (x)) +#define M48T86_NVRAM_LEN 114 + +struct m48t86_rtc_info { + void __iomem *index_reg; + void __iomem *data_reg; + struct rtc_device *rtc; +}; + +static unsigned char m48t86_readb(struct device *dev, unsigned long addr) +{ + struct m48t86_rtc_info *info = dev_get_drvdata(dev); + unsigned char value; + + writeb(addr, info->index_reg); + value = readb(info->data_reg); + + return value; +} + +static void m48t86_writeb(struct device *dev, + unsigned char value, unsigned long addr) +{ + struct m48t86_rtc_info *info = dev_get_drvdata(dev); + + writeb(addr, info->index_reg); + writeb(value, info->data_reg); +} static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned char reg; - struct platform_device *pdev = to_platform_device(dev); - struct m48t86_ops *ops = dev_get_platdata(&pdev->dev); - reg = ops->readbyte(M48T86_REG_B); + reg = m48t86_readb(dev, M48T86_B); - if (reg & M48T86_REG_B_DM) { + if (reg & M48T86_B_DM) { /* data (binary) mode */ - tm->tm_sec = ops->readbyte(M48T86_REG_SEC); - tm->tm_min = ops->readbyte(M48T86_REG_MIN); - tm->tm_hour = ops->readbyte(M48T86_REG_HOUR) & 0x3F; - tm->tm_mday = ops->readbyte(M48T86_REG_DOM); + tm->tm_sec = m48t86_readb(dev, M48T86_SEC); + tm->tm_min = m48t86_readb(dev, M48T86_MIN); + tm->tm_hour = m48t86_readb(dev, M48T86_HOUR) & 0x3f; + tm->tm_mday = m48t86_readb(dev, M48T86_DOM); /* tm_mon is 0-11 */ - tm->tm_mon = ops->readbyte(M48T86_REG_MONTH) - 1; - tm->tm_year = ops->readbyte(M48T86_REG_YEAR) + 100; - tm->tm_wday = ops->readbyte(M48T86_REG_DOW); + tm->tm_mon = m48t86_readb(dev, M48T86_MONTH) - 1; + tm->tm_year = m48t86_readb(dev, M48T86_YEAR) + 100; + tm->tm_wday = m48t86_readb(dev, M48T86_DOW); } else { /* bcd mode */ - tm->tm_sec = bcd2bin(ops->readbyte(M48T86_REG_SEC)); - tm->tm_min = bcd2bin(ops->readbyte(M48T86_REG_MIN)); - tm->tm_hour = bcd2bin(ops->readbyte(M48T86_REG_HOUR) & 0x3F); - tm->tm_mday = bcd2bin(ops->readbyte(M48T86_REG_DOM)); + tm->tm_sec = bcd2bin(m48t86_readb(dev, M48T86_SEC)); + tm->tm_min = bcd2bin(m48t86_readb(dev, M48T86_MIN)); + tm->tm_hour = bcd2bin(m48t86_readb(dev, M48T86_HOUR) & + 0x3f); + tm->tm_mday = bcd2bin(m48t86_readb(dev, M48T86_DOM)); /* tm_mon is 0-11 */ - tm->tm_mon = bcd2bin(ops->readbyte(M48T86_REG_MONTH)) - 1; - tm->tm_year = bcd2bin(ops->readbyte(M48T86_REG_YEAR)) + 100; - tm->tm_wday = bcd2bin(ops->readbyte(M48T86_REG_DOW)); + tm->tm_mon = bcd2bin(m48t86_readb(dev, M48T86_MONTH)) - 1; + tm->tm_year = bcd2bin(m48t86_readb(dev, M48T86_YEAR)) + 100; + tm->tm_wday = bcd2bin(m48t86_readb(dev, M48T86_DOW)); } /* correct the hour if the clock is in 12h mode */ - if (!(reg & M48T86_REG_B_H24)) - if (ops->readbyte(M48T86_REG_HOUR) & 0x80) + if (!(reg & M48T86_B_H24)) + if (m48t86_readb(dev, M48T86_HOUR) & 0x80) tm->tm_hour += 12; return rtc_valid_tm(tm); @@ -80,38 +106,36 @@ static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm) static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned char reg; - struct platform_device *pdev = to_platform_device(dev); - struct m48t86_ops *ops = dev_get_platdata(&pdev->dev); - reg = ops->readbyte(M48T86_REG_B); + reg = m48t86_readb(dev, M48T86_B); /* update flag and 24h mode */ - reg |= M48T86_REG_B_SET | M48T86_REG_B_H24; - ops->writebyte(reg, M48T86_REG_B); + reg |= M48T86_B_SET | M48T86_B_H24; + m48t86_writeb(dev, reg, M48T86_B); - if (reg & M48T86_REG_B_DM) { + if (reg & M48T86_B_DM) { /* data (binary) mode */ - ops->writebyte(tm->tm_sec, M48T86_REG_SEC); - 
ops->writebyte(tm->tm_min, M48T86_REG_MIN); - ops->writebyte(tm->tm_hour, M48T86_REG_HOUR); - ops->writebyte(tm->tm_mday, M48T86_REG_DOM); - ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH); - ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR); - ops->writebyte(tm->tm_wday, M48T86_REG_DOW); + m48t86_writeb(dev, tm->tm_sec, M48T86_SEC); + m48t86_writeb(dev, tm->tm_min, M48T86_MIN); + m48t86_writeb(dev, tm->tm_hour, M48T86_HOUR); + m48t86_writeb(dev, tm->tm_mday, M48T86_DOM); + m48t86_writeb(dev, tm->tm_mon + 1, M48T86_MONTH); + m48t86_writeb(dev, tm->tm_year % 100, M48T86_YEAR); + m48t86_writeb(dev, tm->tm_wday, M48T86_DOW); } else { /* bcd mode */ - ops->writebyte(bin2bcd(tm->tm_sec), M48T86_REG_SEC); - ops->writebyte(bin2bcd(tm->tm_min), M48T86_REG_MIN); - ops->writebyte(bin2bcd(tm->tm_hour), M48T86_REG_HOUR); - ops->writebyte(bin2bcd(tm->tm_mday), M48T86_REG_DOM); - ops->writebyte(bin2bcd(tm->tm_mon + 1), M48T86_REG_MONTH); - ops->writebyte(bin2bcd(tm->tm_year % 100), M48T86_REG_YEAR); - ops->writebyte(bin2bcd(tm->tm_wday), M48T86_REG_DOW); + m48t86_writeb(dev, bin2bcd(tm->tm_sec), M48T86_SEC); + m48t86_writeb(dev, bin2bcd(tm->tm_min), M48T86_MIN); + m48t86_writeb(dev, bin2bcd(tm->tm_hour), M48T86_HOUR); + m48t86_writeb(dev, bin2bcd(tm->tm_mday), M48T86_DOM); + m48t86_writeb(dev, bin2bcd(tm->tm_mon + 1), M48T86_MONTH); + m48t86_writeb(dev, bin2bcd(tm->tm_year % 100), M48T86_YEAR); + m48t86_writeb(dev, bin2bcd(tm->tm_wday), M48T86_DOW); } /* update ended */ - reg &= ~M48T86_REG_B_SET; - ops->writebyte(reg, M48T86_REG_B); + reg &= ~M48T86_B_SET; + m48t86_writeb(dev, reg, M48T86_B); return 0; } @@ -119,18 +143,16 @@ static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm) static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq) { unsigned char reg; - struct platform_device *pdev = to_platform_device(dev); - struct m48t86_ops *ops = dev_get_platdata(&pdev->dev); - reg = ops->readbyte(M48T86_REG_B); + reg = m48t86_readb(dev, M48T86_B); seq_printf(seq, "mode\t\t: %s\n", - (reg & M48T86_REG_B_DM) ? "binary" : "bcd"); + (reg & M48T86_B_DM) ? "binary" : "bcd"); - reg = ops->readbyte(M48T86_REG_D); + reg = m48t86_readb(dev, M48T86_D); seq_printf(seq, "battery\t\t: %s\n", - (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); + (reg & M48T86_D_VRT) ? "ok" : "exhausted"); return 0; } @@ -141,25 +163,116 @@ static const struct rtc_class_ops m48t86_rtc_ops = { .proc = m48t86_rtc_proc, }; -static int m48t86_rtc_probe(struct platform_device *dev) +static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + unsigned int i; + + for (i = 0; i < count; i++) + buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i)); + + return count; +} + +static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) { + struct device *dev = kobj_to_dev(kobj); + unsigned int i; + + for (i = 0; i < count; i++) + m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i)); + + return count; +} + +static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write, + M48T86_NVRAM_LEN); + +/* + * The RTC is an optional feature at purchase time on some Technologic Systems + * boards. Verify that it actually exists by checking if the last two bytes + * of the NVRAM can be changed. + * + * This is based on the method used in their rtc7800.c example. 
+ */ +static bool m48t86_verify_chip(struct platform_device *pdev) +{ + unsigned int offset0 = M48T86_NVRAM(M48T86_NVRAM_LEN - 2); + unsigned int offset1 = M48T86_NVRAM(M48T86_NVRAM_LEN - 1); + unsigned char tmp0, tmp1; + + tmp0 = m48t86_readb(&pdev->dev, offset0); + tmp1 = m48t86_readb(&pdev->dev, offset1); + + m48t86_writeb(&pdev->dev, 0x00, offset0); + m48t86_writeb(&pdev->dev, 0x55, offset1); + if (m48t86_readb(&pdev->dev, offset1) == 0x55) { + m48t86_writeb(&pdev->dev, 0xaa, offset1); + if (m48t86_readb(&pdev->dev, offset1) == 0xaa && + m48t86_readb(&pdev->dev, offset0) == 0x00) { + m48t86_writeb(&pdev->dev, tmp0, offset0); + m48t86_writeb(&pdev->dev, tmp1, offset1); + + return true; + } + } + return false; +} + +static int m48t86_rtc_probe(struct platform_device *pdev) +{ + struct m48t86_rtc_info *info; + struct resource *res; unsigned char reg; - struct m48t86_ops *ops = dev_get_platdata(&dev->dev); - struct rtc_device *rtc; - rtc = devm_rtc_device_register(&dev->dev, "m48t86", - &m48t86_rtc_ops, THIS_MODULE); + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + info->index_reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(info->index_reg)) + return PTR_ERR(info->index_reg); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -ENODEV; + info->data_reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(info->data_reg)) + return PTR_ERR(info->data_reg); - if (IS_ERR(rtc)) - return PTR_ERR(rtc); + dev_set_drvdata(&pdev->dev, info); + + if (!m48t86_verify_chip(pdev)) { + dev_info(&pdev->dev, "RTC not present\n"); + return -ENODEV; + } - platform_set_drvdata(dev, rtc); + info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86", + &m48t86_rtc_ops, THIS_MODULE); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); /* read battery status */ - reg = ops->readbyte(M48T86_REG_D); - dev_info(&dev->dev, "battery %s\n", - (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); + reg = m48t86_readb(&pdev->dev, M48T86_D); + dev_info(&pdev->dev, "battery %s\n", + (reg & M48T86_D_VRT) ? 
"ok" : "exhausted"); + if (device_create_bin_file(&pdev->dev, &bin_attr_nvram)) + dev_err(&pdev->dev, "failed to create nvram sysfs entry\n"); + + return 0; +} + +static int m48t86_rtc_remove(struct platform_device *pdev) +{ + device_remove_bin_file(&pdev->dev, &bin_attr_nvram); return 0; } @@ -168,6 +281,7 @@ static struct platform_driver m48t86_rtc_platform_driver = { .name = "rtc-m48t86", }, .probe = m48t86_rtc_probe, + .remove = m48t86_rtc_remove, }; module_platform_driver(m48t86_rtc_platform_driver); diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index ce75e421ba00..77f21331ae21 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -44,12 +44,22 @@ #define MCP795_REG_DAY 0x04 #define MCP795_REG_MONTH 0x06 #define MCP795_REG_CONTROL 0x08 +#define MCP795_REG_ALM0_SECONDS 0x0C +#define MCP795_REG_ALM0_DAY 0x0F #define MCP795_ST_BIT BIT(7) #define MCP795_24_BIT BIT(6) #define MCP795_LP_BIT BIT(5) #define MCP795_EXTOSC_BIT BIT(3) #define MCP795_OSCON_BIT BIT(5) +#define MCP795_ALM0_BIT BIT(4) +#define MCP795_ALM1_BIT BIT(5) +#define MCP795_ALM0IF_BIT BIT(3) +#define MCP795_ALM0C0_BIT BIT(4) +#define MCP795_ALM0C1_BIT BIT(5) +#define MCP795_ALM0C2_BIT BIT(6) + +#define SEC_PER_DAY (24 * 60 * 60) static int mcp795_rtcc_read(struct device *dev, u8 addr, u8 *buf, u8 count) { @@ -150,6 +160,30 @@ static int mcp795_start_oscillator(struct device *dev, bool *extosc) dev, MCP795_REG_SECONDS, MCP795_ST_BIT, MCP795_ST_BIT); } +/* Enable or disable Alarm 0 in RTC */ +static int mcp795_update_alarm(struct device *dev, bool enable) +{ + int ret; + + dev_dbg(dev, "%s alarm\n", enable ? "Enable" : "Disable"); + + if (enable) { + /* clear ALM0IF (Alarm 0 Interrupt Flag) bit */ + ret = mcp795_rtcc_set_bits(dev, MCP795_REG_ALM0_DAY, + MCP795_ALM0IF_BIT, 0); + if (ret) + return ret; + /* enable alarm 0 */ + ret = mcp795_rtcc_set_bits(dev, MCP795_REG_CONTROL, + MCP795_ALM0_BIT, MCP795_ALM0_BIT); + } else { + /* disable alarm 0 and alarm 1 */ + ret = mcp795_rtcc_set_bits(dev, MCP795_REG_CONTROL, + MCP795_ALM0_BIT | MCP795_ALM1_BIT, 0); + } + return ret; +} + static int mcp795_set_time(struct device *dev, struct rtc_time *tim) { int ret; @@ -170,6 +204,7 @@ static int mcp795_set_time(struct device *dev, struct rtc_time *tim) data[0] = (data[0] & 0x80) | bin2bcd(tim->tm_sec); data[1] = (data[1] & 0x80) | bin2bcd(tim->tm_min); data[2] = bin2bcd(tim->tm_hour); + data[3] = (data[3] & 0xF8) | bin2bcd(tim->tm_wday + 1); data[4] = bin2bcd(tim->tm_mday); data[5] = (data[5] & MCP795_LP_BIT) | bin2bcd(tim->tm_mon + 1); @@ -198,9 +233,9 @@ static int mcp795_set_time(struct device *dev, struct rtc_time *tim) if (ret) return ret; - dev_dbg(dev, "Set mcp795: %04d-%02d-%02d %02d:%02d:%02d\n", + dev_dbg(dev, "Set mcp795: %04d-%02d-%02d(%d) %02d:%02d:%02d\n", tim->tm_year + 1900, tim->tm_mon, tim->tm_mday, - tim->tm_hour, tim->tm_min, tim->tm_sec); + tim->tm_wday, tim->tm_hour, tim->tm_min, tim->tm_sec); return 0; } @@ -218,20 +253,139 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim) tim->tm_sec = bcd2bin(data[0] & 0x7F); tim->tm_min = bcd2bin(data[1] & 0x7F); tim->tm_hour = bcd2bin(data[2] & 0x3F); + tim->tm_wday = bcd2bin(data[3] & 0x07) - 1; tim->tm_mday = bcd2bin(data[4] & 0x3F); tim->tm_mon = bcd2bin(data[5] & 0x1F) - 1; tim->tm_year = bcd2bin(data[6]) + 100; /* Assume we are in 20xx */ - dev_dbg(dev, "Read from mcp795: %04d-%02d-%02d %02d:%02d:%02d\n", - tim->tm_year + 1900, tim->tm_mon, tim->tm_mday, - tim->tm_hour, tim->tm_min, tim->tm_sec); + dev_dbg(dev, 
"Read from mcp795: %04d-%02d-%02d(%d) %02d:%02d:%02d\n", + tim->tm_year + 1900, tim->tm_mon, tim->tm_mday, + tim->tm_wday, tim->tm_hour, tim->tm_min, tim->tm_sec); return rtc_valid_tm(tim); } +static int mcp795_set_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + struct rtc_time now_tm; + time64_t now; + time64_t later; + u8 tmp[6]; + int ret; + + /* Read current time from RTC hardware */ + ret = mcp795_read_time(dev, &now_tm); + if (ret) + return ret; + /* Get the number of seconds since 1970 */ + now = rtc_tm_to_time64(&now_tm); + later = rtc_tm_to_time64(&alm->time); + if (later <= now) + return -EINVAL; + /* make sure alarm fires within the next one year */ + if ((later - now) >= + (SEC_PER_DAY * (365 + is_leap_year(alm->time.tm_year)))) + return -EDOM; + /* disable alarm */ + ret = mcp795_update_alarm(dev, false); + if (ret) + return ret; + /* Read registers, so we can leave configuration bits untouched */ + ret = mcp795_rtcc_read(dev, MCP795_REG_ALM0_SECONDS, tmp, sizeof(tmp)); + if (ret) + return ret; + + alm->time.tm_year = -1; + alm->time.tm_isdst = -1; + alm->time.tm_yday = -1; + + tmp[0] = (tmp[0] & 0x80) | bin2bcd(alm->time.tm_sec); + tmp[1] = (tmp[1] & 0x80) | bin2bcd(alm->time.tm_min); + tmp[2] = (tmp[2] & 0xE0) | bin2bcd(alm->time.tm_hour); + tmp[3] = (tmp[3] & 0x80) | bin2bcd(alm->time.tm_wday + 1); + /* set alarm match: seconds, minutes, hour, day, date and month */ + tmp[3] |= (MCP795_ALM0C2_BIT | MCP795_ALM0C1_BIT | MCP795_ALM0C0_BIT); + tmp[4] = (tmp[4] & 0xC0) | bin2bcd(alm->time.tm_mday); + tmp[5] = (tmp[5] & 0xE0) | bin2bcd(alm->time.tm_mon + 1); + + ret = mcp795_rtcc_write(dev, MCP795_REG_ALM0_SECONDS, tmp, sizeof(tmp)); + if (ret) + return ret; + + /* enable alarm if requested */ + if (alm->enabled) { + ret = mcp795_update_alarm(dev, true); + if (ret) + return ret; + dev_dbg(dev, "Alarm IRQ armed\n"); + } + dev_dbg(dev, "Set alarm: %02d-%02d(%d) %02d:%02d:%02d\n", + alm->time.tm_mon, alm->time.tm_mday, alm->time.tm_wday, + alm->time.tm_hour, alm->time.tm_min, alm->time.tm_sec); + return 0; +} + +static int mcp795_read_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + u8 data[6]; + int ret; + + ret = mcp795_rtcc_read( + dev, MCP795_REG_ALM0_SECONDS, data, sizeof(data)); + if (ret) + return ret; + + alm->time.tm_sec = bcd2bin(data[0] & 0x7F); + alm->time.tm_min = bcd2bin(data[1] & 0x7F); + alm->time.tm_hour = bcd2bin(data[2] & 0x1F); + alm->time.tm_wday = bcd2bin(data[3] & 0x07) - 1; + alm->time.tm_mday = bcd2bin(data[4] & 0x3F); + alm->time.tm_mon = bcd2bin(data[5] & 0x1F) - 1; + alm->time.tm_year = -1; + alm->time.tm_isdst = -1; + alm->time.tm_yday = -1; + + dev_dbg(dev, "Read alarm: %02d-%02d(%d) %02d:%02d:%02d\n", + alm->time.tm_mon, alm->time.tm_mday, alm->time.tm_wday, + alm->time.tm_hour, alm->time.tm_min, alm->time.tm_sec); + return 0; +} + +static int mcp795_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + return mcp795_update_alarm(dev, !!enabled); +} + +static irqreturn_t mcp795_irq(int irq, void *data) +{ + struct spi_device *spi = data; + struct rtc_device *rtc = spi_get_drvdata(spi); + struct mutex *lock = &rtc->ops_lock; + int ret; + + mutex_lock(lock); + + /* Disable alarm. + * There is no need to clear ALM0IF (Alarm 0 Interrupt Flag) bit, + * because it is done every time when alarm is enabled. 
+ */ + ret = mcp795_update_alarm(&spi->dev, false); + if (ret) + dev_err(&spi->dev, + "Failed to disable alarm in IRQ (ret=%d)\n", ret); + rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF); + + mutex_unlock(lock); + + return IRQ_HANDLED; +} + static const struct rtc_class_ops mcp795_rtc_ops = { .read_time = mcp795_read_time, - .set_time = mcp795_set_time + .set_time = mcp795_set_time, + .read_alarm = mcp795_read_alarm, + .set_alarm = mcp795_set_alarm, + .alarm_irq_enable = mcp795_alarm_irq_enable }; static int mcp795_probe(struct spi_device *spi) @@ -259,6 +413,23 @@ static int mcp795_probe(struct spi_device *spi) spi_set_drvdata(spi, rtc); + if (spi->irq > 0) { + dev_dbg(&spi->dev, "Alarm support enabled\n"); + + /* Clear any pending alarm (ALM0IF bit) before requesting + * the interrupt. + */ + mcp795_rtcc_set_bits(&spi->dev, MCP795_REG_ALM0_DAY, + MCP795_ALM0IF_BIT, 0); + ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL, + mcp795_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(&rtc->dev), spi); + if (ret) + dev_err(&spi->dev, "Failed to request IRQ: %d: %d\n", + spi->irq, ret); + else + device_init_wakeup(&spi->dev, true); + } return 0; } diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 359876a88ac8..77319122642a 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -353,7 +353,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) } /* RTC layer */ -static struct rtc_class_ops mxc_rtc_ops = { +static const struct rtc_class_ops mxc_rtc_ops = { .release = mxc_rtc_release, .read_time = mxc_rtc_read_time, .set_mmss64 = mxc_rtc_set_mmss, diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 2bfdf638b673..f33447c5db85 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -52,9 +52,20 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm) struct pcf2127 *pcf2127 = dev_get_drvdata(dev); unsigned char buf[10]; int ret; + int i; - ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, buf, - sizeof(buf)); + for (i = 0; i <= PCF2127_REG_CTRL3; i++) { + ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1 + i, + (unsigned int *)(buf + i)); + if (ret) { + dev_err(dev, "%s: read error\n", __func__); + return ret; + } + } + + ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_SC, + (buf + PCF2127_REG_SC), + ARRAY_SIZE(buf) - PCF2127_REG_SC); if (ret) { dev_err(dev, "%s: read error\n", __func__); return ret; diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index 7163b91bb773..d08da371912c 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -63,7 +63,6 @@ struct rx8010_data { struct i2c_client *client; struct rtc_device *rtc; u8 ctrlreg; - spinlock_t flags_lock; }; static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id) @@ -72,12 +71,12 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id) struct rx8010_data *rx8010 = i2c_get_clientdata(client); int flagreg; - spin_lock(&rx8010->flags_lock); + mutex_lock(&rx8010->rtc->ops_lock); flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG); if (flagreg <= 0) { - spin_unlock(&rx8010->flags_lock); + mutex_unlock(&rx8010->rtc->ops_lock); return IRQ_NONE; } @@ -101,7 +100,7 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id) i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg); - spin_unlock(&rx8010->flags_lock); + mutex_unlock(&rx8010->rtc->ops_lock); return IRQ_HANDLED; } @@ -143,7 +142,6 @@ static int rx8010_set_time(struct device *dev, struct rtc_time *dt) u8 
date[7]; int ctrl, flagreg; int ret; - unsigned long irqflags; if ((dt->tm_year < 100) || (dt->tm_year > 199)) return -EINVAL; @@ -181,11 +179,8 @@ static int rx8010_set_time(struct device *dev, struct rtc_time *dt) if (ret < 0) return ret; - spin_lock_irqsave(&rx8010->flags_lock, irqflags); - flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG); if (flagreg < 0) { - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); return flagreg; } @@ -193,8 +188,6 @@ static int rx8010_set_time(struct device *dev, struct rtc_time *dt) ret = i2c_smbus_write_byte_data(rx8010->client, RX8010_FLAG, flagreg & ~RX8010_FLAG_VLF); - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); - return 0; } @@ -288,12 +281,9 @@ static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t) u8 alarmvals[3]; int extreg, flagreg; int err; - unsigned long irqflags; - spin_lock_irqsave(&rx8010->flags_lock, irqflags); flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG); if (flagreg < 0) { - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); return flagreg; } @@ -302,14 +292,12 @@ static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t) err = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL, rx8010->ctrlreg); if (err < 0) { - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); return err; } } flagreg &= ~RX8010_FLAG_AF; err = i2c_smbus_write_byte_data(rx8010->client, RX8010_FLAG, flagreg); - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); if (err < 0) return err; @@ -404,7 +392,6 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) struct rx8010_data *rx8010 = dev_get_drvdata(dev); int ret, tmp; int flagreg; - unsigned long irqflags; switch (cmd) { case RTC_VL_READ: @@ -419,16 +406,13 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) return 0; case RTC_VL_CLR: - spin_lock_irqsave(&rx8010->flags_lock, irqflags); flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG); if (flagreg < 0) { - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); return flagreg; } flagreg &= ~RX8010_FLAG_VLF; ret = i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg); - spin_unlock_irqrestore(&rx8010->flags_lock, irqflags); if (ret < 0) return ret; @@ -466,8 +450,6 @@ static int rx8010_probe(struct i2c_client *client, rx8010->client = client; i2c_set_clientdata(client, rx8010); - spin_lock_init(&rx8010->flags_lock); - err = rx8010_init_client(client); if (err) return err; diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 17b6235d67a5..c626e43a9cbb 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -535,7 +535,7 @@ static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) return 0; } -static struct rtc_class_ops sh_rtc_ops = { +static const struct rtc_class_ops sh_rtc_ops = { .read_time = sh_rtc_read_time, .set_time = sh_rtc_set_time, .read_alarm = sh_rtc_read_alarm, diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 0f11c2a228e3..d51b07d620f7 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -184,6 +184,7 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) rtc_tm_to_time(alrm_tm, &time); regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0); + rtc_write_sync_lp(data); regmap_write(data->regmap, data->offset + SNVS_LPTAR, time); /* Clear alarm interrupt status bit */ diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c new file mode 100644 index 000000000000..bd57eb1029e1 --- 
/dev/null +++ b/drivers/rtc/rtc-stm32.c @@ -0,0 +1,725 @@ +/* + * Copyright (C) Amelie Delaunay 2016 + * Author: Amelie Delaunay <amelie.delaunay@st.com> + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/bcd.h> +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/ioport.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/rtc.h> + +#define DRIVER_NAME "stm32_rtc" + +/* STM32 RTC registers */ +#define STM32_RTC_TR 0x00 +#define STM32_RTC_DR 0x04 +#define STM32_RTC_CR 0x08 +#define STM32_RTC_ISR 0x0C +#define STM32_RTC_PRER 0x10 +#define STM32_RTC_ALRMAR 0x1C +#define STM32_RTC_WPR 0x24 + +/* STM32_RTC_TR bit fields */ +#define STM32_RTC_TR_SEC_SHIFT 0 +#define STM32_RTC_TR_SEC GENMASK(6, 0) +#define STM32_RTC_TR_MIN_SHIFT 8 +#define STM32_RTC_TR_MIN GENMASK(14, 8) +#define STM32_RTC_TR_HOUR_SHIFT 16 +#define STM32_RTC_TR_HOUR GENMASK(21, 16) + +/* STM32_RTC_DR bit fields */ +#define STM32_RTC_DR_DATE_SHIFT 0 +#define STM32_RTC_DR_DATE GENMASK(5, 0) +#define STM32_RTC_DR_MONTH_SHIFT 8 +#define STM32_RTC_DR_MONTH GENMASK(12, 8) +#define STM32_RTC_DR_WDAY_SHIFT 13 +#define STM32_RTC_DR_WDAY GENMASK(15, 13) +#define STM32_RTC_DR_YEAR_SHIFT 16 +#define STM32_RTC_DR_YEAR GENMASK(23, 16) + +/* STM32_RTC_CR bit fields */ +#define STM32_RTC_CR_FMT BIT(6) +#define STM32_RTC_CR_ALRAE BIT(8) +#define STM32_RTC_CR_ALRAIE BIT(12) + +/* STM32_RTC_ISR bit fields */ +#define STM32_RTC_ISR_ALRAWF BIT(0) +#define STM32_RTC_ISR_INITS BIT(4) +#define STM32_RTC_ISR_RSF BIT(5) +#define STM32_RTC_ISR_INITF BIT(6) +#define STM32_RTC_ISR_INIT BIT(7) +#define STM32_RTC_ISR_ALRAF BIT(8) + +/* STM32_RTC_PRER bit fields */ +#define STM32_RTC_PRER_PRED_S_SHIFT 0 +#define STM32_RTC_PRER_PRED_S GENMASK(14, 0) +#define STM32_RTC_PRER_PRED_A_SHIFT 16 +#define STM32_RTC_PRER_PRED_A GENMASK(22, 16) + +/* STM32_RTC_ALRMAR and STM32_RTC_ALRMBR bit fields */ +#define STM32_RTC_ALRMXR_SEC_SHIFT 0 +#define STM32_RTC_ALRMXR_SEC GENMASK(6, 0) +#define STM32_RTC_ALRMXR_SEC_MASK BIT(7) +#define STM32_RTC_ALRMXR_MIN_SHIFT 8 +#define STM32_RTC_ALRMXR_MIN GENMASK(14, 8) +#define STM32_RTC_ALRMXR_MIN_MASK BIT(15) +#define STM32_RTC_ALRMXR_HOUR_SHIFT 16 +#define STM32_RTC_ALRMXR_HOUR GENMASK(21, 16) +#define STM32_RTC_ALRMXR_PM BIT(22) +#define STM32_RTC_ALRMXR_HOUR_MASK BIT(23) +#define STM32_RTC_ALRMXR_DATE_SHIFT 24 +#define STM32_RTC_ALRMXR_DATE GENMASK(29, 24) +#define STM32_RTC_ALRMXR_WDSEL BIT(30) +#define STM32_RTC_ALRMXR_WDAY_SHIFT 24 +#define STM32_RTC_ALRMXR_WDAY GENMASK(27, 24) +#define STM32_RTC_ALRMXR_DATE_MASK BIT(31) + +/* STM32_RTC_WPR key constants */ +#define RTC_WPR_1ST_KEY 0xCA +#define RTC_WPR_2ND_KEY 0x53 +#define RTC_WPR_WRONG_KEY 0xFF + +/* + * RTC registers are protected against parasitic write access. + * PWR_CR_DBP bit must be set to enable write access to RTC registers. 
+ */ +/* STM32_PWR_CR */ +#define PWR_CR 0x00 +/* STM32_PWR_CR bit field */ +#define PWR_CR_DBP BIT(8) + +struct stm32_rtc { + struct rtc_device *rtc_dev; + void __iomem *base; + struct regmap *dbp; + struct clk *ck_rtc; + int irq_alarm; +}; + +static void stm32_rtc_wpr_unlock(struct stm32_rtc *rtc) +{ + writel_relaxed(RTC_WPR_1ST_KEY, rtc->base + STM32_RTC_WPR); + writel_relaxed(RTC_WPR_2ND_KEY, rtc->base + STM32_RTC_WPR); +} + +static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc) +{ + writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + STM32_RTC_WPR); +} + +static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc) +{ + unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + + if (!(isr & STM32_RTC_ISR_INITF)) { + isr |= STM32_RTC_ISR_INIT; + writel_relaxed(isr, rtc->base + STM32_RTC_ISR); + + /* + * It takes around 2 ck_rtc clock cycles to enter in + * initialization phase mode (and have INITF flag set). As + * slowest ck_rtc frequency may be 32kHz and highest should be + * 1MHz, we poll every 10 us with a timeout of 100ms. + */ + return readl_relaxed_poll_timeout_atomic( + rtc->base + STM32_RTC_ISR, + isr, (isr & STM32_RTC_ISR_INITF), + 10, 100000); + } + + return 0; +} + +static void stm32_rtc_exit_init_mode(struct stm32_rtc *rtc) +{ + unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + + isr &= ~STM32_RTC_ISR_INIT; + writel_relaxed(isr, rtc->base + STM32_RTC_ISR); +} + +static int stm32_rtc_wait_sync(struct stm32_rtc *rtc) +{ + unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + + isr &= ~STM32_RTC_ISR_RSF; + writel_relaxed(isr, rtc->base + STM32_RTC_ISR); + + /* + * Wait for RSF to be set to ensure the calendar registers are + * synchronised, it takes around 2 ck_rtc clock cycles + */ + return readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR, + isr, + (isr & STM32_RTC_ISR_RSF), + 10, 100000); +} + +static irqreturn_t stm32_rtc_alarm_irq(int irq, void *dev_id) +{ + struct stm32_rtc *rtc = (struct stm32_rtc *)dev_id; + unsigned int isr, cr; + + mutex_lock(&rtc->rtc_dev->ops_lock); + + isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + + if ((isr & STM32_RTC_ISR_ALRAF) && + (cr & STM32_RTC_CR_ALRAIE)) { + /* Alarm A flag - Alarm interrupt */ + dev_dbg(&rtc->rtc_dev->dev, "Alarm occurred\n"); + + /* Pass event to the kernel */ + rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); + + /* Clear event flag, otherwise new events won't be received */ + writel_relaxed(isr & ~STM32_RTC_ISR_ALRAF, + rtc->base + STM32_RTC_ISR); + } + + mutex_unlock(&rtc->rtc_dev->ops_lock); + + return IRQ_HANDLED; +} + +/* Convert rtc_time structure from bin to bcd format */ +static void tm2bcd(struct rtc_time *tm) +{ + tm->tm_sec = bin2bcd(tm->tm_sec); + tm->tm_min = bin2bcd(tm->tm_min); + tm->tm_hour = bin2bcd(tm->tm_hour); + + tm->tm_mday = bin2bcd(tm->tm_mday); + tm->tm_mon = bin2bcd(tm->tm_mon + 1); + tm->tm_year = bin2bcd(tm->tm_year - 100); + /* + * Number of days since Sunday + * - on kernel side, 0=Sunday...6=Saturday + * - on rtc side, 0=invalid,1=Monday...7=Sunday + */ + tm->tm_wday = (!tm->tm_wday) ? 
7 : tm->tm_wday; +} + +/* Convert rtc_time structure from bcd to bin format */ +static void bcd2tm(struct rtc_time *tm) +{ + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon) - 1; + tm->tm_year = bcd2bin(tm->tm_year) + 100; + /* + * Number of days since Sunday + * - on kernel side, 0=Sunday...6=Saturday + * - on rtc side, 0=invalid,1=Monday...7=Sunday + */ + tm->tm_wday %= 7; +} + +static int stm32_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + unsigned int tr, dr; + + /* Time and Date in BCD format */ + tr = readl_relaxed(rtc->base + STM32_RTC_TR); + dr = readl_relaxed(rtc->base + STM32_RTC_DR); + + tm->tm_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT; + tm->tm_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT; + tm->tm_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT; + + tm->tm_mday = (dr & STM32_RTC_DR_DATE) >> STM32_RTC_DR_DATE_SHIFT; + tm->tm_mon = (dr & STM32_RTC_DR_MONTH) >> STM32_RTC_DR_MONTH_SHIFT; + tm->tm_year = (dr & STM32_RTC_DR_YEAR) >> STM32_RTC_DR_YEAR_SHIFT; + tm->tm_wday = (dr & STM32_RTC_DR_WDAY) >> STM32_RTC_DR_WDAY_SHIFT; + + /* We don't report tm_yday and tm_isdst */ + + bcd2tm(tm); + + return 0; +} + +static int stm32_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + unsigned int tr, dr; + int ret = 0; + + tm2bcd(tm); + + /* Time in BCD format */ + tr = ((tm->tm_sec << STM32_RTC_TR_SEC_SHIFT) & STM32_RTC_TR_SEC) | + ((tm->tm_min << STM32_RTC_TR_MIN_SHIFT) & STM32_RTC_TR_MIN) | + ((tm->tm_hour << STM32_RTC_TR_HOUR_SHIFT) & STM32_RTC_TR_HOUR); + + /* Date in BCD format */ + dr = ((tm->tm_mday << STM32_RTC_DR_DATE_SHIFT) & STM32_RTC_DR_DATE) | + ((tm->tm_mon << STM32_RTC_DR_MONTH_SHIFT) & STM32_RTC_DR_MONTH) | + ((tm->tm_year << STM32_RTC_DR_YEAR_SHIFT) & STM32_RTC_DR_YEAR) | + ((tm->tm_wday << STM32_RTC_DR_WDAY_SHIFT) & STM32_RTC_DR_WDAY); + + stm32_rtc_wpr_unlock(rtc); + + ret = stm32_rtc_enter_init_mode(rtc); + if (ret) { + dev_err(dev, "Can't enter in init mode. 
Set time aborted.\n"); + goto end; + } + + writel_relaxed(tr, rtc->base + STM32_RTC_TR); + writel_relaxed(dr, rtc->base + STM32_RTC_DR); + + stm32_rtc_exit_init_mode(rtc); + + ret = stm32_rtc_wait_sync(rtc); +end: + stm32_rtc_wpr_lock(rtc); + + return ret; +} + +static int stm32_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time; + unsigned int alrmar, cr, isr; + + alrmar = readl_relaxed(rtc->base + STM32_RTC_ALRMAR); + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + + if (alrmar & STM32_RTC_ALRMXR_DATE_MASK) { + /* + * Date/day doesn't matter in Alarm comparison so alarm + * triggers every day + */ + tm->tm_mday = -1; + tm->tm_wday = -1; + } else { + if (alrmar & STM32_RTC_ALRMXR_WDSEL) { + /* Alarm is set to a day of week */ + tm->tm_mday = -1; + tm->tm_wday = (alrmar & STM32_RTC_ALRMXR_WDAY) >> + STM32_RTC_ALRMXR_WDAY_SHIFT; + tm->tm_wday %= 7; + } else { + /* Alarm is set to a day of month */ + tm->tm_wday = -1; + tm->tm_mday = (alrmar & STM32_RTC_ALRMXR_DATE) >> + STM32_RTC_ALRMXR_DATE_SHIFT; + } + } + + if (alrmar & STM32_RTC_ALRMXR_HOUR_MASK) { + /* Hours don't matter in Alarm comparison */ + tm->tm_hour = -1; + } else { + tm->tm_hour = (alrmar & STM32_RTC_ALRMXR_HOUR) >> + STM32_RTC_ALRMXR_HOUR_SHIFT; + if (alrmar & STM32_RTC_ALRMXR_PM) + tm->tm_hour += 12; + } + + if (alrmar & STM32_RTC_ALRMXR_MIN_MASK) { + /* Minutes don't matter in Alarm comparison */ + tm->tm_min = -1; + } else { + tm->tm_min = (alrmar & STM32_RTC_ALRMXR_MIN) >> + STM32_RTC_ALRMXR_MIN_SHIFT; + } + + if (alrmar & STM32_RTC_ALRMXR_SEC_MASK) { + /* Seconds don't matter in Alarm comparison */ + tm->tm_sec = -1; + } else { + tm->tm_sec = (alrmar & STM32_RTC_ALRMXR_SEC) >> + STM32_RTC_ALRMXR_SEC_SHIFT; + } + + bcd2tm(tm); + + alrm->enabled = (cr & STM32_RTC_CR_ALRAE) ? 1 : 0; + alrm->pending = (isr & STM32_RTC_ISR_ALRAF) ? 1 : 0; + + return 0; +} + +static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + unsigned int isr, cr; + + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + + stm32_rtc_wpr_unlock(rtc); + + /* We expose Alarm A to the kernel */ + if (enabled) + cr |= (STM32_RTC_CR_ALRAIE | STM32_RTC_CR_ALRAE); + else + cr &= ~(STM32_RTC_CR_ALRAIE | STM32_RTC_CR_ALRAE); + writel_relaxed(cr, rtc->base + STM32_RTC_CR); + + /* Clear event flag, otherwise new events won't be received */ + isr = readl_relaxed(rtc->base + STM32_RTC_ISR); + isr &= ~STM32_RTC_ISR_ALRAF; + writel_relaxed(isr, rtc->base + STM32_RTC_ISR); + + stm32_rtc_wpr_lock(rtc); + + return 0; +} + +static int stm32_rtc_valid_alrm(struct stm32_rtc *rtc, struct rtc_time *tm) +{ + int cur_day, cur_mon, cur_year, cur_hour, cur_min, cur_sec; + unsigned int dr = readl_relaxed(rtc->base + STM32_RTC_DR); + unsigned int tr = readl_relaxed(rtc->base + STM32_RTC_TR); + + cur_day = (dr & STM32_RTC_DR_DATE) >> STM32_RTC_DR_DATE_SHIFT; + cur_mon = (dr & STM32_RTC_DR_MONTH) >> STM32_RTC_DR_MONTH_SHIFT; + cur_year = (dr & STM32_RTC_DR_YEAR) >> STM32_RTC_DR_YEAR_SHIFT; + cur_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT; + cur_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT; + cur_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT; + + /* + * Assuming current date is M-D-Y H:M:S. + * RTC alarm can't be set on a specific month and year. 
+ * So the valid alarm range is: + * M-D-Y H:M:S < alarm <= (M+1)-D-Y H:M:S + * with a specific case for December... + */ + if ((((tm->tm_year > cur_year) && + (tm->tm_mon == 0x1) && (cur_mon == 0x12)) || + ((tm->tm_year == cur_year) && + (tm->tm_mon <= cur_mon + 1))) && + ((tm->tm_mday > cur_day) || + ((tm->tm_mday == cur_day) && + ((tm->tm_hour > cur_hour) || + ((tm->tm_hour == cur_hour) && (tm->tm_min > cur_min)) || + ((tm->tm_hour == cur_hour) && (tm->tm_min == cur_min) && + (tm->tm_sec >= cur_sec)))))) + return 0; + + return -EINVAL; +} + +static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time; + unsigned int cr, isr, alrmar; + int ret = 0; + + tm2bcd(tm); + + /* + * RTC alarm can't be set on a specific date, unless this date is + * up to the same day of month next month. + */ + if (stm32_rtc_valid_alrm(rtc, tm) < 0) { + dev_err(dev, "Alarm can be set only on upcoming month.\n"); + return -EINVAL; + } + + alrmar = 0; + /* tm_year and tm_mon are not used because not supported by RTC */ + alrmar |= (tm->tm_mday << STM32_RTC_ALRMXR_DATE_SHIFT) & + STM32_RTC_ALRMXR_DATE; + /* 24-hour format */ + alrmar &= ~STM32_RTC_ALRMXR_PM; + alrmar |= (tm->tm_hour << STM32_RTC_ALRMXR_HOUR_SHIFT) & + STM32_RTC_ALRMXR_HOUR; + alrmar |= (tm->tm_min << STM32_RTC_ALRMXR_MIN_SHIFT) & + STM32_RTC_ALRMXR_MIN; + alrmar |= (tm->tm_sec << STM32_RTC_ALRMXR_SEC_SHIFT) & + STM32_RTC_ALRMXR_SEC; + + stm32_rtc_wpr_unlock(rtc); + + /* Disable Alarm */ + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + cr &= ~STM32_RTC_CR_ALRAE; + writel_relaxed(cr, rtc->base + STM32_RTC_CR); + + /* + * Poll Alarm write flag to be sure that Alarm update is allowed: it + * takes around 2 ck_rtc clock cycles + */ + ret = readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR, + isr, + (isr & STM32_RTC_ISR_ALRAWF), + 10, 100000); + + if (ret) { + dev_err(dev, "Alarm update not allowed\n"); + goto end; + } + + /* Write to Alarm register */ + writel_relaxed(alrmar, rtc->base + STM32_RTC_ALRMAR); + + if (alrm->enabled) + stm32_rtc_alarm_irq_enable(dev, 1); + else + stm32_rtc_alarm_irq_enable(dev, 0); + +end: + stm32_rtc_wpr_lock(rtc); + + return ret; +} + +static const struct rtc_class_ops stm32_rtc_ops = { + .read_time = stm32_rtc_read_time, + .set_time = stm32_rtc_set_time, + .read_alarm = stm32_rtc_read_alarm, + .set_alarm = stm32_rtc_set_alarm, + .alarm_irq_enable = stm32_rtc_alarm_irq_enable, +}; + +static const struct of_device_id stm32_rtc_of_match[] = { + { .compatible = "st,stm32-rtc" }, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_rtc_of_match); + +static int stm32_rtc_init(struct platform_device *pdev, + struct stm32_rtc *rtc) +{ + unsigned int prer, pred_a, pred_s, pred_a_max, pred_s_max, cr; + unsigned int rate; + int ret = 0; + + rate = clk_get_rate(rtc->ck_rtc); + + /* Find prediv_a and prediv_s to obtain the 1Hz calendar clock */ + pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT; + pred_s_max = STM32_RTC_PRER_PRED_S >> STM32_RTC_PRER_PRED_S_SHIFT; + + for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) { + pred_s = (rate / (pred_a + 1)) - 1; + + if (((pred_s + 1) * (pred_a + 1)) == rate) + break; + } + + /* + * Can't find a 1Hz, so give priority to RTC power consumption + * by choosing the higher possible value for prediv_a + */ + if ((pred_s > pred_s_max) || (pred_a > pred_a_max)) { + pred_a = pred_a_max; + pred_s = (rate / (pred_a + 1)) - 1; + + dev_warn(&pdev->dev, "ck_rtc is %s\n", + (rate < 
((pred_a + 1) * (pred_s + 1))) ? + "fast" : "slow"); + } + + stm32_rtc_wpr_unlock(rtc); + + ret = stm32_rtc_enter_init_mode(rtc); + if (ret) { + dev_err(&pdev->dev, + "Can't enter in init mode. Prescaler config failed.\n"); + goto end; + } + + prer = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) & STM32_RTC_PRER_PRED_S; + writel_relaxed(prer, rtc->base + STM32_RTC_PRER); + prer |= (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) & STM32_RTC_PRER_PRED_A; + writel_relaxed(prer, rtc->base + STM32_RTC_PRER); + + /* Force 24h time format */ + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + cr &= ~STM32_RTC_CR_FMT; + writel_relaxed(cr, rtc->base + STM32_RTC_CR); + + stm32_rtc_exit_init_mode(rtc); + + ret = stm32_rtc_wait_sync(rtc); +end: + stm32_rtc_wpr_lock(rtc); + + return ret; +} + +static int stm32_rtc_probe(struct platform_device *pdev) +{ + struct stm32_rtc *rtc; + struct resource *res; + int ret; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rtc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rtc->base)) + return PTR_ERR(rtc->base); + + rtc->dbp = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "st,syscfg"); + if (IS_ERR(rtc->dbp)) { + dev_err(&pdev->dev, "no st,syscfg\n"); + return PTR_ERR(rtc->dbp); + } + + rtc->ck_rtc = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(rtc->ck_rtc)) { + dev_err(&pdev->dev, "no ck_rtc clock"); + return PTR_ERR(rtc->ck_rtc); + } + + ret = clk_prepare_enable(rtc->ck_rtc); + if (ret) + return ret; + + regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, PWR_CR_DBP); + + /* + * After a system reset, RTC_ISR.INITS flag can be read to check if + * the calendar has been initalized or not. INITS flag is reset by a + * power-on reset (no vbat, no power-supply). It is not reset if + * ck_rtc parent clock has changed (so RTC prescalers need to be + * changed). That's why we cannot rely on this flag to know if RTC + * init has to be done. 
+ */ + ret = stm32_rtc_init(pdev, rtc); + if (ret) + goto err; + + rtc->irq_alarm = platform_get_irq(pdev, 0); + if (rtc->irq_alarm <= 0) { + dev_err(&pdev->dev, "no alarm irq\n"); + ret = rtc->irq_alarm; + goto err; + } + + platform_set_drvdata(pdev, rtc); + + ret = device_init_wakeup(&pdev->dev, true); + if (ret) + dev_warn(&pdev->dev, + "alarm won't be able to wake up the system"); + + rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name, + &stm32_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc->rtc_dev)) { + ret = PTR_ERR(rtc->rtc_dev); + dev_err(&pdev->dev, "rtc device registration failed, err=%d\n", + ret); + goto err; + } + + /* Handle RTC alarm interrupts */ + ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_alarm, NULL, + stm32_rtc_alarm_irq, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + pdev->name, rtc); + if (ret) { + dev_err(&pdev->dev, "IRQ%d (alarm interrupt) already claimed\n", + rtc->irq_alarm); + goto err; + } + + /* + * If INITS flag is reset (calendar year field set to 0x00), calendar + * must be initialized + */ + if (!(readl_relaxed(rtc->base + STM32_RTC_ISR) & STM32_RTC_ISR_INITS)) + dev_warn(&pdev->dev, "Date/Time must be initialized\n"); + + return 0; +err: + clk_disable_unprepare(rtc->ck_rtc); + + regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0); + + device_init_wakeup(&pdev->dev, false); + + return ret; +} + +static int stm32_rtc_remove(struct platform_device *pdev) +{ + struct stm32_rtc *rtc = platform_get_drvdata(pdev); + unsigned int cr; + + /* Disable interrupts */ + stm32_rtc_wpr_unlock(rtc); + cr = readl_relaxed(rtc->base + STM32_RTC_CR); + cr &= ~STM32_RTC_CR_ALRAIE; + writel_relaxed(cr, rtc->base + STM32_RTC_CR); + stm32_rtc_wpr_lock(rtc); + + clk_disable_unprepare(rtc->ck_rtc); + + /* Enable backup domain write protection */ + regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0); + + device_init_wakeup(&pdev->dev, false); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int stm32_rtc_suspend(struct device *dev) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + return enable_irq_wake(rtc->irq_alarm); + + return 0; +} + +static int stm32_rtc_resume(struct device *dev) +{ + struct stm32_rtc *rtc = dev_get_drvdata(dev); + int ret = 0; + + ret = stm32_rtc_wait_sync(rtc); + if (ret < 0) + return ret; + + if (device_may_wakeup(dev)) + return disable_irq_wake(rtc->irq_alarm); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(stm32_rtc_pm_ops, + stm32_rtc_suspend, stm32_rtc_resume); + +static struct platform_driver stm32_rtc_driver = { + .probe = stm32_rtc_probe, + .remove = stm32_rtc_remove, + .driver = { + .name = DRIVER_NAME, + .pm = &stm32_rtc_pm_ops, + .of_match_table = stm32_rtc_of_match, + }, +}; + +module_platform_driver(stm32_rtc_driver); + +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 Real Time Clock driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index c169a2cd4727..39cbc1238b92 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -20,6 +20,8 @@ * more details. 
*/ +#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/fs.h> @@ -33,15 +35,20 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/rtc.h> +#include <linux/slab.h> #include <linux/types.h> /* Control register */ #define SUN6I_LOSC_CTRL 0x0000 +#define SUN6I_LOSC_CTRL_KEY (0x16aa << 16) #define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9) #define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8) #define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7) +#define SUN6I_LOSC_CTRL_EXT_OSC BIT(0) #define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7) +#define SUN6I_LOSC_CLK_PRESCAL 0x0008 + /* RTC */ #define SUN6I_RTC_YMD 0x0010 #define SUN6I_RTC_HMS 0x0014 @@ -114,13 +121,142 @@ struct sun6i_rtc_dev { void __iomem *base; int irq; unsigned long alarm; + + struct clk_hw hw; + struct clk_hw *int_osc; + struct clk *losc; + + spinlock_t lock; +}; + +static struct sun6i_rtc_dev *sun6i_rtc; + +static unsigned long sun6i_rtc_osc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw); + u32 val; + + val = readl(rtc->base + SUN6I_LOSC_CTRL); + if (val & SUN6I_LOSC_CTRL_EXT_OSC) + return parent_rate; + + val = readl(rtc->base + SUN6I_LOSC_CLK_PRESCAL); + val &= GENMASK(4, 0); + + return parent_rate / (val + 1); +} + +static u8 sun6i_rtc_osc_get_parent(struct clk_hw *hw) +{ + struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw); + + return readl(rtc->base + SUN6I_LOSC_CTRL) & SUN6I_LOSC_CTRL_EXT_OSC; +} + +static int sun6i_rtc_osc_set_parent(struct clk_hw *hw, u8 index) +{ + struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw); + unsigned long flags; + u32 val; + + if (index > 1) + return -EINVAL; + + spin_lock_irqsave(&rtc->lock, flags); + val = readl(rtc->base + SUN6I_LOSC_CTRL); + val &= ~SUN6I_LOSC_CTRL_EXT_OSC; + val |= SUN6I_LOSC_CTRL_KEY; + val |= index ? SUN6I_LOSC_CTRL_EXT_OSC : 0; + writel(val, rtc->base + SUN6I_LOSC_CTRL); + spin_unlock_irqrestore(&rtc->lock, flags); + + return 0; +} + +static const struct clk_ops sun6i_rtc_osc_ops = { + .recalc_rate = sun6i_rtc_osc_recalc_rate, + + .get_parent = sun6i_rtc_osc_get_parent, + .set_parent = sun6i_rtc_osc_set_parent, }; +static void __init sun6i_rtc_clk_init(struct device_node *node) +{ + struct clk_hw_onecell_data *clk_data; + struct sun6i_rtc_dev *rtc; + struct clk_init_data init = { + .ops = &sun6i_rtc_osc_ops, + }; + const char *parents[2]; + + rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return; + spin_lock_init(&rtc->lock); + + clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws), + GFP_KERNEL); + if (!clk_data) + return; + spin_lock_init(&rtc->lock); + + rtc->base = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(rtc->base)) { + pr_crit("Can't map RTC registers"); + return; + } + + /* Switch to the external, more precise, oscillator */ + writel(SUN6I_LOSC_CTRL_KEY | SUN6I_LOSC_CTRL_EXT_OSC, + rtc->base + SUN6I_LOSC_CTRL); + + /* Yes, I know, this is ugly. 
*/ + sun6i_rtc = rtc; + + /* Deal with old DTs */ + if (!of_get_property(node, "clocks", NULL)) + return; + + rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL, + "rtc-int-osc", + NULL, 0, + 667000, + 300000000); + if (IS_ERR(rtc->int_osc)) { + pr_crit("Couldn't register the internal oscillator\n"); + return; + } + + parents[0] = clk_hw_get_name(rtc->int_osc); + parents[1] = of_clk_get_parent_name(node, 0); + + rtc->hw.init = &init; + + init.parent_names = parents; + init.num_parents = of_clk_get_parent_count(node) + 1; + of_property_read_string(node, "clock-output-names", &init.name); + + rtc->losc = clk_register(NULL, &rtc->hw); + if (IS_ERR(rtc->losc)) { + pr_crit("Couldn't register the LOSC clock\n"); + return; + } + + clk_data->num = 1; + clk_data->hws[0] = &rtc->hw; + of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); +} +CLK_OF_DECLARE_DRIVER(sun6i_rtc_clk, "allwinner,sun6i-a31-rtc", + sun6i_rtc_clk_init); + static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) { struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id; + irqreturn_t ret = IRQ_NONE; u32 val; + spin_lock(&chip->lock); val = readl(chip->base + SUN6I_ALRM_IRQ_STA); if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) { @@ -129,10 +265,11 @@ static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF); - return IRQ_HANDLED; + ret = IRQ_HANDLED; } + spin_unlock(&chip->lock); - return IRQ_NONE; + return ret; } static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) @@ -140,6 +277,7 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) u32 alrm_val = 0; u32 alrm_irq_val = 0; u32 alrm_wake_val = 0; + unsigned long flags; if (to) { alrm_val = SUN6I_ALRM_EN_CNT_EN; @@ -150,9 +288,11 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) chip->base + SUN6I_ALRM_IRQ_STA); } + spin_lock_irqsave(&chip->lock, flags); writel(alrm_val, chip->base + SUN6I_ALRM_EN); writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN); writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG); + spin_unlock_irqrestore(&chip->lock, flags); } static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) @@ -191,11 +331,15 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm) { struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + unsigned long flags; u32 alrm_st; u32 alrm_en; + spin_lock_irqsave(&chip->lock, flags); alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN); alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA); + spin_unlock_irqrestore(&chip->lock, flags); + wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN); wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN); rtc_time_to_tm(chip->alarm, &wkalrm->time); @@ -349,22 +493,15 @@ static const struct rtc_class_ops sun6i_rtc_ops = { static int sun6i_rtc_probe(struct platform_device *pdev) { - struct sun6i_rtc_dev *chip; - struct resource *res; + struct sun6i_rtc_dev *chip = sun6i_rtc; int ret; - chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) - return -ENOMEM; + return -ENODEV; platform_set_drvdata(pdev, chip); chip->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chip->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(chip->base)) - return PTR_ERR(chip->base); - chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); @@ -404,8 +541,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev) /* 
disable alarm wakeup */ writel(0, chip->base + SUN6I_ALARM_CONFIG); - chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev, - &sun6i_rtc_ops, THIS_MODULE); + clk_prepare_enable(chip->losc); + + chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-sun6i", + &sun6i_rtc_ops, THIS_MODULE); if (IS_ERR(chip->rtc)) { dev_err(&pdev->dev, "unable to register device\n"); return PTR_ERR(chip->rtc); @@ -416,15 +555,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev) return 0; } -static int sun6i_rtc_remove(struct platform_device *pdev) -{ - struct sun6i_rtc_dev *chip = platform_get_drvdata(pdev); - - rtc_device_unregister(chip->rtc); - - return 0; -} - static const struct of_device_id sun6i_rtc_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-rtc" }, { /* sentinel */ }, @@ -433,15 +563,9 @@ MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids); static struct platform_driver sun6i_rtc_driver = { .probe = sun6i_rtc_probe, - .remove = sun6i_rtc_remove, .driver = { .name = "sun6i-rtc", .of_match_table = sun6i_rtc_dt_ids, }, }; - -module_platform_driver(sun6i_rtc_driver); - -MODULE_DESCRIPTION("sun6i RTC driver"); -MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(sun6i_rtc_driver); diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index 3853ba963bb5..d30d57b048d3 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -17,16 +17,18 @@ * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -#include <linux/kernel.h> + +#include <linux/clk.h> +#include <linux/delay.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/irq.h> #include <linux/io.h> -#include <linux/delay.h> -#include <linux/rtc.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> +#include <linux/rtc.h> +#include <linux/slab.h> /* set to 1 = busy every eight 32kHz clocks during copy of sec+msec to AHB */ #define TEGRA_RTC_REG_BUSY 0x004 @@ -59,6 +61,7 @@ struct tegra_rtc_info { struct platform_device *pdev; struct rtc_device *rtc_dev; void __iomem *rtc_base; /* NULL if not initialized. */ + struct clk *clk; int tegra_rtc_irq; /* alarm and periodic irq */ spinlock_t tegra_rtc_lock; }; @@ -326,6 +329,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) if (info->tegra_rtc_irq <= 0) return -EBUSY; + info->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(info->clk)) + return PTR_ERR(info->clk); + + ret = clk_prepare_enable(info->clk); + if (ret < 0) + return ret; + /* set context info. 
*/ info->pdev = pdev; spin_lock_init(&info->tegra_rtc_lock); @@ -346,7 +357,7 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) ret = PTR_ERR(info->rtc_dev); dev_err(&pdev->dev, "Unable to register device (err=%d).\n", ret); - return ret; + goto disable_clk; } ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq, @@ -356,12 +367,25 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Unable to request interrupt for device (err=%d).\n", ret); - return ret; + goto disable_clk; } dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n"); return 0; + +disable_clk: + clk_disable_unprepare(info->clk); + return ret; +} + +static int tegra_rtc_remove(struct platform_device *pdev) +{ + struct tegra_rtc_info *info = platform_get_drvdata(pdev); + + clk_disable_unprepare(info->clk); + + return 0; } #ifdef CONFIG_PM_SLEEP @@ -413,6 +437,7 @@ static void tegra_rtc_shutdown(struct platform_device *pdev) MODULE_ALIAS("platform:tegra_rtc"); static struct platform_driver tegra_rtc_driver = { + .remove = tegra_rtc_remove, .shutdown = tegra_rtc_shutdown, .driver = { .name = "tegra_rtc", diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index 5a3d53caa485..d0244d7979fc 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -21,6 +21,7 @@ #include <linux/types.h> #include <linux/rtc.h> #include <linux/bcd.h> +#include <linux/math64.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/mfd/tps65910.h> @@ -33,7 +34,21 @@ struct tps65910_rtc { /* Total number of RTC registers needed to set time*/ #define NUM_TIME_REGS (TPS65910_YEARS - TPS65910_SECONDS + 1) -static int tps65910_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) +/* Total number of RTC registers needed to set compensation registers */ +#define NUM_COMP_REGS (TPS65910_RTC_COMP_MSB - TPS65910_RTC_COMP_LSB + 1) + +/* Min and max values supported with 'offset' interface (swapped sign) */ +#define MIN_OFFSET (-277761) +#define MAX_OFFSET (277778) + +/* Number of ticks per hour */ +#define TICKS_PER_HOUR (32768 * 3600) + +/* Multiplier for ppb conversions */ +#define PPB_MULT (1000000000LL) + +static int tps65910_rtc_alarm_irq_enable(struct device *dev, + unsigned int enabled) { struct tps65910 *tps = dev_get_drvdata(dev->parent); u8 val = 0; @@ -187,6 +202,133 @@ static int tps65910_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return ret; } +static int tps65910_rtc_set_calibration(struct device *dev, int calibration) +{ + unsigned char comp_data[NUM_COMP_REGS]; + struct tps65910 *tps = dev_get_drvdata(dev->parent); + s16 value; + int ret; + + /* + * TPS65910 uses two's complement 16 bit value for compensation for RTC + * crystal inaccuracies. One time every hour when seconds counter + * increments from 0 to 1 compensation value will be added to internal + * RTC counter value. + * + * Compensation value 0x7FFF is prohibited value. + * + * Valid range for compensation value: [-32768 .. 
32766] + */ + if ((calibration < -32768) || (calibration > 32766)) { + dev_err(dev, "RTC calibration value out of range: %d\n", + calibration); + return -EINVAL; + } + + value = (s16)calibration; + + comp_data[0] = (u16)value & 0xFF; + comp_data[1] = ((u16)value >> 8) & 0xFF; + + /* Update all the compensation registers in one shot */ + ret = regmap_bulk_write(tps->regmap, TPS65910_RTC_COMP_LSB, + comp_data, NUM_COMP_REGS); + if (ret < 0) { + dev_err(dev, "rtc_set_calibration error: %d\n", ret); + return ret; + } + + /* Enable automatic compensation */ + ret = regmap_update_bits(tps->regmap, TPS65910_RTC_CTRL, + TPS65910_RTC_CTRL_AUTO_COMP, TPS65910_RTC_CTRL_AUTO_COMP); + if (ret < 0) + dev_err(dev, "auto_comp enable failed with error: %d\n", ret); + + return ret; +} + +static int tps65910_rtc_get_calibration(struct device *dev, int *calibration) +{ + unsigned char comp_data[NUM_COMP_REGS]; + struct tps65910 *tps = dev_get_drvdata(dev->parent); + unsigned int ctrl; + u16 value; + int ret; + + ret = regmap_read(tps->regmap, TPS65910_RTC_CTRL, &ctrl); + if (ret < 0) + return ret; + + /* If automatic compensation is not enabled report back zero */ + if (!(ctrl & TPS65910_RTC_CTRL_AUTO_COMP)) { + *calibration = 0; + return 0; + } + + ret = regmap_bulk_read(tps->regmap, TPS65910_RTC_COMP_LSB, comp_data, + NUM_COMP_REGS); + if (ret < 0) { + dev_err(dev, "rtc_get_calibration error: %d\n", ret); + return ret; + } + + value = (u16)comp_data[0] | ((u16)comp_data[1] << 8); + + *calibration = (s16)value; + + return 0; +} + +static int tps65910_read_offset(struct device *dev, long *offset) +{ + int calibration; + s64 tmp; + int ret; + + ret = tps65910_rtc_get_calibration(dev, &calibration); + if (ret < 0) + return ret; + + /* Convert from RTC calibration register format to ppb format */ + tmp = calibration * (s64)PPB_MULT; + if (tmp < 0) + tmp -= TICKS_PER_HOUR / 2LL; + else + tmp += TICKS_PER_HOUR / 2LL; + tmp = div_s64(tmp, TICKS_PER_HOUR); + + /* Offset value operates in negative way, so swap sign */ + *offset = (long)-tmp; + + return 0; +} + +static int tps65910_set_offset(struct device *dev, long offset) +{ + int calibration; + s64 tmp; + int ret; + + /* Make sure offset value is within supported range */ + if (offset < MIN_OFFSET || offset > MAX_OFFSET) + return -ERANGE; + + /* Convert from ppb format to RTC calibration register format */ + tmp = offset * (s64)TICKS_PER_HOUR; + if (tmp < 0) + tmp -= PPB_MULT / 2LL; + else + tmp += PPB_MULT / 2LL; + tmp = div_s64(tmp, PPB_MULT); + + /* Offset value operates in negative way, so swap sign */ + calibration = (int)-tmp; + + ret = tps65910_rtc_set_calibration(dev, calibration); + + return ret; +} + static irqreturn_t tps65910_rtc_interrupt(int irq, void *rtc) { struct device *dev = rtc; @@ -219,6 +361,8 @@ static const struct rtc_class_ops tps65910_rtc_ops = { .read_alarm = tps65910_rtc_read_alarm, .set_alarm = tps65910_rtc_set_alarm, .alarm_irq_enable = tps65910_rtc_alarm_irq_enable, + .read_offset = tps65910_read_offset, + .set_offset = tps65910_set_offset, }; static int tps65910_rtc_probe(struct platform_device *pdev) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 0f1713727d4c..0b38217f8147 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, break; case 3: /* tsa_intrg */ len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.intrg.: not supportet yet\n"); + " tsb->tsa.intrg.: not supported yet\n"); 
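The tps65910 read_offset/set_offset pair above maps between the RTC core's parts-per-billion offset and the chip's signed 16-bit per-hour compensation counter, rounding to the nearest step and flipping the sign. A minimal, self-contained sketch of the same arithmetic (helper names are illustrative, not from the patch), handy for sanity-checking values before they go through the RTC offset interface:

#include <stdio.h>

#define TICKS_PER_HOUR	(32768LL * 3600)	/* 32.768 kHz crystal ticks per hour */
#define PPB_MULT	1000000000LL

/* ppb offset -> TPS65910 compensation register value (note the swapped sign) */
static long long ppb_to_comp(long long offset_ppb)
{
	long long t = offset_ppb * TICKS_PER_HOUR;

	t += (t < 0) ? -(PPB_MULT / 2) : (PPB_MULT / 2);	/* round to nearest */
	return -(t / PPB_MULT);
}

/* TPS65910 compensation register value -> ppb offset as read_offset() reports it */
static long long comp_to_ppb(long long comp)
{
	long long t = comp * PPB_MULT;

	t += (t < 0) ? -(TICKS_PER_HOUR / 2) : (TICKS_PER_HOUR / 2);
	return -(t / TICKS_PER_HOUR);
}

int main(void)
{
	/* one register step is ~8.5 ppb: compensation 1 reads back as -8 ppb */
	printf("comp 1 -> %lld ppb, -8 ppb -> comp %lld\n",
	       comp_to_ppb(1), ppb_to_comp(-8));
	return 0;
}

C integer division truncates toward zero, like the kernel's div_s64, so adding or subtracting half the divisor first gives the same round-to-nearest behaviour as the driver code. One compensation step works out to roughly 8.5 ppb, which is why the MIN_OFFSET/MAX_OFFSET bounds above map onto the [-32768 .. 32766] register range checked in tps65910_rtc_set_calibration.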
break; } diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 85eca1cef063..c4518168fd02 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/compat.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/list.h> #include <linux/slab.h> diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 82c913318b73..ba0e4f93503d 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -7,7 +7,7 @@ */ #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/sysrq.h> diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index de6fccc13124..1b350665c823 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -29,7 +29,7 @@ #include <asm/chpid.h> #include <asm/airq.h> #include <asm/isc.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <asm/fcx.h> #include <asm/nmi.h> #include <asm/crw.h> diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 79823ee9c100..b8006ea9099c 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -24,6 +24,7 @@ #include <linux/delay.h> #include <linux/timer.h> #include <linux/kernel_stat.h> +#include <linux/sched/signal.h> #include <asm/ccwdev.h> #include <asm/cio.h> diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index 8225da619014..4182f60124da 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr) int chsc(void *chsc_area) { typedef struct { char _[4096]; } addr_type; - int cc; + int cc = -EIO; asm volatile( " .insn rre,0xb25f0000,%2,0\n" - " ipm %0\n" + "0: ipm %0\n" " srl %0,28\n" - : "=d" (cc), "=m" (*(addr_type *) chsc_area) + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (cc), "=m" (*(addr_type *) chsc_area) : "d" (chsc_area), "m" (*(addr_type *) chsc_area) : "cc"); trace_s390_cio_chsc(chsc_area, cc); diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 8ad98a902a91..c61164f4528e 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -8,6 +8,8 @@ #include <linux/slab.h> #include <linux/kernel_stat.h> #include <linux/atomic.h> +#include <linux/rculist.h> + #include <asm/debug.h> #include <asm/qdio.h> #include <asm/airq.h> diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 0a7fb83f35e5..be36f1010d75 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o obj-$(CONFIG_ZCRYPT) += zcrypt.o # adapter drivers depend on ap.o and zcrypt.o obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o + +# pkey kernel module +pkey-objs := pkey_api.o +obj-$(CONFIG_PKEY) += pkey.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 56db76c05775..9be4596d8a08 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1107,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr) queue_work(system_long_wq, &ap_scan_work); } -static void ap_reset_domain(void) -{ - int i; - - if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index)) - return; - for (i = 0; i < AP_DEVICES; i++) - ap_rapq(AP_MKQID(i, ap_domain_index)); -} - static void ap_reset_all(void) { int i, j; diff --git a/drivers/s390/crypto/ap_card.c 
b/drivers/s390/crypto/ap_card.c index 1cd9128593e4..cfa161ccc74e 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev, static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); -static ssize_t ap_request_count_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t ap_req_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ap_card *ac = to_ap_card(dev); unsigned int req_cnt; @@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); } -static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); +static ssize_t ap_req_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + aq->total_request_count = 0; + spin_unlock_bh(&ap_list_lock); + atomic_set(&ac->total_request_count, 0); + + return count; +} + +static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store); static ssize_t ap_requestq_count_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 7be67fa9f224..480c58a63769 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume); /* * AP queue related attributes. */ -static ssize_t ap_request_count_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t ap_req_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ap_queue *aq = to_ap_queue(dev); unsigned int req_cnt; @@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); } -static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); +static ssize_t ap_req_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_queue *aq = to_ap_queue(dev); + + spin_lock_bh(&aq->lock); + aq->total_request_count = 0; + spin_unlock_bh(&aq->lock); + + return count; +} + +static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store); static ssize_t ap_requestq_count_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c new file mode 100644 index 000000000000..40f1136f5568 --- /dev/null +++ b/drivers/s390/crypto/pkey_api.c @@ -0,0 +1,1148 @@ +/* + * pkey device driver + * + * Copyright IBM Corp. 2017 + * Author(s): Harald Freudenberger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ * + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/kallsyms.h> +#include <linux/debugfs.h> +#include <asm/zcrypt.h> +#include <asm/cpacf.h> +#include <asm/pkey.h> + +#include "zcrypt_api.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key interface"); + +/* Size of parameter block used for all cca requests/replies */ +#define PARMBSIZE 512 + +/* Size of vardata block used for some of the cca requests/replies */ +#define VARDATASIZE 4096 + +/* + * debug feature data and functions + */ + +static debug_info_t *debug_info; + +#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__) +#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__) +#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__) +#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__) + +static void __init pkey_debug_init(void) +{ + debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); + debug_register_view(debug_info, &debug_sprintf_view); + debug_set_level(debug_info, 3); +} + +static void __exit pkey_debug_exit(void) +{ + debug_unregister(debug_info); +} + +/* inside view of a secure key token (only type 0x01 version 0x04) */ +struct secaeskeytoken { + u8 type; /* 0x01 for internal key token */ + u8 res0[3]; + u8 version; /* should be 0x04 */ + u8 res1[1]; + u8 flag; /* key flags */ + u8 res2[1]; + u64 mkvp; /* master key verification pattern */ + u8 key[32]; /* key value (encrypted) */ + u8 cv[8]; /* control vector */ + u16 bitsize; /* key bit size */ + u16 keysize; /* key byte size */ + u8 tvv[4]; /* token validation value */ +} __packed; + +/* + * Simple check if the token is a valid CCA secure AES key + * token. If keybitsize is given, the bitsize of the key is + * also checked. Returns 0 on success or errno value on failure. + */ +static int check_secaeskeytoken(u8 *token, int keybitsize) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) token; + + if (t->type != 0x01) { + DEBUG_ERR( + "check_secaeskeytoken secure token check failed, type mismatch 0x%02x != 0x01\n", + (int) t->type); + return -EINVAL; + } + if (t->version != 0x04) { + DEBUG_ERR( + "check_secaeskeytoken secure token check failed, version mismatch 0x%02x != 0x04\n", + (int) t->version); + return -EINVAL; + } + if (keybitsize > 0 && t->bitsize != keybitsize) { + DEBUG_ERR( + "check_secaeskeytoken secure token check failed, bitsize mismatch %d != %d\n", + (int) t->bitsize, keybitsize); + return -EINVAL; + } + + return 0; +} + +/* + * Allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block and fill in values + * for the common fields. Returns 0 on success or errno value + * on failure. 
+ */ +static int alloc_and_prep_cprbmem(size_t paramblen, + u8 **pcprbmem, + struct CPRBX **preqCPRB, + struct CPRBX **prepCPRB) +{ + u8 *cprbmem; + size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + struct CPRBX *preqcblk, *prepcblk; + + /* + * allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block + */ + cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL); + if (!cprbmem) + return -ENOMEM; + memset(cprbmem, 0, 2 * cprbplusparamblen); + + preqcblk = (struct CPRBX *) cprbmem; + prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); + + /* fill request cprb struct */ + preqcblk->cprb_len = sizeof(struct CPRBX); + preqcblk->cprb_ver_id = 0x02; + memcpy(preqcblk->func_id, "T2", 2); + preqcblk->rpl_msgbl = cprbplusparamblen; + if (paramblen) { + preqcblk->req_parmb = + ((u8 *) preqcblk) + sizeof(struct CPRBX); + preqcblk->rpl_parmb = + ((u8 *) prepcblk) + sizeof(struct CPRBX); + } + + *pcprbmem = cprbmem; + *preqCPRB = preqcblk; + *prepCPRB = prepcblk; + + return 0; +} + +/* + * Free the cprb memory allocated with the function above. + * If the scrub value is not zero, the memory is filled + * with zeros before freeing (useful if there was some + * clear key material in there). + */ +static void free_cprbmem(void *mem, size_t paramblen, int scrub) +{ + if (scrub) + memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); + kfree(mem); +} + +/* + * Helper function to prepare the xcrb struct + */ +static inline void prep_xcrb(struct ica_xcRB *pxcrb, + u16 cardnr, + struct CPRBX *preqcblk, + struct CPRBX *prepcblk) +{ + memset(pxcrb, 0, sizeof(*pxcrb)); + pxcrb->agent_ID = 0x4341; /* 'CA' */ + pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); + pxcrb->request_control_blk_length = + preqcblk->cprb_len + preqcblk->req_parml; + pxcrb->request_control_blk_addr = (void *) preqcblk; + pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; + pxcrb->reply_control_blk_addr = (void *) prepcblk; +} + +/* + * Helper function which calls zcrypt_send_cprb with + * memory management segment adjusted to kernel space + * so that the copy_from_user called within this + * function do in fact copy from kernel space. + */ +static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb) +{ + int rc; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + rc = zcrypt_send_cprb(xcrb); + set_fs(old_fs); + + return rc; +} + +/* + * Generate (random) AES secure key. + */ +int pkey_genseckey(u16 cardnr, u16 domain, + u32 keytype, struct pkey_seckey *seckey) +{ + int i, rc, keysize; + int seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct kgreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + char key_form[8]; + char key_length[8]; + char key_type1[8]; + char key_type2[8]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid[6]; + } lv2; + } *preqparm; + struct kgrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... 
*/ + } keyblock; + } lv3; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with KG request */ + preqparm = (struct kgreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "KG", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + memcpy(preqparm->lv1.key_form, "OP ", 8); + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8); + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8); + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); + break; + default: + DEBUG_ERR( + "pkey_genseckey unknown/unsupported keytype %d\n", + keytype); + rc = -EINVAL; + goto out; + } + memcpy(preqparm->lv1.key_type1, "AESDATA ", 8); + preqparm->lv2.len = sizeof(struct lv2); + for (i = 0; i < 6; i++) { + preqparm->lv2.keyid[i].len = sizeof(struct keyid); + preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10); + } + preqcblk->req_parml = sizeof(struct kgreqparm); + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "pkey_genseckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "pkey_genseckey secure key generate failure, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct kgrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR( + "pkey_genseckey secure token size mismatch %d != %d bytes\n", + seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(pkey_genseckey); + +/* + * Generate an AES secure key with given key value. + */ +int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_seckey *seckey) +{ + int rc, keysize, seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct cmreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 clrkey[0]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid; + } lv2; + } *preqparm; + struct lv2 *plv2; + struct cmrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... 
*/ + } keyblock; + } lv3; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with CM request */ + preqparm = (struct cmreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "CM", 2); + memcpy(preqparm->rule_array, "AES ", 8); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + break; + default: + DEBUG_ERR( + "pkey_clr2seckey unknown/unsupported keytype %d\n", + keytype); + rc = -EINVAL; + goto out; + } + preqparm->lv1.len = sizeof(struct lv1) + keysize; + memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize); + plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); + plv2->len = sizeof(struct lv2); + plv2->keyid.len = sizeof(struct keyid); + plv2->keyid.attr = 0x30; + preqcblk->req_parml = sizeof(struct cmreqparm) + keysize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "pkey_clr2seckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "pkey_clr2seckey clear key import failure, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct cmrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR( + "pkey_clr2seckey secure token size mismatch %d != %d bytes\n", + seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 1); + return rc; +} +EXPORT_SYMBOL(pkey_clr2seckey); + +/* + * Derive a proteced key from the secure key blob. 
+ */ +int pkey_sec2protkey(u16 cardnr, u16 domain, + const struct pkey_seckey *seckey, + struct pkey_protkey *protkey) +{ + int rc; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct uskreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + u16 attr_len; + u16 attr_flags; + } lv1; + struct lv2 { + u16 len; + u16 attr_len; + u16 attr_flags; + u8 token[0]; /* cca secure key token */ + } lv2 __packed; + } *preqparm; + struct uskrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 attr_len; + u16 attr_flags; + struct cpacfkeyblock { + u8 version; /* version of this struct */ + u8 flags[2]; + u8 algo; + u8 form; + u8 pad1[3]; + u16 keylen; + u8 key[64]; /* the key (keylen bytes) */ + u16 keyattrlen; + u8 keyattr[32]; + u8 pad2[1]; + u8 vptype; + u8 vp[32]; /* verification pattern */ + } keyblock; + } lv3 __packed; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with USK request */ + preqparm = (struct uskreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "US", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len); + preqparm->lv1.attr_flags = 0x0001; + preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE; + preqparm->lv2.attr_len = sizeof(struct lv2) + - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE; + preqparm->lv2.attr_flags = 0x0000; + memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE); + preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "pkey_sec2protkey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "pkey_sec2protkey unwrap secure key failure, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct uskrepparm *) prepcblk->rpl_parmb; + + /* check the returned keyblock */ + if (prepparm->lv3.keyblock.version != 0x01) { + DEBUG_ERR( + "pkey_sec2protkey reply param keyblock version mismatch 0x%02x != 0x01\n", + (int) prepparm->lv3.keyblock.version); + rc = -EIO; + goto out; + } + + /* copy the tanslated protected key */ + switch (prepparm->lv3.keyblock.keylen) { + case 16+32: + protkey->type = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + protkey->type = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + protkey->type = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("pkey_sec2protkey unknown/unsupported keytype %d\n", + prepparm->lv3.keyblock.keylen); + rc = -EIO; + goto out; + } + protkey->len = prepparm->lv3.keyblock.keylen; + memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len); + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(pkey_sec2protkey); + +/* + * Create a protected key from a clear key value. 
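pkey_genseckey, pkey_clr2seckey and pkey_sec2protkey above are exported, so other kernel code can consume them directly. A rough sketch of a hypothetical in-kernel user that generates a random AES-256 secure key and unwraps it into a protected key; the wrapper function name is invented, and struct pkey_seckey / struct pkey_protkey are assumed to be provided by the asm/pkey.h header included at the top of the file:

#include <linux/types.h>
#include <asm/pkey.h>

/* Hypothetical consumer: obtain a CPACF-usable protected key for AES-256. */
static int example_get_protected_key(u16 cardnr, u16 domain,
				     struct pkey_protkey *pkey)
{
	struct pkey_seckey seckey;
	int rc;

	/* have the CCA coprocessor generate a random secure (wrapped) key */
	rc = pkey_genseckey(cardnr, domain, PKEY_KEYTYPE_AES_256, &seckey);
	if (rc)
		return rc;

	/* unwrap the secure key blob into a protected key */
	rc = pkey_sec2protkey(cardnr, domain, &seckey, pkey);
	if (rc)
		return rc;

	/* pkey->protkey now holds pkey->len bytes usable with CPACF */
	return 0;
}

Callers that only hold a secure key blob and no card/domain can instead go through pkey_skey2pkey further down, which uses pkey_findcard to locate a card whose current master key matches the blob's mkvp.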
+ */ +int pkey_clr2protkey(u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_protkey *protkey) +{ + long fc; + int keysize; + u8 paramblock[64]; + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + fc = CPACF_PCKMO_ENC_AES_128_KEY; + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + fc = CPACF_PCKMO_ENC_AES_192_KEY; + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + fc = CPACF_PCKMO_ENC_AES_256_KEY; + break; + default: + DEBUG_ERR("pkey_clr2protkey unknown/unsupported keytype %d\n", + keytype); + return -EINVAL; + } + + /* prepare param block */ + memset(paramblock, 0, sizeof(paramblock)); + memcpy(paramblock, clrkey->clrkey, keysize); + + /* call the pckmo instruction */ + cpacf_pckmo(fc, paramblock); + + /* copy created protected key */ + protkey->type = keytype; + protkey->len = keysize + 32; + memcpy(protkey->protkey, paramblock, keysize + 32); + + return 0; +} +EXPORT_SYMBOL(pkey_clr2protkey); + +/* + * query cryptographic facility from adapter + */ +static int query_crypto_facility(u16 cardnr, u16 domain, + const char *keyword, + u8 *rarray, size_t *rarraylen, + u8 *varray, size_t *varraylen) +{ + int rc; + u16 len; + u8 *mem, *ptr; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct fqreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 data[VARDATASIZE]; + } lv1; + u16 dummylen; + } *preqparm; + size_t parmbsize = sizeof(struct fqreqparm); + struct fqrepparm { + u8 subfunc_code[2]; + u8 lvdata[0]; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with FQ request */ + preqparm = (struct fqreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "FQ", 2); + strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + preqparm->lv1.len = sizeof(preqparm->lv1); + preqparm->dummylen = sizeof(preqparm->dummylen); + preqcblk->req_parml = parmbsize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "query_crypto_facility zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "query_crypto_facility unwrap secure key failure, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct fqrepparm *) prepcblk->rpl_parmb; + ptr = prepparm->lvdata; + + /* check and possibly copy reply rule array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (rarray && rarraylen && *rarraylen > 0) { + *rarraylen = (len > *rarraylen ? 
*rarraylen : len); + memcpy(rarray, ptr, *rarraylen); + } + ptr += len; + } + /* check and possible copy reply var array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (varray && varraylen && *varraylen > 0) { + *varraylen = (len > *varraylen ? *varraylen : len); + memcpy(varray, ptr, *varraylen); + } + ptr += len; + } + +out: + free_cprbmem(mem, parmbsize, 0); + return rc; +} + +/* + * Fetch just the mkvp value via query_crypto_facility from adapter. + */ +static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) +{ + int rc, found = 0; + size_t rlen, vlen; + u8 *rarray, *varray, *pg; + + pg = (u8 *) __get_free_page(GFP_KERNEL); + if (!pg) + return -ENOMEM; + rarray = pg; + varray = pg + PAGE_SIZE/2; + rlen = vlen = PAGE_SIZE/2; + + rc = query_crypto_facility(cardnr, domain, "STATICSA", + rarray, &rlen, varray, &vlen); + if (rc == 0 && rlen > 8*8 && vlen > 184+8) { + if (rarray[64] == '2') { + /* current master key state is valid */ + *mkvp = *((u64 *)(varray + 184)); + found = 1; + } + } + + free_page((unsigned long) pg); + + return found ? 0 : -ENOENT; +} + +/* struct to hold cached mkvp info for each card/domain */ +struct mkvp_info { + struct list_head list; + u16 cardnr; + u16 domain; + u64 mkvp; +}; + +/* a list with mkvp_info entries */ +static LIST_HEAD(mkvp_list); +static DEFINE_SPINLOCK(mkvp_list_lock); + +static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) +{ + int rc = -ENOENT; + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + *mkvp = ptr->mkvp; + rc = 0; + break; + } + } + spin_unlock_bh(&mkvp_list_lock); + + return rc; +} + +static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) +{ + int found = 0; + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + ptr->mkvp = mkvp; + found = 1; + break; + } + } + if (!found) { + ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); + if (!ptr) { + spin_unlock_bh(&mkvp_list_lock); + return; + } + ptr->cardnr = cardnr; + ptr->domain = domain; + ptr->mkvp = mkvp; + list_add(&ptr->list, &mkvp_list); + } + spin_unlock_bh(&mkvp_list_lock); +} + +static void mkvp_cache_scrub(u16 cardnr, u16 domain) +{ + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + list_del(&ptr->list); + kfree(ptr); + break; + } + } + spin_unlock_bh(&mkvp_list_lock); +} + +static void __exit mkvp_cache_free(void) +{ + struct mkvp_info *ptr, *pnext; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) { + list_del(&ptr->list); + kfree(ptr); + } + spin_unlock_bh(&mkvp_list_lock); +} + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key. 
+ */ +int pkey_findcard(const struct pkey_seckey *seckey, + u16 *pcardnr, u16 *pdomain, int verify) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; + struct zcrypt_device_matrix *device_matrix; + u16 card, dom; + u64 mkvp; + int i, rc; + + /* mkvp must not be zero */ + if (t->mkvp == 0) + return -EINVAL; + + /* fetch status of all crypto cards */ + device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix), + GFP_KERNEL); + if (!device_matrix) + return -ENOMEM; + zcrypt_device_status_mask(device_matrix); + + /* walk through all crypto cards */ + for (i = 0; i < MAX_ZDEV_ENTRIES; i++) { + card = AP_QID_CARD(device_matrix->device[i].qid); + dom = AP_QID_QUEUE(device_matrix->device[i].qid); + if (device_matrix->device[i].online && + device_matrix->device[i].functions & 0x04) { + /* an enabled CCA Coprocessor card */ + /* try cached mkvp */ + if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && + t->mkvp == mkvp) { + if (!verify) + break; + /* verify: fetch mkvp from adapter */ + if (fetch_mkvp(card, dom, &mkvp) == 0) { + mkvp_cache_update(card, dom, mkvp); + if (t->mkvp == mkvp) + break; + } + } + } else { + /* Card is offline and/or not a CCA card. */ + /* del mkvp entry from cache if it exists */ + mkvp_cache_scrub(card, dom); + } + } + if (i >= MAX_ZDEV_ENTRIES) { + /* nothing found, so this time without cache */ + for (i = 0; i < MAX_ZDEV_ENTRIES; i++) { + if (!(device_matrix->device[i].online && + device_matrix->device[i].functions & 0x04)) + continue; + card = AP_QID_CARD(device_matrix->device[i].qid); + dom = AP_QID_QUEUE(device_matrix->device[i].qid); + /* fresh fetch mkvp from adapter */ + if (fetch_mkvp(card, dom, &mkvp) == 0) { + mkvp_cache_update(card, dom, mkvp); + if (t->mkvp == mkvp) + break; + } + } + } + if (i < MAX_ZDEV_ENTRIES) { + if (pcardnr) + *pcardnr = card; + if (pdomain) + *pdomain = dom; + rc = 0; + } else + rc = -ENODEV; + + kfree(device_matrix); + return rc; +} +EXPORT_SYMBOL(pkey_findcard); + +/* + * Find card and transform secure key into protected key. + */ +int pkey_skey2pkey(const struct pkey_seckey *seckey, + struct pkey_protkey *protkey) +{ + u16 cardnr, domain; + int rc, verify; + + /* + * The pkey_sec2protkey call may fail when a card has been + * addressed where the master key was changed after last fetch + * of the mkvp into the cache. So first try without verify then + * with verify enabled (thus refreshing the mkvp for each card). 
+ */ + for (verify = 0; verify < 2; verify++) { + rc = pkey_findcard(seckey, &cardnr, &domain, verify); + if (rc) + continue; + rc = pkey_sec2protkey(cardnr, domain, seckey, protkey); + if (rc == 0) + break; + } + + if (rc) + DEBUG_DBG("pkey_skey2pkey failed rc=%d\n", rc); + + return rc; +} +EXPORT_SYMBOL(pkey_skey2pkey); + +/* + * File io functions + */ + +static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc; + + switch (cmd) { + case PKEY_GENSECK: { + struct pkey_genseck __user *ugs = (void __user *) arg; + struct pkey_genseck kgs; + + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + rc = pkey_genseckey(kgs.cardnr, kgs.domain, + kgs.keytype, &kgs.seckey); + DEBUG_DBG("pkey_ioctl pkey_genseckey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(ugs, &kgs, sizeof(kgs))) + return -EFAULT; + break; + } + case PKEY_CLR2SECK: { + struct pkey_clr2seck __user *ucs = (void __user *) arg; + struct pkey_clr2seck kcs; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, + &kcs.clrkey, &kcs.seckey); + DEBUG_DBG("pkey_ioctl pkey_clr2seckey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(ucs, &kcs, sizeof(kcs))) + return -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); + break; + } + case PKEY_SEC2PROTK: { + struct pkey_sec2protk __user *usp = (void __user *) arg; + struct pkey_sec2protk ksp; + + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + rc = pkey_sec2protkey(ksp.cardnr, ksp.domain, + &ksp.seckey, &ksp.protkey); + DEBUG_DBG("pkey_ioctl pkey_sec2protkey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(usp, &ksp, sizeof(ksp))) + return -EFAULT; + break; + } + case PKEY_CLR2PROTK: { + struct pkey_clr2protk __user *ucp = (void __user *) arg; + struct pkey_clr2protk kcp; + + if (copy_from_user(&kcp, ucp, sizeof(kcp))) + return -EFAULT; + rc = pkey_clr2protkey(kcp.keytype, + &kcp.clrkey, &kcp.protkey); + DEBUG_DBG("pkey_ioctl pkey_clr2protkey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(ucp, &kcp, sizeof(kcp))) + return -EFAULT; + memzero_explicit(&kcp, sizeof(kcp)); + break; + } + case PKEY_FINDCARD: { + struct pkey_findcard __user *ufc = (void __user *) arg; + struct pkey_findcard kfc; + + if (copy_from_user(&kfc, ufc, sizeof(kfc))) + return -EFAULT; + rc = pkey_findcard(&kfc.seckey, + &kfc.cardnr, &kfc.domain, 1); + DEBUG_DBG("pkey_ioctl pkey_findcard()=%d\n", rc); + if (rc) + break; + if (copy_to_user(ufc, &kfc, sizeof(kfc))) + return -EFAULT; + break; + } + case PKEY_SKEY2PKEY: { + struct pkey_skey2pkey __user *usp = (void __user *) arg; + struct pkey_skey2pkey ksp; + + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey); + DEBUG_DBG("pkey_ioctl pkey_skey2pkey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(usp, &ksp, sizeof(ksp))) + return -EFAULT; + break; + } + default: + /* unknown/unsupported ioctl cmd */ + return -ENOTTY; + } + + return rc; +} + +/* + * Sysfs and file io operations + */ +static const struct file_operations pkey_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .llseek = no_llseek, + .unlocked_ioctl = pkey_unlocked_ioctl, +}; + +static struct miscdevice pkey_dev = { + .name = "pkey", + .minor = MISC_DYNAMIC_MINOR, + .mode = 0666, + .fops = &pkey_fops, +}; + +/* + * Module init + */ +int __init pkey_init(void) +{ + cpacf_mask_t pckmo_functions; + + /* check for pckmo instructions available */ + if (!cpacf_query(CPACF_PCKMO, 
&pckmo_functions)) + return -EOPNOTSUPP; + if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) || + !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) || + !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY)) + return -EOPNOTSUPP; + + pkey_debug_init(); + + return misc_register(&pkey_dev); +} + +/* + * Module exit + */ +static void __exit pkey_exit(void) +{ + misc_deregister(&pkey_dev); + mkvp_cache_free(); + pkey_debug_exit(); +} + +module_init(pkey_init); +module_exit(pkey_exit); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 144a17941e6f..93015f85d4a6 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -374,7 +374,7 @@ out: return rc; } -static long zcrypt_send_cprb(struct ica_xcRB *xcRB) +long zcrypt_send_cprb(struct ica_xcRB *xcRB) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; @@ -444,6 +444,7 @@ out: AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; } +EXPORT_SYMBOL(zcrypt_send_cprb); static bool is_desired_ep11_card(unsigned int dev_id, unsigned short target_num, @@ -619,7 +620,7 @@ out: return rc; } -static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) +void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) { struct zcrypt_card *zc; struct zcrypt_queue *zq; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 274a59051534..6c94efd23eac 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); +long zcrypt_send_cprb(struct ica_xcRB *xcRB); +void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus); #endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 5e5c11f37b24..2ce0b3eb2efe 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -255,7 +255,8 @@ static void kvm_del_vqs(struct virtio_device *vdev) static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct kvm_device *kdev = to_kvmdev(vdev); int i; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 648373cde4a1..0ed209f3d8b0 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -628,7 +628,8 @@ out: static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 137d22d3a005..838347c44f32 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1630,7 +1630,7 @@ static int aac_acquire_resources(struct aac_dev *dev) if (!dev->sync_mode) { /* After EEH recovery or suspend resume, max_msix count - * may change, therfore updating in init as well. + * may change, therefore updating in init as well. 
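Rounding out the pkey addition above, here is a rough user-space sketch of driving the new ioctl interface. The /dev/pkey node name follows from the miscdevice registration, the PKEY_* ioctl numbers and struct layouts are assumed to come from the exported asm/pkey.h uapi header (not part of this hunk), and card/domain 0 is only a placeholder for a CCA adapter and usage domain valid on the system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>		/* PKEY_GENSECK, PKEY_SEC2PROTK, struct defs */

int main(void)
{
	struct pkey_genseck gs;
	struct pkey_sec2protk s2p;
	int fd, rc = 1;

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return 1;

	/* generate a random AES-128 secure key on adapter 0, domain 0 */
	memset(&gs, 0, sizeof(gs));
	gs.cardnr = 0;
	gs.domain = 0;
	gs.keytype = PKEY_KEYTYPE_AES_128;
	if (ioctl(fd, PKEY_GENSECK, &gs))
		goto out;

	/* turn the returned secure key blob into a protected key */
	memset(&s2p, 0, sizeof(s2p));
	s2p.cardnr = 0;
	s2p.domain = 0;
	s2p.seckey = gs.seckey;			/* struct copy of the blob */
	if (ioctl(fd, PKEY_SEC2PROTK, &s2p))
		goto out;

	printf("protected key: type %u, %u bytes\n",
	       s2p.protkey.type, s2p.protkey.len);
	rc = 0;
out:
	close(fd);
	return rc;
}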
*/ dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix); aac_adapter_start(dev); diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index ae5bfe039fcc..ccbd9e31a5de 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -680,7 +680,7 @@ struct bfi_ioim_req_s { /* * SG elements array within the IO request must be double word - * aligned. This aligment is required to optimize SGM setup for the IO. + * aligned. This alignment is required to optimize SGM setup for the IO. */ struct bfi_sge_s sges[BFI_SGE_INLINE_MAX]; u8 io_timeout; diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index fdd4eb4e41b2..4fc8ed5fe067 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -39,7 +39,7 @@ #include <linux/bitops.h> #include <linux/log2.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/io.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index ed7f3228e234..89ef1a1678d1 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -25,7 +25,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/in.h> #include <linux/kfifo.h> #include <linux/netdevice.h> diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index cea57e27e713..656463ff9ccb 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1387,7 +1387,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, /* * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen' * before determining max Vx_Port descriptor but a buggy FCF could have - * omited either or both MAC Address and Name Identifier descriptors + * omitted either or both MAC Address and Name Identifier descriptors */ num_vlink_desc = rlen / sizeof(*vp); if (num_vlink_desc) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 835c59c777f2..b29afafc2885 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9330,7 +9330,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) * @ioa_cfg: ioa cfg struct * - * Description: This is the second phase of adapter intialization + * Description: This is the second phase of adapter initialization * This function takes care of initilizing the adapter to the point * where it can accept new commands. 
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 6103231104da..fd501f8dbb11 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -36,6 +36,8 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/rculist.h> + #include <asm/unaligned.h> #include <scsi/fc/fc_gs.h> diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c991f3b822f8..b44c3136eb51 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -65,6 +65,8 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/export.h> +#include <linux/rculist.h> + #include <asm/unaligned.h> #include <scsi/libfc.h> diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 834d1212b6d5..07c08ce68d70 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <asm/unaligned.h> #include <net/tcp.h> diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 50cf402dea29..03cb05abc821 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3329,7 +3329,7 @@ static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, * @buf: Data buffer. * @count: Size of the data buffer. * - * This function get called when an user write to the lpfc_stat_data_ctrl + * This function get called when a user write to the lpfc_stat_data_ctrl * sysfs file. This function parse the command written to the sysfs file * and take appropriate action. These commands are used for controlling * driver statistical data collection. diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d977a472f89f..8e886caf2454 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4510,7 +4510,7 @@ lpfc_sli4_rb_setup(struct lpfc_hba *phba) * @phba: Pointer to HBA context object. * @sli_mode: sli mode - 2/3 * - * This function is called by the sli intialization code path + * This function is called by the sli initialization code path * to issue config_port mailbox command. This function restarts the * HBA firmware and issues a config_port mailbox command to configure * the SLI interface in the sli mode specified by sli_mode @@ -4650,11 +4650,11 @@ do_prep_failed: /** - * lpfc_sli_hba_setup - SLI intialization function + * lpfc_sli_hba_setup - SLI initialization function * @phba: Pointer to HBA context object. * - * This function is the main SLI intialization function. This function - * is called by the HBA intialization code, HBA reset code and HBA + * This function is the main SLI initialization function. This function + * is called by the HBA initialization code, HBA reset code and HBA * error attention handler code. Caller is not required to hold any * locks. This function issues config_port mailbox command to configure * the SLI, setup iocb rings and HBQ rings. In the end the function @@ -6324,11 +6324,11 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) } /** - * lpfc_sli4_hba_setup - SLI4 device intialization PCI function + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. * - * This function is the main SLI4 device intialization PCI function. This - * function is called by the HBA intialization code, HBA reset code and + * This function is the main SLI4 device initialization PCI function. 
This + * function is called by the HBA initialization code, HBA reset code and * HBA error attention handler code. Caller is not required to hold any * locks. **/ @@ -12079,7 +12079,7 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * @phba: Pointer to HBA context object. * @wcqe: Pointer to work-queue completion queue entry. * - * This routine handles slow-path WQ entry comsumed event by invoking the + * This routine handles slow-path WQ entry consumed event by invoking the * proper WQ release routine to the slow-path WQ. **/ static void @@ -12451,7 +12451,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * @cq: Pointer to completion queue. * @wcqe: Pointer to work-queue completion queue entry. * - * This routine handles an fast-path WQ entry comsumed event by invoking the + * This routine handles an fast-path WQ entry consumed event by invoking the * proper WQ release routine to the slow-path WQ. **/ static void diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e18bbc66e83b..4e36998a266c 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -28,6 +28,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sched/signal.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 02fe1c4aae2f..bdffb692bded 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -1925,7 +1925,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is - * overwritting information in the buffer. + * overwriting information in the buffer. */ static long _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index f3e17a8c1b07..a44046cff0f3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -390,7 +390,7 @@ struct mpt3_diag_query { * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is - * overwritting information in the buffer. + * overwriting information in the buffer. 
*/ struct mpt3_diag_release { struct mpt3_ioctl_header hdr; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 30b905080c61..6903f03c88af 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1290,7 +1290,7 @@ int osd_req_add_get_attr_list(struct osd_request *or, or->enc_get_attr.total_bytes = total_bytes; OSD_DEBUG( - "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n", + "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%zu)\n", or->get_attr.total_bytes, or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or), or->enc_get_attr.total_bytes, @@ -1677,7 +1677,7 @@ int osd_finalize_request(struct osd_request *or, } } else { /* TODO: I think that for the GET_ATTR command these 2 should - * be reversed to keep them in execution order (for embeded + * be reversed to keep them in execution order (for embedded * targets with low memory footprint) */ ret = _osd_req_finalize_set_attr_list(or); diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 451de6c5e3c9..c47f4b349bac 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -35,7 +35,7 @@ static const char * osst_version = "0.99.4"; #include <linux/fs.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/slab.h> @@ -3435,7 +3435,7 @@ static ssize_t osst_write(struct file * filp, const char __user * buf, size_t co /* Write must be integral number of blocks */ if (STp->block_size != 0 && (count % STp->block_size) != 0) { - printk(KERN_ERR "%s:E: Write (%Zd bytes) not multiple of tape block size (%d%c).\n", + printk(KERN_ERR "%s:E: Write (%zd bytes) not multiple of tape block size (%d%c).\n", name, count, STp->block_size<1024? STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k'); retval = (-EINVAL); @@ -3756,7 +3756,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo if ((count % STp->block_size) != 0) { printk(KERN_WARNING - "%s:W: Read (%Zd bytes) not multiple of tape block size (%d%c).\n", name, count, + "%s:W: Read (%zd bytes) not multiple of tape block size (%d%c).\n", name, count, STp->block_size<1024?STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k'); } @@ -3815,7 +3815,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo if (transfer == 0) { printk(KERN_WARNING - "%s:W: Nothing can be transferred, requested %Zd, tape block size (%d%c).\n", + "%s:W: Nothing can be transferred, requested %zd, tape block size (%d%c).\n", name, count, STp->block_size < 1024? 
STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k'); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f201f4099620..f610103994af 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2163,6 +2163,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, + vha->gnl.ldma); + if (vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 40ca75bbcb9d..84c9098cc089 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -13,28 +13,25 @@ /* BSG support for ELS/CT pass through */ void -qla2x00_bsg_job_done(void *data, void *ptr, int res) +qla2x00_bsg_job_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + srb_t *sp = ptr; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); - sp->free(vha, sp); + sp->free(sp); } void -qla2x00_bsg_sp_free(void *data, void *ptr) +qla2x00_bsg_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = sp->fcport->vha; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; - - struct qla_hw_data *ha = vha->hw; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; if (sp->type == SRB_FXIOCB_BCMD) { @@ -62,7 +59,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr) sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) kfree(sp->fcport); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } int @@ -394,7 +391,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_unmap_sg; } @@ -542,7 +539,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7017, "qla2x00_start_sp failed=%d.\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_free_fcport; } @@ -2578,6 +2575,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) done: spin_unlock_irqrestore(&ha->hardware_lock, flags); - sp->free(vha, sp); + sp->free(sp); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f14adfab018..625d438e3cce 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -55,6 +55,8 @@ #include "qla_settings.h" +#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR) + /* * Data bit definitions */ @@ -251,6 +253,14 @@ #define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ #include "qla_fw.h" + +struct name_list_extended { + struct get_name_list_extended *l; + dma_addr_t ldma; + struct list_head fcports; /* protect by sess_list */ + u32 size; + u8 sent; +}; /* * Timeout timer counts in seconds */ @@ -309,6 +319,17 @@ struct els_logo_payload { uint8_t wwpn[WWN_SIZE]; }; +struct ct_arg { + void *iocb; + u16 nport_handle; + dma_addr_t req_dma; + dma_addr_t rsp_dma; + u32 req_size; + u32 rsp_size; + void *req; + void *rsp; +}; + /* * SRB extensions. 
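The qla2xxx hunks above change the srb completion hooks from done(vha, sp, res) and free(vha, sp) to done(sp, res) and free(sp); the owning host is now reached through the srb itself via the vha member added to struct srb in the header changes that follow. Under the new convention a completion callback looks roughly like this (the function name and debug message are illustrative only):

static void example_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;	/* replaces the old 'data' argument */

	ql_dbg(ql_dbg_io, vha, 0xffff,
	    "%s: sp %p type %d completed, res %d\n", __func__, sp, sp->type, res);

	sp->free(sp);				/* free() no longer takes vha either */
}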
*/ @@ -320,6 +341,7 @@ struct srb_iocb { #define SRB_LOGIN_COND_PLOGI BIT_1 #define SRB_LOGIN_SKIP_PRLI BIT_2 uint16_t data[2]; + u32 iop[2]; } logio; struct { #define ELS_DCMD_TIMEOUT 20 @@ -372,6 +394,16 @@ struct srb_iocb { __le16 comp_status; struct completion comp; } abt; + struct ct_arg ctarg; + struct { + __le16 in_mb[28]; /* fr fw */ + __le16 out_mb[28]; /* to fw */ + void *out, *in; + dma_addr_t out_dma, in_dma; + } mbx; + struct { + struct imm_ntfy_from_isp *ntfy; + } nack; } u; struct timer_list timer; @@ -392,23 +424,31 @@ struct srb_iocb { #define SRB_FXIOCB_BCMD 11 #define SRB_ABT_CMD 12 #define SRB_ELS_DCMD 13 +#define SRB_MB_IOCB 14 +#define SRB_CT_PTHRU_CMD 15 +#define SRB_NACK_PLOGI 16 +#define SRB_NACK_PRLI 17 +#define SRB_NACK_LOGO 18 typedef struct srb { atomic_t ref_count; struct fc_port *fcport; + struct scsi_qla_host *vha; uint32_t handle; uint16_t flags; uint16_t type; char *name; int iocbs; struct qla_qpair *qpair; + u32 gen1; /* scratch */ + u32 gen2; /* scratch */ union { struct srb_iocb iocb_cmd; struct bsg_job *bsg_job; struct srb_cmd scmd; } u; - void (*done)(void *, void *, int); - void (*free)(void *, void *); + void (*done)(void *, int); + void (*free)(void *); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) @@ -1794,6 +1834,7 @@ typedef struct { #define SS_RESIDUAL_OVER BIT_10 #define SS_SENSE_LEN_VALID BIT_9 #define SS_RESPONSE_INFO_LEN_VALID BIT_8 +#define SS_SCSI_STATUS_BYTE 0xff #define SS_RESERVE_CONFLICT (BIT_4 | BIT_3) #define SS_BUSY_CONDITION BIT_3 @@ -1975,6 +2016,84 @@ struct mbx_entry { uint8_t port_name[WWN_SIZE]; }; +#ifndef IMMED_NOTIFY_TYPE +#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ +/* + * ISP queue - immediate notify entry structure definition. + * This is sent by the ISP to the Target driver. + * This IOCB would have report of events sent by the + * initiator, that needs to be handled by the target + * driver immediately. + */ +struct imm_ntfy_from_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + uint32_t sys_define_2; /* System defined. 
*/ + target_id_t target; + uint16_t lun; + uint8_t target_id; + uint8_t reserved_1; + uint16_t status_modifier; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; +#define SRR_IU_DATA_IN 0x1 +#define SRR_IU_DATA_OUT 0x5 +#define SRR_IU_STATUS 0x7 + uint16_t srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; + uint16_t nport_handle; + uint16_t reserved_2; + uint16_t flags; +#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 +#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 + uint16_t srr_rx_id; + uint16_t status; + uint8_t status_subcode; + uint8_t fw_handle; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_ox_id; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; + uint8_t vp_index; + uint32_t reserved_5; + uint8_t port_id[3]; + uint8_t reserved_6; + } isp24; + } u; + uint16_t reserved_7; + uint16_t ox_id; +} __packed; +#endif + /* * ISP request and response queue entry sizes */ @@ -2022,10 +2141,22 @@ typedef struct { #define FC4_TYPE_OTHER 0x0 #define FC4_TYPE_UNKNOWN 0xff +/* mailbox command 4G & above */ +struct mbx_24xx_entry { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define1; + uint8_t entry_status; + uint32_t handle; + uint16_t mb[28]; +}; + +#define IOCB_SIZE 64 + /* * Fibre channel port type. */ - typedef enum { +typedef enum { FCT_UNKNOWN, FCT_RSCN, FCT_SWITCH, @@ -2034,6 +2165,74 @@ typedef struct { FCT_TARGET } fc_port_type_t; +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_IN_PROGRESS, + QLA_SESS_DELETED, +}; + +enum qlt_plogi_link_t { + QLT_PLOGI_LINK_SAME_WWN, + QLT_PLOGI_LINK_CONFLICT, + QLT_PLOGI_LINK_MAX +}; + +struct qlt_plogi_ack_t { + struct list_head list; + struct imm_ntfy_from_isp iocb; + port_id_t id; + int ref_count; + void *fcport; +}; + +struct ct_sns_desc { + struct ct_sns_pkt *ct_sns; + dma_addr_t ct_sns_dma; +}; + +enum discovery_state { + DSC_DELETED, + DSC_GID_PN, + DSC_GNL, + DSC_LOGIN_PEND, + DSC_LOGIN_FAILED, + DSC_GPDB, + DSC_GPSC, + DSC_UPD_FCPORT, + DSC_LOGIN_COMPLETE, + DSC_DELETE_PEND, +}; + +enum login_state { /* FW control Target side */ + DSC_LS_LLIOCB_SENT = 2, + DSC_LS_PLOGI_PEND, + DSC_LS_PLOGI_COMP, + DSC_LS_PRLI_PEND, + DSC_LS_PRLI_COMP, + DSC_LS_PORT_UNAVAIL, + DSC_LS_PRLO_PEND = 9, + DSC_LS_LOGO_PEND, +}; + +enum fcport_mgt_event { + FCME_RELOGIN = 1, + FCME_RSCN, + FCME_GIDPN_DONE, + FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ + FCME_GNL_DONE, + FCME_GPSC_DONE, + FCME_GPDB_DONE, + FCME_GPNID_DONE, + FCME_DELETE_DONE, +}; + +enum rscn_addr_format { + RSCN_PORT_ADDR, + RSCN_AREA_ADDR, + RSCN_DOM_ADDR, + RSCN_FAB_ADDR, +}; + /* * Fibre channel port structure. 
*/ @@ -2047,6 +2246,29 @@ typedef struct fc_port { uint16_t loop_id; uint16_t old_loop_id; + unsigned int conf_compl_supported:1; + unsigned int deleted:2; + unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int logo_ack_needed:1; + unsigned int keep_nport_handle:1; + unsigned int send_els_logo:1; + unsigned int login_pause:1; + unsigned int login_succ:1; + + struct fc_port *conflict; + unsigned char logout_completed; + int generation; + + struct se_session *se_sess; + struct kref sess_kref; + struct qla_tgt *tgt; + unsigned long expires; + struct list_head del_list_entry; + struct work_struct free_work; + + struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; + uint16_t tgt_id; uint16_t old_tgt_id; @@ -2075,8 +2297,30 @@ typedef struct fc_port { unsigned long retry_delay_timestamp; struct qla_tgt_sess *tgt_session; + struct ct_sns_desc ct_desc; + enum discovery_state disc_state; + enum login_state fw_login_state; + u32 login_gen, last_login_gen; + u32 rscn_gen, last_rscn_gen; + u32 chip_reset; + struct list_head gnl_entry; + struct work_struct del_work; + u8 iocb[IOCB_SIZE]; } fc_port_t; +#define QLA_FCPORT_SCAN 1 +#define QLA_FCPORT_FOUND 2 + +struct event_arg { + enum fcport_mgt_event event; + fc_port_t *fcport; + srb_t *sp; + port_id_t id; + u16 data[2], rc; + u8 port_name[WWN_SIZE]; + u32 iop[2]; +}; + #include "qla_mr.h" /* @@ -2154,6 +2398,10 @@ static const char * const port_state_str[] = { #define GFT_ID_REQ_SIZE (16 + 4) #define GFT_ID_RSP_SIZE (16 + 32) +#define GID_PN_CMD 0x121 +#define GID_PN_REQ_SIZE (16 + 8) +#define GID_PN_RSP_SIZE (16 + 4) + #define RFT_ID_CMD 0x217 #define RFT_ID_REQ_SIZE (16 + 4 + 32) #define RFT_ID_RSP_SIZE 16 @@ -2479,6 +2727,10 @@ struct ct_sns_req { uint8_t reserved; uint8_t port_name[3]; } gff_id; + + struct { + uint8_t port_name[8]; + } gid_pn; } req; }; @@ -2558,6 +2810,10 @@ struct ct_sns_rsp { struct { uint8_t fc4_features[128]; } gff_id; + struct { + uint8_t reserved; + uint8_t port_id[3]; + } gid_pn; } rsp; }; @@ -2699,11 +2955,11 @@ struct isp_operations { uint16_t (*calc_req_entries) (uint16_t); void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); - void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, + void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *); + void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, + uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); @@ -2765,13 +3021,21 @@ enum qla_work_type { QLA_EVT_AEN, QLA_EVT_IDC_ACK, QLA_EVT_ASYNC_LOGIN, - QLA_EVT_ASYNC_LOGIN_DONE, QLA_EVT_ASYNC_LOGOUT, QLA_EVT_ASYNC_LOGOUT_DONE, QLA_EVT_ASYNC_ADISC, QLA_EVT_ASYNC_ADISC_DONE, QLA_EVT_UEVENT, QLA_EVT_AENFX, + QLA_EVT_GIDPN, + QLA_EVT_GPNID, + QLA_EVT_GPNID_DONE, + QLA_EVT_NEW_SESS, + QLA_EVT_GPDB, + QLA_EVT_GPSC, + QLA_EVT_UPD_FCPORT, + QLA_EVT_GNL, + QLA_EVT_NACK, }; @@ -2807,6 +3071,23 @@ struct qla_work_evt { struct { srb_t *sp; } iosb; + struct { + port_id_t id; + } gpnid; + struct { + port_id_t id; + u8 port_name[8]; + void *pla; + } new_sess; + struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ + fc_port_t *fcport; + u8 opt; + } fcport; + struct { + fc_port_t *fcport; + u8 iocb[IOCB_SIZE]; + int type; + } nack; } u; }; @@ -2943,6 +3224,7 @@ struct qla_qpair { struct qla_hw_data *hw; struct work_struct q_work; struct list_head 
qp_list_elem; /* vha->qp_list */ + struct scsi_qla_host *vha; }; /* Place holder for FW buffer parameters */ @@ -2963,7 +3245,6 @@ struct qlt_hw_data { /* Protected by hw lock */ uint32_t enable_class_2:1; uint32_t enable_explicit_conf:1; - uint32_t ini_mode_force_reverse:1; uint32_t node_name_set:1; dma_addr_t atio_dma; /* Physical address. */ @@ -3115,6 +3396,7 @@ struct qla_hw_data { #define FLOGI_SP_SUPPORT BIT_13 uint8_t port_no; /* Physical port of adapter */ + uint8_t exch_starvation; /* Timeout timers. */ uint8_t loop_down_abort_time; /* port down timer */ @@ -3682,7 +3964,7 @@ typedef struct scsi_qla_host { #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ -#define SCR_PENDING 21 /* SCR in target mode */ +#define FREE_BIT 21 #define PORT_UPDATE_NEEDED 22 #define FX00_RESET_RECOVERY 23 #define FX00_TARGET_SCAN 24 @@ -3736,7 +4018,9 @@ typedef struct scsi_qla_host { /* list of commands waiting on workqueue */ struct list_head qla_cmd_list; struct list_head qla_sess_op_cmd_list; + struct list_head unknown_atio_list; spinlock_t cmd_list_lock; + struct delayed_work unknown_atio_work; /* Counter to detect races between ELS and RSCN events */ atomic_t generation_tick; @@ -3788,6 +4072,10 @@ typedef struct scsi_qla_host { struct qla8044_reset_template reset_tmplt; struct qla_tgt_counters tgt_counters; uint16_t bbcr; + struct name_list_extended gnl; + /* Count of active session/fcport */ + int fcport_count; + wait_queue_head_t fcport_waitQ; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 34272fde8a5b..b48cce696bac 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -18,7 +18,7 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; unsigned long flags; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; seq_printf(s, "%s\n",vha->host_str); @@ -26,12 +26,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) seq_printf(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(sess, &vha->vp_fcports, list) seq_printf(s, "%02x:%02x:%02x %8phC %d\n", - sess->s_id.b.domain,sess->s_id.b.area, - sess->s_id.b.al_pa, sess->port_name, - sess->loop_id); - } + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->port_name, + sess->loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 8a2368b32dec..1f808928763b 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -72,6 +72,37 @@ struct port_database_24xx { uint8_t reserved_3[24]; }; +/* + * MB 75h returns a list of DB entries similar to port_database_24xx(64B). + * However, in this case it returns 1st 40 bytes. + */ +struct get_name_list_extended { + __le16 flags; + u8 current_login_state; + u8 last_login_state; + u8 hard_address[3]; + u8 reserved_1; + u8 port_id[3]; + u8 sequence_id; + __le16 port_timer; + __le16 nport_handle; /* N_PORT handle. 
*/ + __le16 receive_data_size; + __le16 reserved_2; + + /* PRLI SVC Param are Big endian */ + u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */ + u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */ + u8 port_name[WWN_SIZE]; + u8 node_name[WWN_SIZE]; +}; + +/* MB 75h: This is the short version of the database */ +struct get_name_list { + u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */ + __le16 nport_handle; + u8 reserved; +}; + struct vp_database_24xx { uint16_t vp_status; uint8_t options; @@ -1270,27 +1301,76 @@ struct vp_config_entry_24xx { }; #define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ +enum VP_STATUS { + VP_STAT_COMPL, + VP_STAT_FAIL, + VP_STAT_ID_CHG, + VP_STAT_SNS_TO, /* timeout */ + VP_STAT_SNS_RJT, + VP_STAT_SCR_TO, /* timeout */ + VP_STAT_SCR_RJT, +}; + +enum VP_FLAGS { + VP_FLAGS_CON_FLOOP = 1, + VP_FLAGS_CON_P2P = 2, + VP_FLAGS_CON_FABRIC = 3, + VP_FLAGS_NAME_VALID = BIT_5, +}; + struct vp_rpt_id_entry_24xx { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. */ uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ - - uint32_t handle; /* System handle. */ - - uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */ - /* Format 1 -- | VP count |. */ - uint16_t vp_idx; /* Format 0 -- Reserved. */ - /* Format 1 -- VP status and index. */ + uint32_t resv1; + uint8_t vp_acquired; + uint8_t vp_setup; + uint8_t vp_idx; /* Format 0=reserved */ + uint8_t vp_status; /* Format 0=reserved */ uint8_t port_id[3]; uint8_t format; - - uint8_t vp_idx_map[16]; - - uint8_t reserved_4[24]; - uint16_t bbcr; - uint8_t reserved_5[6]; + union { + struct { + /* format 0 loop */ + uint8_t vp_idx_map[16]; + uint8_t reserved_4[32]; + } f0; + struct { + /* format 1 fabric */ + uint8_t vpstat1_subcode; /* vp_status=1 subcode */ + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[6]; + } f1; + struct { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t rsv6; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint32_t remote_nport_id; + uint32_t reserved_5; + } f2; + } u; }; #define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index afa0116a163b..b3d6441d1d90 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -73,6 +73,10 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); +struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, + enum qla_work_type); +extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); @@ -94,6 +98,13 @@ extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, int, int); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); +void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); +int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, + void *); +int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); /* * Global Data in qla_os.c source file. @@ -127,6 +138,7 @@ extern int ql2xmdenable; extern int ql2xexlogins; extern int ql2xexchoffld; extern int ql2xfwholdabts; +extern int ql2xmvasynctoatio; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -135,8 +147,6 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *, - fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, @@ -176,9 +186,13 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); -extern void qla2x00_sp_compl(void *, void *, int); -extern void qla2xxx_qpair_sp_free_dma(void *, void *); -extern void qla2xxx_qpair_sp_compl(void *, void *, int); +extern void qla2x00_sp_compl(void *, int); +extern void qla2xxx_qpair_sp_free_dma(void *); +extern void qla2xxx_qpair_sp_compl(void *, int); +extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); +void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); /* * Global Functions in qla_mid.c source file. 
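The qla_gbl.h hunks above export qla2x00_alloc_work()/qla2x00_post_work() and declare the new qla24xx_post_*_work() helpers that hand discovery steps off to the DPC thread. For reference, a poster built on these prototypes looks roughly like the sketch below; it is illustrative only (not part of the diff) and simply mirrors qla24xx_post_gpsc_work() as added later in the qla_gs.c hunks, reusing the QLA_EVT_GPSC event and the u.fcport payload from the qla_def.h hunk.

/* Illustrative sketch only -- mirrors qla24xx_post_gpsc_work() below. */
int qla24xx_post_example_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	/* Allocate a work element tagged with the event to dispatch. */
	e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
	if (!e)
		return QLA_FUNCTION_FAILED;

	/* Record which port the DPC worker should operate on. */
	e->u.fcport.fcport = fcport;

	/* Queue it; the DPC worker dequeues and dispatches it later. */
	return qla2x00_post_work(vha, e);
}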
@@ -201,7 +215,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); -extern void qla2x00_sp_free_dma(void *, void *); +extern void qla2x00_sp_free_dma(void *); extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); @@ -302,9 +316,6 @@ extern int qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); extern int -qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *); - -extern int qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); extern int @@ -483,6 +494,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); extern irqreturn_t qla2xxx_msix_rsp_q(int irq, void *dev_id); +fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); +fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); +fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); /* * Global Function Prototypes in qla_sup.c source file. @@ -574,8 +588,8 @@ extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_gs.c source file. */ -extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); -extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); +extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); +extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *); extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); @@ -591,6 +605,23 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); +extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, + struct ct_sns_rsp *, const char *); +extern void qla2x00_async_iocb_timeout(void *data); +extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *); +int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *); +void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *); + +extern void qla2x00_free_fcport(fc_port_t *); + +extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); +extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); +void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*); +void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); + +int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); +int qla2x00_mgmt_svr_login(scsi_qla_host_t *); /* * Global Function Prototypes in qla_attr.c source file. 
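With the prototype change above, qla2x00_prep_ms_iocb()/qla24xx_prep_ms_iocb() now take a struct ct_arg carrying the request/response sizes, DMA addresses and N_Port handle in place of the old (req_size, rsp_size) pair. Roughly, a converted caller fills the argument block as in the sketch below; this is illustrative only, with the values mirroring the GA_NXT conversion in the qla_gs.c hunks further down.

	struct ct_arg arg;

	/* Describe the CT pass-through transaction for the MS IOCB. */
	arg.iocb = ha->ms_iocb;		/* IOCB buffer to initialize */
	arg.req_dma = ha->ct_sns_dma;	/* CT request buffer (DMA) */
	arg.rsp_dma = ha->ct_sns_dma;	/* response reuses the same buffer */
	arg.req_size = GA_NXT_REQ_SIZE;
	arg.rsp_size = GA_NXT_RSP_SIZE;
	arg.nport_handle = NPH_SNS;	/* directory (name) server handle */

	/* One call replaces the old (vha, req_size, rsp_size) form. */
	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);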
@@ -702,10 +733,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); -extern void qla2x00_sp_free(void *, void *); +extern void qla2x00_sp_free(void *); extern void qla2x00_sp_timeout(unsigned long); -extern void qla2x00_bsg_job_done(void *, void *, int); -extern void qla2x00_bsg_sp_free(void *, void *); +extern void qla2x00_bsg_job_done(void *, int); +extern void qla2x00_bsg_sp_free(void *); extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); /* Interrupt related */ @@ -803,4 +834,17 @@ extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t); extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *); +void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *, + struct fc_port *, enum qlt_plogi_link_t); +void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *); +extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool); +extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *); +extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, + uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); +void qla24xx_delete_sess_fn(struct work_struct *); +void qlt_unknown_atio_work_fn(struct work_struct *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ee3df8794806..ab0f873fd6a1 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -24,12 +24,12 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *); * Returns a pointer to the @ha's ms_iocb. */ void * -qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; - ms_pkt = ha->ms_iocb; + ms_pkt = (ms_iocb_entry_t *)arg->iocb; memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); ms_pkt->entry_type = MS_IOCB_TYPE; @@ -39,15 +39,15 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ms_pkt->cmd_dsd_count = cpu_to_le16(1); ms_pkt->total_dsd_count = cpu_to_le16(2); - ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); - ms_pkt->req_bytecount = cpu_to_le32(req_size); + ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); + ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); - ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma)); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; - ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests++; @@ -64,29 +64,29 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) * Returns a pointer to the @ha's ms_iocb. 
*/ void * -qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; struct ct_entry_24xx *ct_pkt; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; + ct_pkt = (struct ct_entry_24xx *)arg->iocb; memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(NPH_SNS); + ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ct_pkt->cmd_dsd_count = cpu_to_le16(1); ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); + ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); + ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma)); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; @@ -117,7 +117,7 @@ qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) return &p->p.req; } -static int +int qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, struct ct_sns_rsp *ct_rsp, const char *routine) { @@ -183,14 +183,21 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_ga_nxt(vha, fcport); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GA_NXT_REQ_SIZE; + arg.rsp_size = GA_NXT_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue GA_NXT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE, - GA_NXT_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, @@ -269,16 +276,24 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_gid_pt_data *gid_data; struct qla_hw_data *ha = vha->hw; uint16_t gid_pt_rsp_size; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gid_pt(vha, list); gid_data = NULL; gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GID_PT_REQ_SIZE; + arg.rsp_size = gid_pt_rsp_size; + arg.nport_handle = NPH_SNS; + /* Issue GID_PT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, - gid_pt_rsp_size); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); @@ -344,15 +359,22 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gpn_id(vha, list); + arg.iocb = ha->ms_iocb; + 
arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPN_ID_REQ_SIZE; + arg.rsp_size = GPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, - GPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, @@ -406,15 +428,22 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gnn_id(vha, list); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GNN_ID_REQ_SIZE; + arg.rsp_size = GNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, - GNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, @@ -473,14 +502,21 @@ qla2x00_rft_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rft_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFT_ID_REQ_SIZE; + arg.rsp_size = RFT_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFT_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE, - RFT_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD, @@ -526,6 +562,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, @@ -533,10 +570,16 @@ qla2x00_rff_id(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFF_ID_REQ_SIZE; + arg.rsp_size = RFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFF_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE, - RFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD, @@ -584,14 +627,21 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rnn_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RNN_ID_REQ_SIZE; + arg.rsp_size = RNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE, - RNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); @@ -651,6 +701,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 
0x2050, @@ -658,10 +709,17 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = 0; + arg.rsp_size = RSNN_NN_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RSNN_NN */ /* Prepare common MS IOCB */ /* Request size adjusted after CT preparation */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD, @@ -1103,7 +1161,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha) * * Returns 0 on success. */ -static int +int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) { int ret, rval; @@ -2425,15 +2483,22 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFPN_ID_REQ_SIZE; + arg.rsp_size = GFPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, - GFPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, @@ -2471,36 +2536,6 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) return (rval); } -static inline void * -qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size, - uint32_t rsp_size) -{ - struct ct_entry_24xx *ct_pkt; - struct qla_hw_data *ha = vha->hw; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; - memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); - - ct_pkt->entry_type = CT_IOCB_TYPE; - ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); - ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); - - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; - ct_pkt->vp_index = vha->vp_idx; - - return ct_pkt; -} - static inline struct ct_sns_req * qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, @@ -2530,9 +2565,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) int rval; uint16_t i; struct qla_hw_data *ha = vha->hw; - ms_iocb_entry_t *ms_pkt; + ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -2543,11 +2579,17 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) if (rval) return rval; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPSC_REQ_SIZE; + arg.rsp_size = GPSC_RSP_SIZE; + arg.nport_handle = vha->mgmt_svr_loop_id; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, - GPSC_RSP_SIZE); + ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); /* Prepare CT request 
*/ ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, @@ -2641,6 +2683,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; uint8_t fcp_scsi_features = 0; + struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { /* Set default FC4 Type as UNKNOWN so the default is to @@ -2651,9 +2694,15 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) if (!IS_FWI2_CAPABLE(ha)) continue; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFF_ID_REQ_SIZE; + arg.rsp_size = GFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE, - GFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, @@ -2692,3 +2741,538 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) break; } } + +/* GID_PN completion processing. */ +void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login state %d \n", + __func__, fcport->port_name, fcport->fw_login_state); + + if (ea->sp->gen2 != fcport->login_gen) { + /* PLOGI/PRLI/LOGO came in while cmd was out.*/ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); + return; + } + + if (!ea->rc) { + if (ea->sp->gen1 == fcport->rscn_gen) { + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + if (fcport->d_id.b24 == ea->id.b24) { + /* cable plugged into the same place */ + switch (vha->host->active_mode) { + case MODE_TARGET: + /* NOOP. 
let the other guy login to us.*/ + break; + case MODE_INITIATOR: + case MODE_DUAL: + default: + if (atomic_read(&fcport->state) == + FCS_ONLINE) + break; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gnl_work(vha, fcport); + break; + } + } else { /* fcport->d_id.b24 != ea->id.b24 */ + fcport->d_id.b24 = ea->id.b24; + if (fcport->deleted == QLA_SESS_DELETED) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } + } + } else { /* ea->sp->gen1 != fcport->rscn_gen */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + /* rscn came in while cmd was out */ + qla24xx_post_gidpn_work(vha, fcport); + } + } else { /* ea->rc */ + /* cable pulled */ + if (ea->sp->gen1 == fcport->rscn_gen) { + if (ea->sp->gen2 == fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", __func__, + __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC login\n", __func__, __LINE__, + fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + } + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", __func__, __LINE__, + fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + } + } +} /* gidpn_event */ + +static void qla2x00_async_gidpn_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id; + struct event_arg ea; + + fcport->flags &= ~FCF_ASYNC_SENT; + + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.id.b.domain = id[0]; + ea.id.b.area = id[1]; + ea.id.b.al_pa = id[2]; + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GIDPN_DONE; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC ID %3phC \n", + sp->name, res, fcport->port_name, id); + + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GID_PN; + fcport->scan_state = QLA_FCPORT_SCAN; + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gidpn"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD, + GID_PN_RSP_SIZE); + + /* GIDPN req */ + memcpy(ct_req->req.gid_pn.port_name, fcport->port_name, + WWN_SIZE); + + /* req & rsp use the same buffer */ + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gidpn_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0x206f, + "Async-%s 
- %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n", + sp->name, fcport->port_name, + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + int ls; + + ls = atomic_read(&vha->loop_state); + if (((ls != LOOP_READY) && (ls != LOOP_UP)) || + test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static void qla24xx_async_gpsc_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC \n", + sp->name, res, fcport->port_name); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res == (DID_ERROR << 16)) { + /* entry status error */ + goto done; + } else if (res) { + if ((ct_rsp->header.reason_code == + CT_REASON_INVALID_COMMAND_CODE) || + (ct_rsp->header.reason_code == + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x205a, + "GPSC command unsupported, disabling " + "query.\n"); + ha->flags.gpsc_supported = 0; + res = QLA_SUCCESS; + } + } else { + switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { + case BIT_15: + fcport->fp_speed = PORT_SPEED_1GB; + break; + case BIT_14: + fcport->fp_speed = PORT_SPEED_2GB; + break; + case BIT_13: + fcport->fp_speed = PORT_SPEED_4GB; + break; + case BIT_12: + fcport->fp_speed = PORT_SPEED_10GB; + break; + case BIT_11: + fcport->fp_speed = PORT_SPEED_8GB; + break; + case BIT_10: + fcport->fp_speed = PORT_SPEED_16GB; + break; + case BIT_8: + fcport->fp_speed = PORT_SPEED_32GB; + break; + } + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", + sp->name, + fcport->fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + } +done: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPSC_DONE; + ea.rc = res; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpsc"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); + + /* GPSC req */ + memcpy(ct_req->req.gpsc.port_name, fcport->port_name, + WWN_SIZE); + + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + 
sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla24xx_async_gpsc_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", + sp->name, fcport->port_name, sp->handle, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) +{ + struct qla_work_evt *e; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.gpnid.id = *id; + return qla2x00_post_work(vha, e); +} + +void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp) +{ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +} + +void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport; + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + /* cable moved. just plugged in */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + + fcport->rscn_gen++; + fcport->d_id = ea->id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + /* create new fcport */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post new sess\n", + __func__, __LINE__, ea->port_name); + + qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL); + } +} + +static void qla2x00_async_gpnid_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct ct_sns_req *ct_req = + (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; + struct ct_sns_rsp *ct_rsp = + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; + struct event_arg ea; + struct qla_work_evt *e; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x ID %3phC. %8phC\n", + sp->name, res, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); + + memset(&ea, 0, sizeof(ea)); + memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); + ea.sp = sp; + ea.id.b.domain = ct_req->req.port_id.port_id[0]; + ea.id.b.area = ct_req->req.port_id.port_id[1]; + ea.id.b.al_pa = ct_req->req.port_id.port_id[2]; + ea.rc = res; + ea.event = FCME_GPNID_DONE; + + qla2x00_fcport_event_handler(vha, &ea); + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); + if (!e) { + /* please ignore kernel warning. otherwise, we have mem leak. 
*/ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); + return; + } + + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); +} + +/* Get WWPN with Nport ID. */ +int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + if (!vha->flags.online) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpnid"; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); + + /* GPN_ID req */ + ct_req->req.port_id.port_id[0] = id->b.domain; + ct_req->req.port_id.port_id[1] = id->b.area; + ct_req->req.port_id.port_id[2] = id->b.al_pa; + + sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gpnid_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x ID %3phC.\n", sp->name, + sp->handle, ct_req->req.port_id.port_id); + return rval; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 7b6317c8c2e9..32fb9007f137 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -30,15 +30,15 @@ static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); static int qla2x00_configure_local_loop(scsi_qla_host_t *); static int qla2x00_configure_fabric(scsi_qla_host_t *); -static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); -static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, - uint16_t *); - +static int 
qla2x00_find_all_fabric_devs(scsi_qla_host_t *); static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); +static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); +static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, + struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ @@ -47,29 +47,27 @@ qla2x00_sp_timeout(unsigned long __data) { srb_t *sp = (srb_t *)__data; struct srb_iocb *iocb; - fc_port_t *fcport = sp->fcport; - struct qla_hw_data *ha = fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; struct req_que *req; unsigned long flags; - spin_lock_irqsave(&ha->hardware_lock, flags); - req = ha->req_q_map[0]; + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + req = vha->hw->req_q_map[0]; req->outstanding_cmds[sp->handle] = NULL; iocb = &sp->u.iocb_cmd; iocb->timeout(sp); - sp->free(fcport->vha, sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + sp->free(sp); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } void -qla2x00_sp_free(void *data, void *ptr) +qla2x00_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; del_timer(&iocb->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } /* Asynchronous Login/Logout Routines -------------------------------------- */ @@ -94,43 +92,72 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha) return tmo; } -static void +void qla2x00_async_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, - "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", - sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~FCF_ASYNC_SENT; - if (sp->type == SRB_LOGIN_CMD) { - struct srb_iocb *lio = &sp->u.iocb_cmd; - qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); + + switch (sp->type) { + case SRB_LOGIN_CMD: /* Retry as needed. */ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; - qla2x00_post_async_login_done_work(fcport->vha, fcport, - lio->u.logio.data); - } else if (sp->type == SRB_LOGOUT_CMD) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.sp = sp; + qla24xx_handle_plogi_done_event(fcport->vha, &ea); + break; + case SRB_LOGOUT_CMD: qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); + break; + case SRB_CT_PTHRU_CMD: + case SRB_MB_IOCB: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + sp->done(sp, QLA_FUNCTION_TIMEOUT); + break; } } static void -qla2x00_async_login_sp_done(void *data, void *ptr, int res) +qla2x00_async_login_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + struct event_arg ea; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, - lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + qla2x00_fcport_event_handler(vha, &ea); + } + + sp->free(sp); } int @@ -139,13 +166,23 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, { srb_t *sp; struct srb_iocb *lio; - int rval; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online) + goto done; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + goto done; - rval = QLA_FUNCTION_FAILED; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + sp->type = SRB_LOGIN_CMD; sp->name = "login"; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); @@ -165,29 +202,30 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, } ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x " - "retries=%d.\n", sp->handle, fcport->loop_id, + "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " + "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->login_retry); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_logout_sp_done(void *data, void *ptr, int res) +qla2x00_async_logout_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) + qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -198,6 +236,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 
int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -214,28 +253,30 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x2070, - "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->port_name); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_adisc_sp_done(void *data, void *ptr, int res) +qla2x00_async_adisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, + qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -247,6 +288,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -271,15 +313,858 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } +static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport, *conflict_fcport; + struct get_name_list_extended *e; + u16 i, n, found = 0, loop_id; + port_id_t id; + u64 wwn; + u8 opt = 0; + + fcport = ea->fcport; + + if (ea->rc) { /* rval */ + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "GNL failed Port login retry %8phN, retry cnt=%d.\n", + fcport->port_name, fcport->login_retry); + } + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC rscn gen changed rscn %d|%d \n", + __func__, fcport->port_name, + fcport->last_rscn_gen, fcport->rscn_gen); + qla24xx_post_gidpn_work(vha, fcport); + return; + } else if (fcport->last_login_gen != fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login gen changed login %d|%d \n", + __func__, fcport->port_name, + fcport->last_login_gen, fcport->login_gen); + return; + } + + n = ea->data[0] / sizeof(struct get_name_list_extended); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC n %d %02x%02x%02x lid %d \n", + __func__, __LINE__, fcport->port_name, n, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, fcport->loop_id); + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + wwn = wwn_to_u64(e->port_name); + + if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) + continue; + + found = 1; + id.b.domain = e->port_id[2]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[0]; + id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(e->nport_handle); + loop_id = (loop_id & 0x7fff); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + __func__, fcport->port_name, + e->current_login_state, fcport->fw_login_state, + id.b.domain, 
id.b.area, id.b.al_pa, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + + if ((id.b24 != fcport->d_id.b24) || + ((fcport->loop_id != FC_NO_LOOP_ID) && + (fcport->loop_id != loop_id))) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion(fcport, 1); + return; + } + + fcport->loop_id = loop_id; + + wwn = wwn_to_u64(fcport->port_name); + qlt_find_sess_invalidate_other(vha, wwn, + id, loop_id, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another share fcport share the same loop_id & + * nport id. Conflict fcport needs to finish + * cleanup before this fcport can proceed to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + } + + switch (e->current_login_state) { + case DSC_LS_PRLI_COMP: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + opt = PDO_FORCE_ADISC; + qla24xx_post_gpdb_work(vha, fcport, opt); + break; + + case DSC_LS_PORT_UNAVAIL: + default: + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC \n", + __func__, __LINE__, fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + break; + } + } + + if (!found) { + /* fw has no record of this port */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } else { + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + id.b.domain = e->port_id[0]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[2]; + id.b.rsvd_1 = 0; + loop_id = le16_to_cpu(e->nport_handle); + + if (fcport->d_id.b24 == id.b24) { + conflict_fcport = + qla2x00_find_fcport_by_wwpn(vha, + e->port_name, 0); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + conflict_fcport->port_name); + qlt_schedule_sess_for_deletion + (conflict_fcport, 1); + } + + if (fcport->loop_id == loop_id) { + /* FW already picked this loop id for another fcport */ + qla2x00_find_new_loop_id(vha, fcport); + } + } + } + qla24xx_fcport_handle_login(vha, fcport); + } +} /* gnl_event */ + +static void +qla24xx_async_gnl_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + struct fc_port *fcport = NULL, *tf; + u16 i, n = 0, loop_id; + struct event_arg ea; + struct get_name_list_extended *e; + u64 wwn; + struct list_head h; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x mb[1]=%x mb[2]=%x \n", + sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], + sp->u.iocb_cmd.u.mbx.in_mb[2]); + + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GNL_DONE; + + if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= + sizeof(struct get_name_list_extended)) { + n = sp->u.iocb_cmd.u.mbx.in_mb[1] / + sizeof(struct get_name_list_extended); + ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ + } + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + loop_id = le16_to_cpu(e->nport_handle); + /* mask out reserve bit */ + loop_id = (loop_id & 0x7fff); + set_bit(loop_id, vha->hw->loop_id_map); + wwn = wwn_to_u64(e->port_name); + + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", + __func__, (void *)&wwn, e->port_id[2], e->port_id[1], + e->port_id[0], e->current_login_state, e->last_login_state, + (loop_id 
& 0x7fff)); + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; + + INIT_LIST_HEAD(&h); + fcport = tf = NULL; + if (!list_empty(&vha->gnl.fcports)) + list_splice_init(&vha->gnl.fcports, &h); + + list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~FCF_ASYNC_SENT; + ea.fcport = fcport; + + qla2x00_fcport_event_handler(vha, &ea); + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + unsigned long flags; + u16 *mb; + + if (!vha->flags.online) + goto done; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-gnlist WWPN %8phC \n", fcport->port_name); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GNL; + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + + list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + rval = QLA_SUCCESS; + goto done; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + sp->type = SRB_MB_IOCB; + sp->name = "gnlist"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_PORT_NODE_NAME_LIST; + mb[1] = BIT_2 | BIT_3; + mb[2] = MSW(vha->gnl.ldma); + mb[3] = LSW(vha->gnl.ldma); + mb[6] = MSW(MSD(vha->gnl.ldma)); + mb[7] = LSW(MSD(vha->gnl.ldma)); + mb[8] = vha->gnl.size; + mb[9] = vha->vp_idx; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + + sp->done = qla24xx_async_gnl_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - OUT WWPN %8phC hndl %x\n", + sp->name, fcport->port_name, sp->handle); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GNL); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_async_gpdb_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + uint64_t zero = 0; + struct port_database_24xx *pd; + fc_port_t *fcport = sp->fcport; + u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; + int rval = QLA_SUCCESS; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", + sp->name, res, fcport->port_name, mb[1], mb[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res) { + rval = res; + goto gpd_error_out; + } + + pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; + + /* Check for logged in state. 
*/ + if (pd->current_login_state != PDS_PRLI_COMPLETE && + pd->last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd->current_login_state, + pd->last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPDB_DONE; + ea.rc = rval; + ea.fcport = fcport; + ea.sp = sp; + + qla2x00_fcport_event_handler(vha, &ea); + + dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, + sp->u.iocb_cmd.u.mbx.in_dma); + + sp->free(sp); +} + +static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, + u8 opt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + e->u.fcport.opt = opt; + return qla2x00_post_work(vha, e); +} + +int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + u16 *mb; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GPDB; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + sp->type = SRB_MB_IOCB; + sp->name = "gpdb"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_GET_PORT_DATABASE; + mb[1] = fcport->loop_id; + mb[2] = MSW(pd_dma); + mb[3] = LSW(pd_dma); + mb[6] = MSW(MSD(pd_dma)); + mb[7] = LSW(MSD(pd_dma)); + mb[9] = vha->vp_idx; + mb[10] = opt; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + mbx->u.mbx.in = (void *)pd; + mbx->u.mbx.in_dma = pd_dma; + + sp->done = qla24xx_async_gpdb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x opt %x\n", + sp->name, fcport->port_name, sp->handle, opt); + + return rval; + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); + + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + qla24xx_post_gpdb_work(vha, 
fcport, opt); + return rval; +} + +static +void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + int rval = ea->rc; + fc_port_t *fcport = ea->fcport; + unsigned long flags; + + fcport->flags &= ~FCF_ASYNC_SENT; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name, + fcport->disc_state, fcport->fw_login_state, rval); + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, + fcport->login_gen); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + return; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + return; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + ea->fcport->login_gen++; + ea->fcport->deleted = 0; + ea->fcport->logout_on_delete = 1; + + if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { + vha->fcport_count++; + ea->fcport->login_succ = 1; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, fcport); + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +} /* gpdb event */ + +int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + if (fcport->login_retry == 0) + return 0; + + if (fcport->scan_state != QLA_FCPORT_FOUND) + return 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, fcport->flags, + fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, fcport->login_retry, + fcport->loop_id); + + fcport->login_retry--; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return 0; + + /* for pure Target Mode. 
Login will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return 0; + + if (fcport->flags & FCF_ASYNC_SENT) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return 0; + } + + switch (fcport->disc_state) { + case DSC_DELETED: + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_async_gnl(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + break; + + case DSC_GNL: + if (fcport->login_pause) { + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + + if (fcport->flags & FCF_FCP2_DEVICE) { + u8 opt = PDO_FORCE_ADISC; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + + fcport->disc_state = DSC_GPDB; + qla24xx_post_gpdb_work(vha, fcport, opt); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login \n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + + break; + + case DSC_LOGIN_FAILED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gidpn_work(vha, fcport); + break; + + case DSC_LOGIN_COMPLETE: + /* recheck login state */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC); + break; + + default: + break; + } + + return 0; +} + +static +void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea) +{ + fcport->rscn_gen++; + + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + + if (fcport->flags & FCF_ASYNC_SENT) + return; + + switch (fcport->disc_state) { + case DSC_DELETED: + case DSC_LOGIN_COMPLETE: + qla24xx_post_gidpn_work(fcport->vha, fcport); + break; + + default: + break; + } +} + +int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, + u8 *port_name, void *pla) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.new_sess.id = *id; + e->u.new_sess.pla = pla; + memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); + + return qla2x00_post_work(vha, e); +} + +static +int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + break; + + case MODE_TARGET: + default: + /* no-op */ + break; + } + + return 0; +} + +static +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (fcport->scan_state != QLA_FCPORT_FOUND) { + fcport->login_retry++; + return; + } + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, + fcport->deleted, fcport->conflict, + 
fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, + fcport->flags); + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return; + + if (fcport->flags & FCF_ASYNC_SENT) { + fcport->login_retry++; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + fcport->login_retry++; + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + + qla24xx_async_gidpn(vha, fcport); + return; + } + + qla24xx_fcport_handle_login(vha, fcport); +} + +void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport, *f, *tf; + uint32_t id = 0, mask, rid; + int rc; + + switch (ea->event) { + case FCME_RELOGIN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + + qla24xx_handle_relogin_event(vha, ea); + break; + case FCME_RSCN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + switch (ea->id.b.rsvd_1) { + case RSCN_PORT_ADDR: + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (!fcport) { + /* cable moved */ + rc = qla24xx_post_gpnid_work(vha, &ea->id); + if (rc) { + ql_log(ql_log_warn, vha, 0xffff, + "RSCN GPNID work failed %02x%02x%02x\n", + ea->id.b.domain, ea->id.b.area, + ea->id.b.al_pa); + } + } else { + ea->fcport = fcport; + qla24xx_handle_rscn_event(fcport, ea); + } + break; + case RSCN_AREA_ADDR: + case RSCN_DOM_ADDR: + if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) { + mask = 0xffff00; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Area 0x%06x was affected\n", + ea->id.b24); + } else { + mask = 0xff0000; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Domain 0x%06x was affected\n", + ea->id.b24); + } + + rid = ea->id.b24 & mask; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, + list) { + id = f->d_id.b24 & mask; + if (rid == id) { + ea->fcport = f; + qla24xx_handle_rscn_event(f, ea); + } + } + break; + case RSCN_FAB_ADDR: + default: + ql_log(ql_log_warn, vha, 0xffff, + "RSCN: Fabric was affected. 
Addr format %d\n", + ea->id.b.rsvd_1); + qla2x00_mark_all_devices_lost(vha, 1); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + } + break; + case FCME_GIDPN_DONE: + qla24xx_handle_gidpn_event(vha, ea); + break; + case FCME_GNL_DONE: + qla24xx_handle_gnl_done_event(vha, ea); + break; + case FCME_GPSC_DONE: + qla24xx_post_upd_fcport_work(vha, ea->fcport); + break; + case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */ + qla24xx_handle_plogi_done_event(vha, ea); + break; + case FCME_GPDB_DONE: + qla24xx_handle_gpdb_event(vha, ea); + break; + case FCME_GPNID_DONE: + qla24xx_handle_gpnid_event(vha, ea); + break; + case FCME_DELETE_DONE: + qla24xx_handle_delete_done_event(vha, ea); + break; + default: + BUG_ON(1); + break; + } +} + static void qla2x00_tmf_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; tmf->u.tmf.comp_status = CS_TIMEOUT; @@ -287,10 +1172,11 @@ qla2x00_tmf_iocb_timeout(void *data) } static void -qla2x00_tmf_sp_done(void *data, void *ptr, int res) +qla2x00_tmf_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *tmf = &sp->u.iocb_cmd; + complete(&tmf->u.tmf.comp); } @@ -348,7 +1234,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, } done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -356,7 +1242,7 @@ done: static void qla24xx_abort_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = CS_TIMEOUT; @@ -364,9 +1250,9 @@ qla24xx_abort_iocb_timeout(void *data) } static void -qla24xx_abort_sp_done(void *data, void *ptr, int res) +qla24xx_abort_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; complete(&abt->u.abt.comp); @@ -375,7 +1261,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) static int qla24xx_async_abort_cmd(srb_t *cmd_sp) { - scsi_qla_host_t *vha = cmd_sp->fcport->vha; + scsi_qla_host_t *vha = cmd_sp->vha; fc_port_t *fcport = cmd_sp->fcport; struct srb_iocb *abt_iocb; srb_t *sp; @@ -408,7 +1294,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp) QLA_SUCCESS : QLA_FUNCTION_FAILED; done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -441,59 +1327,65 @@ qla24xx_async_abort_command(srb_t *sp) return qla24xx_async_abort_cmd(sp); } -void -qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, - uint16_t *data) +static void +qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { - int rval; + port_id_t cid; /* conflict Nport id */ - switch (data[0]) { + switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: /* * Driver must validate login state - If PRLI not complete, * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. 
*/ - rval = qla2x00_get_port_database(vha, fcport, 0); - if (rval == QLA_NOT_LOGGED_IN) { - fcport->flags &= ~FCF_ASYNC_SENT; - fcport->flags |= FCF_LOGIN_NEEDED; - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - break; - } - - if (rval != QLA_SUCCESS) { - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); - break; - } - if (fcport->flags & FCF_FCP2_DEVICE) { - qla2x00_post_async_adisc_work(vha, fcport, data); - break; - } - qla2x00_update_fcport(vha, fcport); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + ea->fcport->chip_reset = vha->hw->chip_reset; + ea->fcport->logout_on_delete = 1; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; case MBS_COMMAND_ERROR: - fcport->flags &= ~FCF_ASYNC_SENT; - if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n", + __func__, __LINE__, ea->fcport->port_name, ea->data[1]); + + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->disc_state = DSC_LOGIN_FAILED; + if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; - case MBS_PORT_ID_USED: - fcport->loop_id = data[1]; - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); + qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); break; case MBS_LOOP_ID_USED: - fcport->loop_id++; - rval = qla2x00_find_new_loop_id(vha, fcport); - if (rval != QLA_SUCCESS) { - fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; + /* data[1] = IO PARAM 1 = nport ID */ + cid.b.domain = (ea->iop[1] >> 16) & 0xff; + cid.b.area = (ea->iop[1] >> 8) & 0xff; + cid.b.al_pa = ea->iop[1] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC LoopID 0x%x in use post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id); + + if (IS_SW_RESV_ADDR(cid)) { + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; + } else { + qla2x00_clear_loop_id(ea->fcport); } - qla2x00_post_async_login_work(vha, fcport, NULL); + qla24xx_post_gnl_work(vha, ea->fcport); + break; + case MBS_PORT_ID_USED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, + ea->fcport->d_id.b.al_pa); + + qla2x00_clear_loop_id(ea->fcport); + qla24xx_post_gidpn_work(vha, ea->fcport); break; } return; @@ -503,10 +1395,9 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - /* Don't re-login in target mode */ - if (!fcport->tgt_session) - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1, 0); qlt_logo_completion_handler(fcport, data[0]); + fcport->login_gen++; return; } @@ -709,7 +1600,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) } } - if (qla_ini_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1; @@ -2088,6 +2979,21 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, 
add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); + qla2x00_set_fw_options(vha, ha->fw_options); + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2968,8 +3874,14 @@ qla2x00_rport_del(void *data) rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); + fc_remote_port_delete(rport); + } } /** @@ -2995,9 +3907,42 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, + flags); + fcport->disc_state = DSC_DELETED; + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + fcport->deleted = QLA_SESS_DELETED; + fcport->login_retry = vha->hw->login_retry_count; + fcport->login_retry = 5; + fcport->logout_on_delete = 1; + + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + fcport = NULL; + } + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_LIST_HEAD(&fcport->gnl_entry); + INIT_LIST_HEAD(&fcport->list); + return fcport; } +void +qla2x00_free_fcport(fc_port_t *fcport) +{ + if (fcport->ct_desc.ct_sns) { + dma_free_coherent(&fcport->vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, + fcport->ct_desc.ct_sns_dma); + + fcport->ct_desc.ct_sns = NULL; + } + kfree(fcport); +} + /* * qla2x00_configure_loop * Updates Fibre Channel Device Database with what is actually on loop. @@ -3055,10 +4000,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) } else if (ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); - + } else if (ha->current_topology == ISP_CFG_NL) { + clear_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || (test_bit(ABORT_ISP_ACTIVE, &flags))) { - set_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } @@ -3095,7 +4041,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) * Process any ATIO queue entries that came in * while we weren't online. */ - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); @@ -3159,6 +4106,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) uint16_t loop_id; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; + unsigned long flags; found_devs = 0; new_fcport = NULL; @@ -3199,7 +4147,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) "Marking port lost loop_id=0x%04x.\n", fcport->loop_id); - qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_mark_device_lost(vha, fcport, 0, 0); } } @@ -3230,13 +4178,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (loop_id > LAST_LOCAL_LOOP_ID) continue; - memset(new_fcport, 0, sizeof(fc_port_t)); + memset(new_fcport->port_name, 0, WWN_SIZE); /* Fill in member data. 
*/ new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; + rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x201a, @@ -3249,6 +4198,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) continue; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* Check for matching device in port list. */ found = 0; fcport = NULL; @@ -3264,6 +4214,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + found++; break; } @@ -3274,16 +4230,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Allocate a new replacement fcport. */ fcport = new_fcport; + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0x201c, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Base iIDMA settings on HBA port speed. */ fcport->fp_speed = ha->link_data_rate; @@ -3334,6 +4302,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } +/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ static void qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) { @@ -3352,12 +4321,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port.\n"); return; } - /* - * Create target mode FC NEXUS in qla_target.c if target mode is - * enabled.. - */ - - qlt_fc_port_added(vha, fcport); spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; @@ -3370,6 +4333,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcport->port_type == FCT_TARGET) rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phN. rport %p is %s mode \n", + __func__, fcport->port_name, rport, + (fcport->port_type == FCT_TARGET) ? 
"tgt" : "ini"); + fc_remote_port_rolechg(rport, rport_ids.roles); } @@ -3393,25 +4362,44 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { fcport->vha = vha; + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n", + __func__, fcport->port_name); + if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->deleted = 0; + fcport->logout_on_delete = 1; qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); reg_port: - if (qla_ini_mode_enabled(vha)) + switch (vha->host->active_mode) { + case MODE_INITIATOR: qla2x00_reg_remote_port(vha, fcport); - else { - /* - * Create target mode FC NEXUS in qla_target.c - */ - qlt_fc_port_added(vha, fcport); + break; + case MODE_TARGET: + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + case MODE_DUAL: + qla2x00_reg_remote_port(vha, fcport); + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + default: + break; } } @@ -3430,13 +4418,11 @@ static int qla2x00_configure_fabric(scsi_qla_host_t *vha) { int rval; - fc_port_t *fcport, *fcptemp; - uint16_t next_loopid; + fc_port_t *fcport; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int discovery_gen; /* If FL port exists, then SNS is present */ @@ -3454,7 +4440,19 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } vha->device_flags |= SWITCH_FOUND; + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + rval = qla2x00_send_change_request(vha, 0x3, 0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x121, + "Failed to enable receiving of RSCN requests: 0x%x.\n", + rval); + } + + do { + qla2x00_mgmt_svr_login(vha); + /* FDMI support. */ if (ql2xfdmienable && test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) @@ -3501,9 +4499,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } } -#define QLA_FCPORT_SCAN 1 -#define QLA_FCPORT_FOUND 2 - list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; } @@ -3516,174 +4511,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * will be newer than discovery_gen. */ qlt_do_generation_tick(vha, &discovery_gen); - rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); + rval = qla2x00_find_all_fabric_devs(vha); if (rval != QLA_SUCCESS) break; - - /* - * Logout all previous fabric devices marked lost, except - * FCP2 devices. 
- */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) - continue; - - if (fcport->scan_state == QLA_FCPORT_SCAN) { - if (qla_ini_mode_enabled(base_vha) && - atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - } else if (!qla_ini_mode_enabled(base_vha)) { - /* - * In target mode, explicitly kill - * sessions and log out of devices - * that are gone, so that we don't - * end up with an initiator using the - * wrong ACL (if the fabric recycles - * an FC address and we have a stale - * session around) and so that we don't - * report initiators that are no longer - * on the fabric. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, - "port gone, logging out/killing session: " - "%8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - qlt_fc_port_deleted(vha, fcport, - discovery_gen); - } - } - } - - /* Starting free loop ID. */ - next_loopid = ha->min_external_loopid; - - /* - * Scan through our port list and login entries that need to be - * logged in. - */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) - continue; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (!qla_ini_mode_enabled(base_vha)) { - if (fcport->scan_state == QLA_FCPORT_FOUND) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, - "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d (initiator mode disabled; skipping " - "login)\n", fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - continue; - } - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id( - base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); - } - - /* Exit if out of loop IDs. */ - if (rval != QLA_SUCCESS) { - break; - } - - /* - * Login and add the new devices to our port list. - */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. 
There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (qla_ini_mode_enabled(base_vha)) { - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, - fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, - &next_loopid); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, - "new port %8phC state 0x%x flags 0x%x fc4_type " - "0x%x scan_state %d (initiator mode disabled; " - "skipping login)\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - - list_move_tail(&fcport->list, &vha->vp_fcports); - } } while (0); - /* Free all new device structures not processed. */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } - - if (rval) { + if (rval) ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); - } return (rval); } @@ -3702,12 +4537,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * Kernel context. */ static int -qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, - struct list_head *new_fcports) +qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; - fc_port_t *fcport, *new_fcport, *fcptemp; + fc_port_t *fcport, *new_fcport; int found; sw_info_t *swl; @@ -3716,6 +4550,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, port_id_t wrap = {}, nxt_d_id; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + unsigned long flags; rval = QLA_SUCCESS; @@ -3736,9 +4571,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, swl = NULL; } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; - } else if (ql2xiidmaenable && - qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { - qla2x00_gpsc(vha, swl); + } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; } /* If other queries succeeded probe for FC-4 type */ @@ -3800,11 +4634,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, ql_log(ql_log_warn, vha, 0x2064, "SNS scan failed -- assuming " "zero-entry result.\n"); - list_for_each_entry_safe(fcport, fcptemp, - new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } rval = QLA_SUCCESS; break; } @@ -3847,6 +4676,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* Locate matching device in database. */ found = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { @@ -3869,7 +4700,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && (atomic_read(&fcport->state) == FCS_ONLINE || - !qla_ini_mode_enabled(base_vha))) { + (vha->host->active_mode == MODE_TARGET))) { break; } @@ -3889,7 +4720,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Log it out if still logged in and mark it for * relogin later. 
*/ - if (!qla_ini_mode_enabled(base_vha)) { + if (qla_tgt_mode_enabled(base_vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, "port changed FC ID, %8phC" " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", @@ -3907,25 +4738,19 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - (fcport->flags & FCF_ASYNC_SENT) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - break; } - if (found) + if (found) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); continue; + } /* If device was not in our fcports list, then add it. */ new_fcport->scan_state = QLA_FCPORT_FOUND; - list_add_tail(&new_fcport->list, new_fcports); + list_add_tail(&new_fcport->list, &vha->vp_fcports); + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Allocate a new replacement fcport. */ nxt_d_id.b24 = new_fcport->d_id.b24; @@ -3939,8 +4764,44 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->d_id.b24 = nxt_d_id.b24; } - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); + /* + * Logout all previous fabric dev marked lost, except FCP2 devices. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || + (fcport->flags & FCF_LOGIN_NEEDED) == 0) + continue; + + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if ((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + + qlt_schedule_sess_for_deletion_lock + (fcport); + continue; + } + } + } + + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + } return (rval); } @@ -3992,64 +4853,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) return (rval); } -/* - * qla2x00_fabric_dev_login - * Login fabric target device and update FC port database. - * - * Input: - * ha: adapter state pointer. - * fcport: port structure list pointer. - * next_loopid: contains value of a new loop ID that can be used - * by the next login attempt. - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. 
- */ -static int -qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, - uint16_t *next_loopid) -{ - int rval; - uint8_t opts; - struct qla_hw_data *ha = vha->hw; - - rval = QLA_SUCCESS; - - if (IS_ALOGIO_CAPABLE(ha)) { - if (fcport->flags & FCF_ASYNC_SENT) - return rval; - fcport->flags |= FCF_ASYNC_SENT; - rval = qla2x00_post_async_login_work(vha, fcport, NULL); - if (!rval) - return rval; - } - - fcport->flags &= ~FCF_ASYNC_SENT; - rval = qla2x00_fabric_login(vha, fcport, next_loopid); - if (rval == QLA_SUCCESS) { - /* Send an ADISC to FCP2 devices.*/ - opts = 0; - if (fcport->flags & FCF_FCP2_DEVICE) - opts |= BIT_1; - rval = qla2x00_get_port_database(vha, fcport, opts); - if (rval != QLA_SUCCESS) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } else { - qla2x00_update_fcport(vha, fcport); - } - } else { - /* Retry Login. */ - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } - - return (rval); -} /* * qla2x00_fabric_login @@ -4341,13 +5144,6 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); - /* - * Release the target mode FC NEXUS in - * qla_target.c, if target mod is enabled. - */ - qlt_fc_port_deleted(vha, fcport, - base_vha->total_fcport_update_gen); - spin_lock_irqsave(&ha->vport_slock, flags); } } @@ -4730,6 +5526,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->chip_reset++; + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -4784,8 +5582,6 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } - - ha->chip_reset++; /* memory barrier */ wmb(); } @@ -4981,7 +5777,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if (!status) { /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -5209,7 +6004,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) rval = 1; } - if (!qla_ini_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha)) { /* Don't enable full login after initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Don't enable LIP full login for initiator */ @@ -5400,6 +6195,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) for (chksum = 0; cnt--; wptr++) chksum += le32_to_cpu(*wptr); + if (chksum) { ql_dbg(ql_dbg_init, vha, 0x018c, "Checksum validation failed for primary image (0x%x)\n", @@ -5669,7 +6465,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image by checking version. */ if (blob->fw->size < 8 * sizeof(uint16_t)) { ql_log(ql_log_fatal, vha, 0x0085, - "Unable to verify integrity of firmware image (%Zd).\n", + "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -5697,7 +6493,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (blob->fw->size < fwclen) { ql_log(ql_log_fatal, vha, 0x0088, "Unable to verify integrity of firmware image " - "(%Zd).\n", blob->fw->size); + "(%zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -5778,7 +6574,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image by checking version. 
*/ if (blob->fw->size < 8 * sizeof(uint32_t)) { ql_log(ql_log_fatal, vha, 0x0093, - "Unable to verify integrity of firmware image (%Zd).\n", + "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); return QLA_FUNCTION_FAILED; } @@ -5789,7 +6585,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { ql_log(ql_log_fatal, vha, 0x0094, - "Unable to verify integrity of firmware image (%Zd).\n", + "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); ql_log(ql_log_fatal, vha, 0x0095, "Firmware data: %08x %08x %08x %08x.\n", @@ -5807,7 +6603,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (blob->fw->size < fwclen) { ql_log(ql_log_fatal, vha, 0x0096, "Unable to verify integrity of firmware image " - "(%Zd).\n", blob->fw->size); + "(%zd).\n", blob->fw->size); return QLA_FUNCTION_FAILED; } @@ -6412,6 +7208,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) vha->flags.process_response_queue = 1; } + /* enable RIDA Format2 */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) + icb->firmware_options_3 |= BIT_0; + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -6536,13 +7336,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } - if (!ql2xetsenable) - goto out; + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + if (ql2xetsenable) { + /* Enable ETS Burst. */ + memset(ha->fw_options, 0, sizeof(ha->fw_options)); + ha->fw_options[2] |= BIT_9; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); - /* Enable ETS Burst. 
*/ - memset(ha->fw_options, 0, sizeof(ha->fw_options)); - ha->fw_options[2] |= BIT_9; -out: qla2x00_set_fw_options(vha, ha->fw_options); } @@ -6748,6 +7561,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v memset(qpair, 0, sizeof(struct qla_qpair)); qpair->hw = vha->hw; + qpair->vha = vha; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 44e404583c86..66df6cec59da 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -166,8 +166,8 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) /* Don't print state transitions during initial allocation of fcport */ if (old_state && old_state != state) { ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, - "FCPort state transitioned from %s to %s - " - "portid=%02x%02x%02x.\n", + "FCPort %8phC state transitioned from %s to %s - " + "portid=%02x%02x%02x.\n", fcport->port_name, port_state_str[old_state], port_state_str[state], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -232,6 +232,7 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = qpair->vha; done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); @@ -249,20 +250,20 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - struct qla_hw_data *ha = vha->hw; uint8_t bail; QLA_VHA_MARK_BUSY(vha, bail); if (unlikely(bail)) return NULL; - sp = mempool_alloc(ha->srb_mempool, flag); + sp = mempool_alloc(vha->hw->srb_mempool, flag); if (!sp) goto done; memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = vha; done: if (!sp) QLA_VHA_MARK_NOT_BUSY(vha); @@ -270,10 +271,10 @@ done: } static inline void -qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp) +qla2x00_rel_sp(srb_t *sp) { - mempool_free(sp, vha->hw->srb_mempool); - QLA_VHA_MARK_NOT_BUSY(vha); + QLA_VHA_MARK_NOT_BUSY(sp->vha); + mempool_free(sp, sp->vha->hw->srb_mempool); } static inline void @@ -285,8 +286,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; add_timer(&sp->u.iocb_cmd.timer); sp->free = qla2x00_sp_free; - if ((IS_QLAFX00(sp->fcport->vha->hw)) && - (sp->type == SRB_FXIOCB_DCMD)) + if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); if (sp->type == SRB_ELS_DCMD) init_completion(&sp->u.iocb_cmd.u.els_logo.comp); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 58e49a3e1de8..535079280288 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -23,7 +23,7 @@ qla2x00_get_cmd_direction(srb_t *sp) { uint16_t cflags; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; cflags = 0; @@ -210,7 +210,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ @@ -267,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ @@ -324,7 +324,7 @@ 
qla2x00_start_scsi(srb_t *sp) struct rsp_que *rsp; /* Setup device pointers. */ - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; reg = &ha->iobase->isp; cmd = GET_CMD_SP(sp); @@ -601,7 +601,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, return 0; } - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* Set transfer direction */ @@ -716,7 +716,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1108,7 +1108,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, if (sp) { cmd = GET_CMD_SP(sp); sgl = scsi_prot_sglist(cmd); - vha = sp->fcport->vha; + vha = sp->vha; } else if (tc) { vha = tc->vha; sgl = tc->prot_sg; @@ -1215,7 +1215,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* Update entry type to indicate Command Type CRC_2 IOCB */ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2); - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* No data transfer */ @@ -1225,7 +1225,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, return QLA_SUCCESS; } - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1415,7 +1415,7 @@ qla24xx_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; /* Setup device pointers. */ @@ -1492,7 +1492,7 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); @@ -1564,7 +1564,7 @@ qla24xx_dif_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_crc_2 *cmd_pkt; uint32_t status = 0; @@ -2214,13 +2214,13 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; uint16_t opts; @@ -2238,7 +2238,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2247,20 +2247,20 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->tgt_session || - !sp->fcport->tgt_session->keep_nport_handle) + if 
(!sp->fcport->se_sess || + !sp->fcport->keep_nport_handle) logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2271,7 +2271,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); /* Implicit: mbx->mbx10 = 0. */ } @@ -2281,13 +2281,13 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2302,7 +2302,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2338,32 +2338,30 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) } static void -qla2x00_els_dcmd_sp_free(void *ptr, void *data) +qla2x00_els_dcmd_sp_free(void *data) { - struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr; - struct qla_hw_data *ha = vha->hw; - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *elsio = &sp->u.iocb_cmd; kfree(sp->fcport); if (elsio->u.els_logo.els_logo_pyld) - dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, + dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, elsio->u.els_logo.els_logo_pyld, elsio->u.els_logo.els_logo_pyld_dma); del_timer(&elsio->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } static void qla2x00_els_dcmd_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; - struct srb_iocb *lio = &sp->u.iocb_cmd; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; + struct srb_iocb *lio = &sp->u.iocb_cmd; unsigned long flags = 0; ql_dbg(ql_dbg_io, vha, 0x3069, @@ -2386,12 +2384,12 @@ qla2x00_els_dcmd_iocb_timeout(void *data) } static void -qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res) +qla2x00_els_dcmd_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; ql_dbg(ql_dbg_io, vha, 0x3072, "%s hdl=%x, portid=%02x%02x%02x done\n", @@ -2449,7 +2447,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { - sp->free(vha, 
sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2468,7 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(vha, sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2479,14 +2477,14 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, wait_for_completion(&elsio->u.els_logo.comp); - sp->free(vha, sp); + sp->free(sp); return rval; } static void qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct srb_iocb *elsio = &sp->u.iocb_cmd; els_iocb->entry_type = ELS_IOCB_TYPE; @@ -2518,7 +2516,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_address[1] = 0; els_iocb->rx_len = 0; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2534,7 +2532,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); - els_iocb->vp_index = sp->fcport->vha->vp_idx; + els_iocb->vp_index = sp->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); @@ -2565,7 +2563,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2576,7 +2574,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2642,7 +2640,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) } ct_iocb->entry_count = entry_count; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2653,7 +2651,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2665,7 +2663,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->handle = sp->handle; ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - ct_iocb->vp_index = sp->fcport->vha->vp_idx; + ct_iocb->vp_index = sp->vha->vp_idx; ct_iocb->comp_status = cpu_to_le16(0); ct_iocb->cmd_dsd_count = @@ -2739,7 +2737,7 @@ qla82xx_start_scsi(srb_t *sp) uint32_t *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; @@ -2901,7 +2899,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Build IOCB segments */ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) @@ -2974,7 +2972,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, @@ -3060,7 +3058,7 @@ static void qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); @@ -3079,19 +3077,69 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) wmb(); } +static void +qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) +{ + int i, sz; + + mbx->entry_type = MBX_IOCB_TYPE; + mbx->handle = sp->handle; + sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); + + for (i = 0; i < sz; i++) + mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); +} + +static void +qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) +{ + sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; + qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); + ct_pkt->handle = sp->handle; +} + +static void qla2x00_send_notify_ack_iocb(srb_t *sp, + struct nack_to_isp *nack) +{ + struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; + + nack->entry_type = NOTIFY_ACK_TYPE; + nack->entry_count = 1; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.handle = sp->handle; + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = 0; + nack->u.isp24.srr_reject_code = 0; + nack->u.isp24.srr_reject_code_expl = 0; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; +} + int qla2x00_start_sp(srb_t *sp) { int rval; - struct qla_hw_data *ha = sp->fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; void *pkt; unsigned long flags; rval = QLA_FUNCTION_FAILED; spin_lock_irqsave(&ha->hardware_lock, flags); - pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); + pkt = qla2x00_alloc_iocbs(vha, sp); if (!pkt) { - ql_log(ql_log_warn, sp->fcport->vha, 0x700c, + ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); goto done; } @@ -3139,12 +3187,23 @@ qla2x00_start_sp(srb_t *sp) case SRB_ELS_DCMD: qla24xx_els_logo_iocb(sp, pkt); break; + case SRB_CT_PTHRU_CMD: + qla2x00_ctpthru_cmd_iocb(sp, pkt); + break; + case SRB_MB_IOCB: + qla2x00_mb_iocb(sp, pkt); + break; + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + qla2x00_send_notify_ack_iocb(sp, pkt); + break; default: break; } wmb(); - qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); + qla2x00_start_iocbs(vha, ha->req_q_map[0]); done: spin_unlock_irqrestore(&ha->hardware_lock, flags); return rval; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index edc2264db45b..352cfb6292c6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -561,14 +561,50 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) return ret; } 
-static inline fc_port_t * +fc_port_t * qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) + if (f->loop_id == loop_id) + return f; + return NULL; +} - list_for_each_entry(fcport, &vha->vp_fcports, list) - if (fcport->loop_id == loop_id) - return fcport; +fc_port_t * +qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, + u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 == id->b24) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } return NULL; } @@ -934,7 +970,11 @@ skip_rio: ql_dbg(ql_dbg_async, vha, 0x508a, "Marking port lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + if (qla_ini_mode_enabled(vha)) { + qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + fcport->logout_on_delete = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + } break; global_port_update: @@ -985,9 +1025,6 @@ global_port_update: qla2x00_mark_all_devices_lost(vha, 1); - if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) - set_bit(SCR_PENDING, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); @@ -1024,27 +1061,19 @@ global_port_update: if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; - /* - * Search for the rport related to this RSCN entry and mark it - * as lost. 
- */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&fcport->state) != FCS_ONLINE) - continue; - if (fcport->d_id.b24 == rscn_entry) { - qla2x00_mark_device_lost(vha, fcport, 0, 0); - break; - } - } - atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; - - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(RSCN_UPDATE, &vha->dpc_flags); - qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + { + struct event_arg ea; + + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RSCN; + ea.id.b24 = rscn_entry; + ea.id.b.rsvd_1 = rscn_entry >> 24; + qla2x00_fcport_event_handler(vha, &ea); + qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + } break; - /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: ql_dbg(ql_dbg_async, vha, 0x5015, @@ -1212,7 +1241,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, req->outstanding_cmds[index] = NULL; /* Save ISP completion status */ - sp->done(ha, sp, DID_OK << 16); + sp->done(sp, DID_OK << 16); } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); @@ -1235,7 +1264,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, - "Invalid command index (%x).\n", index); + "Invalid command index (%x) type %8ph.\n", + index, iocb); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else @@ -1343,66 +1373,122 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(mbx->mb7)); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void -qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, - sts_entry_t *pkt, int iocb_type) +qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_24xx_entry *pkt) { - const char func[] = "CT_IOCB"; - const char *type; + const char func[] = "MBX-IOCB2"; srb_t *sp; - struct bsg_job *bsg_job; - struct fc_bsg_reply *bsg_reply; - uint16_t comp_status; + struct srb_iocb *si; + u16 sz, i; int res; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; - bsg_job = sp->u.bsg_job; - bsg_reply = bsg_job->reply; + si = &sp->u.iocb_cmd; + sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); - type = "ct pass-through"; + for (i = 0; i < sz; i++) + si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); - comp_status = le16_to_cpu(pkt->comp_status); + res = (si->u.mbx.in_mb[0] & MBS_MASK); - /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT - * fc payload to the caller - */ - bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; - bsg_job->reply_len = sizeof(struct fc_bsg_reply); + sp->done(sp, res); +} - if (comp_status != CS_COMPLETE) { - if (comp_status == CS_DATA_UNDERRUN) { - res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); +static void +qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct nack_to_isp *pkt) +{ + const char func[] = "nack"; + srb_t *sp; + int res = 0; - ql_log(ql_log_warn, vha, 0x5048, - "CT pass-through-%s error " - "comp_status-status=0x%x total_byte = 0x%x.\n", - type, comp_status, - bsg_reply->reply_payload_rcv_len); - } else { - ql_log(ql_log_warn, vha, 0x5049, - "CT pass-through-%s error " - "comp_status-status=0x%x.\n", type, comp_status); - res = DID_ERROR << 16; - bsg_reply->reply_payload_rcv_len = 0; - } - ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, - (uint8_t *)pkt, sizeof(*pkt)); - } else { - 
res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - bsg_job->reply_payload.payload_len; - bsg_job->reply_len = 0; - } + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) + res = QLA_FUNCTION_FAILED; - sp->done(vha, sp, res); + sp->done(sp, res); +} + +static void +qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, + sts_entry_t *pkt, int iocb_type) +{ + const char func[] = "CT_IOCB"; + const char *type; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + uint16_t comp_status; + int res = 0; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + switch (sp->type) { + case SRB_CT_CMD: + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + + type = "ct pass-through"; + + comp_status = le16_to_cpu(pkt->comp_status); + + /* + * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT + * fc payload to the caller + */ + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + if (comp_status != CS_COMPLETE) { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); + + ql_log(ql_log_warn, vha, 0x5048, + "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", + type, comp_status, + bsg_reply->reply_payload_rcv_len); + } else { + ql_log(ql_log_warn, vha, 0x5049, + "CT pass-through-%s error comp_status=0x%x.\n", + type, comp_status); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, + (uint8_t *)pkt, sizeof(*pkt)); + } else { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + bsg_job->reply_len = 0; + } + break; + case SRB_CT_PTHRU_CMD: + /* + * borrowing sts_entry_24xx.comp_status. + * same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + break; + } + + sp->done(sp, res); } static void @@ -1438,7 +1524,16 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "Driver ELS logo"; ql_dbg(ql_dbg_user, vha, 0x5047, "Completing %s: (%p) type=%d.\n", type, sp, sp->type); - sp->done(vha, sp, 0); + sp->done(sp, 0); + return; + case SRB_CT_PTHRU_CMD: + /* borrowing sts_entry_24xx.comp_status. 
+ same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + sp->done(sp, res); return; default: ql_dbg(ql_dbg_user, vha, 0x503e, @@ -1496,7 +1591,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_job->reply_len = 0; } - sp->done(vha, sp, res); + sp->done(sp, res); } static void @@ -1543,6 +1638,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); + vha->hw->exch_starvation = 0; data[0] = MBS_COMMAND_COMPLETE; if (sp->type != SRB_LOGIN_CMD) goto logio_done; @@ -1568,6 +1664,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); + lio->u.logio.iop[0] = iop[0]; + lio->u.logio.iop[1] = iop[1]; switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; @@ -1576,6 +1674,21 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation. Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* drop through */ default: data[0] = MBS_COMMAND_ERROR; break; @@ -1590,7 +1703,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, le32_to_cpu(logio->io_parameter[1])); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -1640,7 +1753,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, (uint8_t *)sts, sizeof(*sts)); - sp->done(vha, sp, 0); + sp->done(sp, 0); } /** @@ -1728,7 +1841,7 @@ static inline void qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -1756,7 +1869,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, cp->sense_buffer, sense_len); @@ -1778,7 +1891,7 @@ struct scsi_dif_tuple { static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint8_t *ap = &sts24->data[12]; uint8_t *ep = &sts24->data[20]; @@ -2043,7 +2156,7 @@ done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ - sp->done(vha, sp, (DID_OK << 6)); + sp->done(sp, DID_OK << 6); } @@ -2076,6 +2189,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int res = 0; uint16_t state_flags = 0; uint16_t retry_delay = 0; + uint8_t no_logout = 0; sts = (sts_entry_t *) pkt; sts24 = (struct 
sts_entry_24xx *) pkt; @@ -2336,6 +2450,7 @@ check_scsi_status: break; case CS_PORT_LOGGED_OUT: + no_logout = 1; case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: @@ -2358,14 +2473,21 @@ check_scsi_status: break; } - ql_dbg(ql_dbg_io, fcport->vha, 0x3021, - "Port to be marked lost on fcport=%02x%02x%02x, current " - "port state= %s.\n", fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa, - port_state_str[atomic_read(&fcport->state)]); + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[atomic_read(&fcport->state)], + comp_status); + + if (no_logout) + fcport->logout_on_delete = 0; - if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + qlt_schedule_sess_for_deletion_lock(fcport); + } + break; case CS_ABORTED: @@ -2407,7 +2529,7 @@ out: resid_len, fw_resid_len, sp, cp); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2464,7 +2586,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2500,7 +2622,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } fatal: @@ -2558,7 +2680,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); - sp->done(vha, sp, 0); + sp->done(sp, 0); } /** @@ -2629,10 +2751,16 @@ process_err: } case ABTS_RESP_24XX: case CTIO_TYPE7: - case NOTIFY_ACK_TYPE: case CTIO_CRC2: qlt_response_pkt_all_vps(vha, (response_t *)pkt); break; + case NOTIFY_ACK_TYPE: + if (pkt->handle == QLA_TGT_SKIP_HANDLE) + qlt_response_pkt_all_vps(vha, (response_t *)pkt); + else + qla24xxx_nack_iocb_entry(vha, rsp->req, + (struct nack_to_isp *)pkt); + break; case MARKER_TYPE: /* Do nothing in this case, this check is to prevent it * from falling into default case @@ -2642,6 +2770,10 @@ process_err: qla24xx_abort_iocb_entry(vha, rsp->req, (struct abort_entry_24xx *)pkt); break; + case MBX_IOCB_TYPE: + qla24xx_mbx_iocb_entry(vha, rsp->req, + (struct mbx_24xx_entry *)pkt); + break; default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, @@ -2658,8 +2790,9 @@ process_err: if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); - } else + } else { WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); + } } static void diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 67f64db390b0..35079f417417 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1637,94 +1637,6 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) return rval; } -/* - * qla2x00_get_node_name_list - * Issue get node name list mailbox command, kmalloc() - * and return the resulting list. Caller must kfree() it! - * - * Input: - * ha = adapter state pointer. - * out_data = resulting list - * out_len = length of the resulting list - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. 
- */ -int -qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_port_24xx_data *list = NULL; - void *pmap; - mbx_cmd_t mc; - dma_addr_t pmap_dma; - ulong dma_size; - int rval, left; - - left = 1; - while (left > 0) { - dma_size = left * sizeof(*list); - pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size, - &pmap_dma, GFP_KERNEL); - if (!pmap) { - ql_log(ql_log_warn, vha, 0x113f, - "%s(%ld): DMA Alloc failed of %ld\n", - __func__, vha->host_no, dma_size); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out; - } - - mc.mb[0] = MBC_PORT_NODE_NAME_LIST; - mc.mb[1] = BIT_1 | BIT_3; - mc.mb[2] = MSW(pmap_dma); - mc.mb[3] = LSW(pmap_dma); - mc.mb[6] = MSW(MSD(pmap_dma)); - mc.mb[7] = LSW(MSD(pmap_dma)); - mc.mb[8] = dma_size; - mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8; - mc.in_mb = MBX_0|MBX_1; - mc.tov = 30; - mc.flags = MBX_DMA_IN; - - rval = qla2x00_mailbox_command(vha, &mc); - if (rval != QLA_SUCCESS) { - if ((mc.mb[0] == MBS_COMMAND_ERROR) && - (mc.mb[1] == 0xA)) { - left += le16_to_cpu(mc.mb[2]) / - sizeof(struct qla_port_24xx_data); - goto restart; - } - goto out_free; - } - - left = 0; - - list = kmemdup(pmap, dma_size, GFP_KERNEL); - if (!list) { - ql_log(ql_log_warn, vha, 0x1140, - "%s(%ld): failed to allocate node names list " - "structure.\n", __func__, vha->host_no); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out_free; - } - -restart: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - } - - *out_data = list; - *out_len = dma_size; - -out: - return rval; - -out_free: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - return rval; -} /* * qla2x00_get_port_database @@ -3687,10 +3599,8 @@ void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { - uint8_t vp_idx; - uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *vp; + scsi_qla_host_t *vp = NULL; unsigned long flags; int found; @@ -3701,80 +3611,124 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, return; if (rptid_entry->format == 0) { + /* loop */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " - "VPs acquired %d.\n", - MSB(le16_to_cpu(rptid_entry->vp_count)), - LSB(le16_to_cpu(rptid_entry->vp_count))); + "VPs acquired %d.\n", rptid_entry->vp_setup, + rptid_entry->vp_acquired); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); + + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else if (rptid_entry->format == 1) { - vp_idx = LSB(stat); + /* fabric */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " - "port id %02x%02x%02x.\n", vp_idx, MSB(stat), + "port id %02x%02x%02x.\n", rptid_entry->vp_idx, + rptid_entry->vp_status, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); /* buffer to buffer credit flag */ - vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0; - - /* FA-WWN is only for physical port */ - if (!vp_idx) { - void *wwpn = ha->init_cb->port_name; + vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; + + if (rptid_entry->vp_idx == 0) { + if 
(rptid_entry->vp_status == VP_STAT_COMPL) { + /* FA-WWN is only for physical port */ + if (qla_ini_mode_enabled(vha) && + ha->flags.fawwpn_enabled && + (rptid_entry->u.f1.flags & + VP_FLAGS_NAME_VALID)) { + memcpy(vha->port_name, + rptid_entry->u.f1.port_name, + WWN_SIZE); + } - if (!MSB(stat)) { - if (rptid_entry->vp_idx_map[1] & BIT_6) - wwpn = rptid_entry->reserved_4 + 8; + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); } - memcpy(vha->port_name, wwpn, WWN_SIZE); + fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); - ql_dbg(ql_dbg_mbx, vha, 0x1018, - "FA-WWN portname %016llx (%x)\n", - fc_host_port_name(vha->host), MSB(stat)); - } - - vp = vha; - if (vp_idx == 0) - goto reg_needed; - if (MSB(stat) != 0 && MSB(stat) != 2) { - ql_dbg(ql_dbg_mbx, vha, 0x10ba, - "Could not acquire ID for VP[%d].\n", vp_idx); - return; - } + if (qla_ini_mode_enabled(vha)) + ql_dbg(ql_dbg_mbx, vha, 0x1018, + "FA-WWN portname %016llx (%x)\n", + fc_host_port_name(vha->host), + rptid_entry->vp_status); - found = 0; - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - if (vp_idx == vp->vp_idx) { - found = 1; - break; + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + } else { + if (rptid_entry->vp_status != VP_STAT_COMPL && + rptid_entry->vp_status != VP_STAT_ID_CHG) { + ql_dbg(ql_dbg_mbx, vha, 0x10ba, + "Could not acquire ID for VP[%d].\n", + rptid_entry->vp_idx); + return; } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - if (!found) - return; + found = 0; + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (rptid_entry->vp_idx == vp->vp_idx) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); - vp->d_id.b.domain = rptid_entry->port_id[2]; - vp->d_id.b.area = rptid_entry->port_id[1]; - vp->d_id.b.al_pa = rptid_entry->port_id[0]; + if (!found) + return; - /* - * Cannot configure here as we are still sitting on the - * response queue. Handle it in dpc context. - */ - set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + vp->d_id.b.domain = rptid_entry->port_id[2]; + vp->d_id.b.area = rptid_entry->port_id[1]; + vp->d_id.b.al_pa = rptid_entry->port_id[0]; + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vp, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); -reg_needed: - set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); - set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + /* + * Cannot configure here as we are still sitting on the + * response queue. Handle it in dpc context. + */ + set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + } set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + } else if (rptid_entry->format == 2) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + + ql_dbg(ql_dbg_async, vha, 0xffff, + "N2N: Remote WWPN %8phC.\n", + rptid_entry->u.f2.port_name); + + /* N2N. 
direct connect */ + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); } } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 96c33e292eba..10b742d27e16 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1789,16 +1789,16 @@ qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) static void qla2x00_fxdisc_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); } static void -qla2x00_fxdisc_sp_done(void *data, void *ptr, int res) +qla2x00_fxdisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); @@ -1999,7 +1999,7 @@ done_unmap_req: dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -2127,7 +2127,7 @@ static inline void qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -2162,7 +2162,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, cp->sense_buffer, sense_len); @@ -2181,7 +2181,7 @@ qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE); tmf->u.tmf.comp_status = cpstatus; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2198,7 +2198,7 @@ qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2264,7 +2264,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } - sp->done(vha, sp, res); + sp->done(sp, res); } /** @@ -2537,7 +2537,7 @@ check_scsi_status: par_sense_len, rsp_info_len); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2614,7 +2614,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. 
*/ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2695,7 +2695,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } @@ -2997,7 +2997,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt; - vha = sp->fcport->vha; + vha = sp->vha; req = vha->req; cmd = GET_CMD_SP(sp); @@ -3081,7 +3081,7 @@ qlafx00_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_7_fx00 *cmd_pkt; struct cmd_type_7_fx00 lcmd_pkt; @@ -3205,7 +3205,7 @@ void qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct tsk_mgmt_entry_fx00 tm_iocb; struct scsi_lun llun; @@ -3232,7 +3232,7 @@ void qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct abort_iocb_entry_fx00 abt_iocb; @@ -3346,8 +3346,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3368,7 +3367,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3042, + sp->vha, 0x3042, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3377,7 +3376,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3043, + sp->vha, 0x3043, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3409,8 +3408,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3431,7 +3429,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3045, + sp->vha, 0x3045, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3440,7 +3438,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3046, + sp->vha, 0x3046, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3452,7 +3450,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) } ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3047, + sp->vha, 0x3047, (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d01c90c7dd04..bbf1ad956251 100644 --- 
a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -237,6 +237,13 @@ MODULE_PARM_DESC(ql2xfwholdabts, "0 (Default) Do not set fw option. " "1 - Set fw option to hold ABTS."); +int ql2xmvasynctoatio = 1; +module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xmvasynctoatio, + "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" + "0 (Default). Do not move IOCBs" + "1 - Move IOCBs."); + /* * SCSI host template entry points */ @@ -607,11 +614,11 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) } void -qla2x00_sp_free_dma(void *vha, void *ptr) +qla2x00_sp_free_dma(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct qla_hw_data *ha = sp->fcport->vha->hw; void *ctx = GET_CMD_CTX_SP(sp); if (sp->flags & SRB_DMA_VALID) { @@ -650,20 +657,19 @@ qla2x00_sp_free_dma(void *vha, void *ptr) } CMD_SP(cmd) = NULL; - qla2x00_rel_sp(sp->fcport->vha, sp); + qla2x00_rel_sp(sp); } void -qla2x00_sp_compl(void *data, void *ptr, int res) +qla2x00_sp_compl(void *ptr, int res) { - struct qla_hw_data *ha = (struct qla_hw_data *)data; - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; if (atomic_read(&sp->ref_count) == 0) { - ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, + ql_dbg(ql_dbg_io, sp->vha, 0x3015, "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, GET_CMD_SP(sp)); if (ql2xextended_error_logging & ql_dbg_io) @@ -673,12 +679,12 @@ qla2x00_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); cmd->scsi_done(cmd); } void -qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) +qla2xxx_qpair_sp_free_dma(void *ptr) { srb_t *sp = (srb_t *)ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); @@ -724,9 +730,9 @@ qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) } void -qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) +qla2xxx_qpair_sp_compl(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; @@ -742,7 +748,7 @@ qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp); + qla2xxx_qpair_sp_free_dma(sp); cmd->scsi_done(cmd); } @@ -863,7 +869,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) return 0; qc24_host_busy_free_sp: - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -952,7 +958,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, return 0; qc24_host_busy_free_sp: - qla2xxx_qpair_sp_free_dma(vha, sp); + qla2xxx_qpair_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -1044,6 +1050,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) return (return_status); } +static inline int test_fcport_count(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int res; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ql_dbg(ql_dbg_init, vha, 0xffff, + "tgt %p, fcport_count=%d\n", + vha, vha->fcport_count); + res = (vha->fcport_count == 0); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return res; +} + +/* + * qla2x00_wait_for_sess_deletion can only be called from remove_one. 
+ * it has dependency on UNLOADING flag to stop device discovery + */ +static void +qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) +{ + qla2x00_mark_all_devices_lost(vha, 0); + + wait_event(vha->fcport_waitQ, test_fcport_count(vha)); +} + /* * qla2x00_wait_for_hba_ready * Wait till the HBA is ready before doing driver unload @@ -1204,7 +1238,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); - sp->done(ha, sp, 0); + sp->done(sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Did the command return during mailbox execution? */ @@ -1249,7 +1283,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, continue; if (sp->type != SRB_SCSI_CMD) continue; - if (vha->vp_idx != sp->fcport->vha->vp_idx) + if (vha->vp_idx != sp->vha->vp_idx) continue; match = 0; cmd = GET_CMD_SP(sp); @@ -1629,7 +1663,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) spin_lock_irqsave(&ha->hardware_lock, flags); } req->outstanding_cmds[cnt] = NULL; - sp->done(vha, sp, res); + sp->done(sp, res); } } } @@ -3124,7 +3158,8 @@ skip_dpc: ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); - if (qla_ini_mode_enabled(base_vha)) + if (qla_ini_mode_enabled(base_vha) || + qla_dual_mode_enabled(base_vha)) scsi_scan_host(host); else ql_dbg(ql_dbg_init, base_vha, 0x0122, @@ -3373,21 +3408,26 @@ qla2x00_remove_one(struct pci_dev *pdev) * resources. */ if (!atomic_read(&pdev->enable_cnt)) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); return; } - qla2x00_wait_for_hba_ready(base_vha); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) return; set_bit(UNLOADING, &base_vha->dpc_flags); + dma_free_coherent(&ha->pdev->dev, + base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(base_vha, 20); @@ -3536,10 +3576,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, qla2xxx_wake_dpc(base_vha); } else { int now; - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. 
rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); fc_remote_port_delete(rport); + } qlt_do_generation_tick(vha, &now); - qlt_fc_port_deleted(vha, fcport, now); } } @@ -3582,7 +3626,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, fcport->login_retry = vha->hw->login_retry_count; ql_dbg(ql_dbg_disc, vha, 0x2067, - "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } } @@ -3605,7 +3649,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) { fc_port_t *fcport; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Mark all dev lost\n"); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) continue; @@ -4195,10 +4245,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, struct scsi_qla_host *vha = NULL; host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); - if (host == NULL) { + if (!host) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, "Failed to allocate host from the scsi layer, aborting.\n"); - goto fail; + return NULL; } /* Clear our data area */ @@ -4217,9 +4267,22 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->logo_list); INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); + INIT_LIST_HEAD(&vha->gnl.fcports); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); + init_waitqueue_head(&vha->fcport_waitQ); + + vha->gnl.size = sizeof(struct get_name_list_extended) * + (ha->max_loop_id + 1); + vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, + vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); + if (!vha->gnl.l) { + ql_log(ql_log_fatal, vha, 0xffff, + "Alloc failed for name list.\n"); + scsi_remove_host(vha->host); + return NULL; + } sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, @@ -4228,12 +4291,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, dev_name(&(ha->pdev->dev))); return vha; - -fail: - return vha; } -static struct qla_work_evt * +struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; @@ -4255,7 +4315,7 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) return e; } -static int +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; @@ -4316,7 +4376,6 @@ int qla2x00_post_async_##name##_work( \ } qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); -qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); @@ -4369,6 +4428,67 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, return qla2x00_post_work(vha, e); } +int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + unsigned long flags; + fc_port_t *fcport = NULL; + struct qlt_plogi_ack_t *pla = + (struct 
qlt_plogi_ack_t *)e->u.new_sess.pla; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + if (pla) { + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); + /* we took an extra ref_count to prevent PLOGI ACK when + * fcport/sess has not been created. + */ + pla->ref_count--; + } + } else { + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + + memcpy(fcport->port_name, e->u.new_sess.port_name, + WWN_SIZE); + list_add_tail(&fcport->list, &vha->vp_fcports); + + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; + } + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + if (pla) + qlt_plogi_ack_unref(vha, pla); + else + qla24xx_async_gnl(vha, fcport); + } +} + void qla2x00_do_work(struct scsi_qla_host *vha) { @@ -4395,10 +4515,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_login(vha, e->u.logio.fcport, e->u.logio.data); break; - case QLA_EVT_ASYNC_LOGIN_DONE: - qla2x00_async_login_done(vha, e->u.logio.fcport, - e->u.logio.data); - break; case QLA_EVT_ASYNC_LOGOUT: qla2x00_async_logout(vha, e->u.logio.fcport); break; @@ -4420,6 +4536,34 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_AENFX: qlafx00_process_aen(vha, e); break; + case QLA_EVT_GIDPN: + qla24xx_async_gidpn(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GPNID: + qla24xx_async_gpnid(vha, &e->u.gpnid.id); + break; + case QLA_EVT_GPNID_DONE: + qla24xx_async_gpnid_done(vha, e->u.iosb.sp); + break; + case QLA_EVT_NEW_SESS: + qla24xx_create_new_sess(vha, e); + break; + case QLA_EVT_GPDB: + qla24xx_async_gpdb(vha, e->u.fcport.fcport, + e->u.fcport.opt); + break; + case QLA_EVT_GPSC: + qla24xx_async_gpsc(vha, e->u.fcport.fcport); + break; + case QLA_EVT_UPD_FCPORT: + qla2x00_update_fcport(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GNL: + qla24xx_async_gnl(vha, e->u.fcport.fcport); + break; + case QLA_EVT_NACK: + qla24xx_do_nack_work(vha, e); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -4436,9 +4580,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) { fc_port_t *fcport; int status; - uint16_t next_loopid = 0; - struct qla_hw_data *ha = vha->hw; - uint16_t data[2]; + struct event_arg ea; list_for_each_entry(fcport, &vha->vp_fcports, list) { /* @@ -4449,77 +4591,38 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { fcport->login_retry--; if (fcport->flags & FCF_FABRIC_DEVICE) { - if (fcport->flags & FCF_FCP2_DEVICE) - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid = - ha->min_external_loopid; - status = qla2x00_find_new_loop_id( - vha, fcport); - if (status != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - - if (IS_ALOGIO_CAPABLE(ha)) { - fcport->flags |= FCF_ASYNC_SENT; - data[0] = 0; - data[1] = QLA_LOGIO_LOGIN_RETRIED; - status = qla2x00_post_async_login_work( - vha, fcport, data); - if (status == QLA_SUCCESS) - continue; - /* Attempt a retry. 
*/ - status = 1; - } else { - status = qla2x00_fabric_login(vha, - fcport, &next_loopid); - if (status == QLA_SUCCESS) { - int status2; - uint8_t opts; - - opts = 0; - if (fcport->flags & - FCF_FCP2_DEVICE) - opts |= BIT_1; - status2 = - qla2x00_get_port_database( - vha, fcport, opts); - if (status2 != QLA_SUCCESS) - status = 1; - } - } - } else + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", __func__, + fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RELOGIN; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + } else { status = qla2x00_local_device_login(vha, fcport); + if (status == QLA_SUCCESS) { + fcport->old_loop_id = fcport->loop_id; + ql_dbg(ql_dbg_disc, vha, 0x2003, + "Port login OK: logged in ID 0x%x.\n", + fcport->loop_id); + qla2x00_update_fcport(vha, fcport); + } else if (status == 1) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + /* retry the login again */ + ql_dbg(ql_dbg_disc, vha, 0x2007, + "Retrying %d login again loop_id 0x%x.\n", + fcport->login_retry, + fcport->loop_id); + } else { + fcport->login_retry = 0; + } - if (status == QLA_SUCCESS) { - fcport->old_loop_id = fcport->loop_id; - - ql_dbg(ql_dbg_disc, vha, 0x2003, - "Port login OK: logged in ID 0x%x.\n", - fcport->loop_id); - - qla2x00_update_fcport(vha, fcport); - - } else if (status == 1) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - /* retry the login again */ - ql_dbg(ql_dbg_disc, vha, 0x2007, - "Retrying %d login again loop_id 0x%x.\n", - fcport->login_retry, fcport->loop_id); - } else { - fcport->login_retry = 0; + if (fcport->login_retry == 0 && + status != QLA_SUCCESS) + qla2x00_clear_loop_id(fcport); } - - if (fcport->login_retry == 0 && status != QLA_SUCCESS) - qla2x00_clear_loop_id(fcport); } if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; @@ -5183,7 +5286,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) @@ -5192,6 +5296,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); + qla2x00_wait_for_sess_deletion(base_vha); + set_bit(UNLOADING, &base_vha->dpc_flags); qla2x00_delete_all_vps(ha, base_vha); @@ -5410,16 +5516,6 @@ qla2x00_do_dpc(void *data) qla2x00_update_fcports(base_vha); } - if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { - int ret; - ret = qla2x00_send_change_request(base_vha, 0x3, 0); - if (ret != QLA_SUCCESS) - ql_log(ql_log_warn, base_vha, 0x121, - "Failed to enable receiving of RSCN " - "requests: 0x%x.\n", ret); - clear_bit(SCR_PENDING, &base_vha->dpc_flags); - } - if (IS_QLAFX00(ha)) goto loop_resync_check; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e4fda84b959e..45f5077684f0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -55,8 +55,17 @@ MODULE_PARM_DESC(qlini_mode, "disabled on enabling target mode and then on disabling target mode " "enabled back; " "\"disabled\" - initiator mode will never be enabled; " + "\"dual\" - Initiator Modes will be enabled. 
Target Mode can be activated " + "when ready " "\"enabled\" (default) - initiator mode will always stay enabled."); +static int ql_dm_tgt_ex_pct = 50; +module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql_dm_tgt_ex_pct, + "For Dual Mode (qlini_mode=dual), this parameter determines " + "the percentage of exchanges/cmds FW will allocate resources " + "for Target mode."); + int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; static int temp_sam_status = SAM_STAT_BUSY; @@ -102,12 +111,10 @@ enum fcp_resp_rsp_codes { static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt, uint8_t); static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); -static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, - struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, @@ -120,6 +127,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *imm, int ha_locked); +static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, + fc_port_t *fcport, bool local); +void qlt_unreg_sess(struct fc_port *sess); /* * Global Variables */ @@ -140,21 +150,6 @@ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) wmb(); } -/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ -static struct qla_tgt_sess *qlt_find_sess_by_port_name( - struct qla_tgt *tgt, - const uint8_t *port_name) -{ - struct qla_tgt_sess *sess; - - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { - if (!memcmp(sess->port_name, port_name, WWN_SIZE)) - return sess; - } - - return NULL; -} - /* Might release hw lock, then reaquire!! 
*/ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) { @@ -229,6 +224,105 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } + +static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "qla_target(%d): dropping unknown ATIO_TYPE7, " + "because tgt is being stopped", vha->vp_idx); + goto out_term; + } + + u = kzalloc(sizeof(*u), GFP_ATOMIC); + if (u == NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u)); + /* It should be harmless and on the next retry should work well */ + goto out_term; + } + + u->vha = vha; + memcpy(&u->atio, atio, sizeof(*atio)); + INIT_LIST_HEAD(&u->cmd_list); + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_add_tail(&u->cmd_list, &vha->unknown_atio_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + schedule_delayed_work(&vha->unknown_atio_work, 1); + +out: + return; + +out_term: + qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0); + goto out; +} + +static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, + uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u, *t; + scsi_qla_host_t *host; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + uint8_t queued = 0; + + list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { + if (u->aborted) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because of Abort", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + goto abort; + } + + host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); + if (host != NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Requeuing unknown ATIO_TYPE7 %p", u); + qlt_24xx_atio_pkt(host, &u->atio, ha_locked); + } else if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because tgt is being stopped", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + } else { + ql_dbg(ql_dbg_async, vha, 0xffff, + "u %p, vha %p, host %p, sched again..", u, + vha, host); + if (!queued) { + queued = 1; + schedule_delayed_work(&vha->unknown_atio_work, + 1); + } + continue; + } + +abort: + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&u->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + kfree(u); + } +} + +void qlt_unknown_atio_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(to_delayed_work(work), + struct scsi_qla_host, unknown_atio_work); + + qlt_try_to_dequeue_unknown_atios(vha, 0); +} + static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint8_t ha_locked) { @@ -249,8 +343,14 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.d_id[0], atio->u.isp24.fcp_hdr.d_id[1], atio->u.isp24.fcp_hdr.d_id[2]); + + + qlt_queue_unknown_atio(vha, atio, ha_locked); break; } + if (unlikely(!list_empty(&vha->unknown_atio_list))) + qlt_try_to_dequeue_unknown_atios(vha, ha_locked); + qlt_24xx_atio_pkt(host, atio, ha_locked); break; } @@ -278,6 +378,31 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, break; } + case VP_RPT_ID_IOCB_TYPE: + qla24xx_report_id_acquisition(vha, + (struct vp_rpt_id_entry_24xx *)atio); + break; + + case 
ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)atio; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "qla_target(%d): Response pkt (ABTS_RECV_24XX) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, (response_t *)atio); + break; + + } + + /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ + default: ql_dbg(ql_dbg_tgt, vha, 0xe040, "qla_target(%d): Received unknown ATIO atio " @@ -395,22 +520,263 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) /* * All qlt_plogi_ack_t operations are protected by hardware_lock */ +static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NACK); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.nack.fcport = fcport; + e->u.nack.type = type; + memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); + return qla2x00_post_work(vha, e); +} + +static +void qla2x00_async_nack_sp_done(void *s, int res) +{ + struct srb *sp = (struct srb *)s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x %8phC type %d\n", + sp->name, res, sp->fcport->port_name, sp->type); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + sp->fcport->flags &= ~FCF_ASYNC_SENT; + sp->fcport->chip_reset = vha->hw->chip_reset; + + switch (sp->type) { + case SRB_NACK_PLOGI: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; + sp->fcport->logout_on_delete = 1; + break; + + case SRB_NACK_PRLI: + sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; + sp->fcport->deleted = 0; + + if (!sp->fcport->login_succ && + !IS_SW_RESV_ADDR(sp->fcport->d_id)) { + sp->fcport->login_succ = 1; + + vha->fcport_count++; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, sp->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, sp->fcport); + } + } + break; + + case SRB_NACK_LOGO: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); + break; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + char *c = NULL; + + fcport->flags |= FCF_ASYNC_SENT; + switch (type) { + case SRB_NACK_PLOGI: + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + c = "PLOGI"; + break; + case SRB_NACK_PRLI: + fcport->fw_login_state = DSC_LS_PRLI_PEND; + c = "PRLI"; + break; + case SRB_NACK_LOGO: + fcport->fw_login_state = DSC_LS_LOGO_PEND; + c = "LOGO"; + break; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = type; + sp->name = "nack"; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + sp->u.iocb_cmd.u.nack.ntfy = ntfy; + + sp->done = qla2x00_async_nack_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != 
QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x %s\n", + sp->name, fcport->port_name, sp->handle, c); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + fc_port_t *t; + unsigned long flags; + + switch (e->u.nack.type) { + case SRB_NACK_PRLI: + mutex_lock(&vha->vha_tgt.tgt_mutex); + t = qlt_create_sess(vha, e->u.nack.fcport, 0); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + if (t) { + ql_log(ql_log_info, vha, 0xffff, + "%s create sess success %p", __func__, t); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* create sess has an extra kref */ + vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + } + break; + } + qla24xx_async_notify_ack(vha, e->u.nack.fcport, + (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type); +} + +void qla24xx_delete_sess_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, del_work); + struct qla_hw_data *ha = fcport->vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + + if (fcport->se_sess) { + ha->tgt.tgt_ops->shutdown_sess(fcport); + ha->tgt.tgt_ops->put_sess(fcport); + } else { + qlt_unreg_sess(fcport); + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +/* + * Called from qla2x00_reg_remote_port() + */ +void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess = fcport; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!sess->se_sess) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_create_sess(vha, fcport, false); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + } else { + if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get fail sess %8phC \n", + __func__, sess->port_name); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, + "Reappeared sess %p\n", sess); + + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, + fcport->loop_id, + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); + } + + if (sess && sess->local) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, + "qla_target(%u): local session for " + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); + sess->local = 0; + } + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} /* * This is a zero-base ref-counting solution, since hardware_lock * guarantees that ref_count is not modified concurrently. 
* Upon successful return content of iocb is undefined */ -static qlt_plogi_ack_t * +static struct qlt_plogi_ack_t * qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, struct imm_ntfy_from_isp *iocb) { - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; list_for_each_entry(pla, &vha->plogi_ack_list, list) { if (pla->id.b24 == id->b24) { qlt_send_term_imm_notif(vha, &pla->iocb, 1); - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); return pla; } } @@ -423,50 +789,78 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, return NULL; } - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); pla->id = *id; list_add_tail(&pla->list, &vha->plogi_ack_list); return pla; } -static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla) +void qlt_plogi_ack_unref(struct scsi_qla_host *vha, + struct qlt_plogi_ack_t *pla) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; + port_id_t port_id; + uint16_t loop_id; + fc_port_t *fcport = pla->fcport; + BUG_ON(!pla->ref_count); pla->ref_count--; if (pla->ref_count) return; - ql_dbg(ql_dbg_async, vha, 0x5089, + ql_dbg(ql_dbg_disc, vha, 0x5089, "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" - " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name, - pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1], - pla->iocb.u.isp24.port_id[0], - le16_to_cpu(pla->iocb.u.isp24.nport_handle), - pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id); - qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0); + " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, + iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], + iocb->u.isp24.port_id[0], + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.exchange_address, iocb->ox_id); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + fcport->loop_id = loop_id; + fcport->d_id = port_id; + qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; + } list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } -static void -qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla, - struct qla_tgt_sess *sess, qlt_plogi_link_t link) +void +qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, + struct fc_port *sess, enum qlt_plogi_link_t link) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; /* Inc ref_count first because link might already be pointing at pla */ pla->ref_count++; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, + "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", + sess, link, sess->port_name, + iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], + pla->ref_count, pla, link); + if (sess->plogi_link[link]) qlt_plogi_ack_unref(vha, sess->plogi_link[link]); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, - "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" - " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name, - pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2], - pla->iocb.u.isp24.port_id[1], 
pla->iocb.u.isp24.port_id[0], - pla->ref_count); + if (link == QLT_PLOGI_LINK_SAME_WWN) + pla->fcport = sess; sess->plogi_link[link] = pla; } @@ -519,49 +913,45 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) static void qlt_free_session_done(struct work_struct *work) { - struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, + struct fc_port *sess = container_of(work, struct fc_port, free_work); struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; - fc_port_t fcport; + struct event_arg ea; + scsi_qla_host_t *base_vha; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->logout_on_delete, sess->keep_nport_handle, sess->send_els_logo); - BUG_ON(!tgt); - if (sess->send_els_logo) { - qlt_port_logo_t logo; - logo.id = sess->s_id; - logo.cmd_count = 0; - qlt_send_first_logo(vha, &logo); - } + if (!IS_SW_RESV_ADDR(sess->d_id)) { + if (sess->send_els_logo) { + qlt_port_logo_t logo; - if (sess->logout_on_delete) { - int rc; + logo.id = sess->d_id; + logo.cmd_count = 0; + qlt_send_first_logo(vha, &logo); + } - memset(&fcport, 0, sizeof(fcport)); - fcport.loop_id = sess->loop_id; - fcport.d_id = sess->s_id; - memcpy(fcport.port_name, sess->port_name, WWN_SIZE); - fcport.vha = vha; - fcport.tgt_session = sess; - - rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); - if (rc != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0xf085, - "Schedule logo failed sess %p rc %d\n", - sess, rc); - else - logout_started = true; + if (sess->logout_on_delete) { + int rc; + + rc = qla2x00_post_async_logout_work(vha, sess, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } } /* @@ -583,29 +973,61 @@ static void qlt_free_session_done(struct work_struct *work) msleep(100); } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, - "%s: sess %p logout completed\n", - __func__, sess); + ql_dbg(ql_dbg_disc, vha, 0xf087, + "%s: sess %p logout completed\n",__func__, sess); } - spin_lock_irqsave(&ha->hardware_lock, flags); + if (sess->logo_ack_needed) { + sess->logo_ack_needed = 0; + qla24xx_async_notify_ack(vha, sess, + (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); + } + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (sess->se_sess) { + sess->se_sess = NULL; + if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) + tgt->sess_count--; + } + + sess->disc_state = DSC_DELETED; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->deleted = QLA_SESS_DELETED; + sess->login_retry = vha->hw->login_retry_count; + + if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { + vha->fcport_count--; + sess->login_succ = 0; + } + + if (sess->chip_reset != sess->vha->hw->chip_reset) + qla2x00_clear_loop_id(sess); + + if (sess->conflict) { + sess->conflict->login_pause = 0; + sess->conflict = NULL; + if (!test_bit(UNLOADING, &vha->dpc_flags)) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } { - qlt_plogi_ack_t *own = + struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - qlt_plogi_ack_t *con = + struct qlt_plogi_ack_t *con = sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; + struct imm_ntfy_from_isp *iocb; if (con) 
{ + iocb = &con->iocb; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, - "se_sess %p / sess %p port %8phC is gone," - " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", - sess->se_sess, sess, sess->port_name, - own ? "releasing own PLOGI" : - "no own PLOGI pending", - own ? own->ref_count : -1, - con->iocb.u.isp24.port_name, con->ref_count); + "se_sess %p / sess %p port %8phC is gone," + " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : "no own PLOGI pending", + own ? own->ref_count : -1, + iocb->u.isp24.port_name, con->ref_count); qlt_plogi_ack_unref(vha, con); + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", @@ -615,59 +1037,64 @@ static void qlt_free_session_done(struct work_struct *work) own ? own->ref_count : -1); } - if (own) + if (own) { + sess->fw_login_state = DSC_LS_PLOGI_PEND; qlt_plogi_ack_unref(vha, own); + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + } } - - list_del(&sess->sess_list_entry); - - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, - "Unregistration of sess %p finished\n", sess); + "Unregistration of sess %p %8phC finished fcp_cnt %d\n", + sess, sess->port_name, vha->fcport_count); - kfree(sess); - /* - * We need to protect against race, when tgt is freed before or - * inside wake_up() - */ - tgt->sess_count--; - if (tgt->sess_count == 0) + if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); + + base_vha = pci_get_drvdata(ha->pdev); + if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) + return; + + if (!tgt || !tgt->tgt_stop) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_DELETE_DONE; + ea.fcport = sess; + qla2x00_fcport_event_handler(vha, &ea); + } } /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_release_session(struct kref *kref) +void qlt_unreg_sess(struct fc_port *sess) { - struct qla_tgt_sess *sess = - container_of(kref, struct qla_tgt_sess, sess_kref); struct scsi_qla_host *vha = sess->vha; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s sess %p for deletion %8phC\n", + __func__, sess, sess->port_name); + if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - if (!list_empty(&sess->del_list_entry)) - list_del_init(&sess->del_list_entry); + qla2x00_mark_device_lost(vha, sess, 1, 1); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + sess->disc_state = DSC_DELETE_PEND; + sess->last_rscn_gen = sess->rscn_gen; + sess->last_login_gen = sess->login_gen; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } - -void qlt_put_sess(struct qla_tgt_sess *sess) -{ - if (!sess) - return; - - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); - kref_put(&sess->sess_kref, qlt_release_session); -} -EXPORT_SYMBOL(qlt_put_sess); +EXPORT_SYMBOL(qlt_unreg_sess); static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; @@ -680,31 +1107,6 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -#if 
0 /* FIXME: do we need to choose a session here? */ - if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { - sess = list_entry(ha->tgt.qla_tgt->sess_list.next, - typeof(*sess), sess_list_entry); - switch (mcmd) { - case QLA_TGT_NEXUS_LOSS_SESS: - mcmd = QLA_TGT_NEXUS_LOSS; - break; - case QLA_TGT_ABORT_ALL_SESS: - mcmd = QLA_TGT_ABORT_ALL; - break; - case QLA_TGT_NEXUS_LOSS: - case QLA_TGT_ABORT_ALL: - break; - default: - ql_dbg(ql_dbg_tgt, vha, 0xe046, - "qla_target(%d): Not allowed " - "command %x in %s", vha->vp_idx, - mcmd, __func__); - sess = NULL; - break; - } - } else - sess = NULL; -#endif } else { spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); @@ -726,57 +1128,69 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } +static void qla24xx_chk_fcp_state(struct fc_port *sess) +{ + if (sess->chip_reset != sess->vha->hw->chip_reset) { + sess->logout_on_delete = 0; + sess->logo_ack_needed = 0; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->scan_state = 0; + } +} + /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, +void qlt_schedule_sess_for_deletion(struct fc_port *sess, bool immediate) { struct qla_tgt *tgt = sess->tgt; - uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) { - /* Upgrade to unconditional deletion in case it was temporary */ - if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) - list_del(&sess->del_list_entry); - else + if (sess->disc_state == DSC_DELETE_PEND) + return; + + if (sess->disc_state == DSC_DELETED) { + if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) + wake_up_all(&tgt->waitQ); + if (sess->vha->fcport_count == 0) + wake_up_all(&sess->vha->fcport_waitQ); + + if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) return; } - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, - "Scheduling sess %p for deletion\n", sess); + sess->disc_state = DSC_DELETE_PEND; - if (immediate) { - dev_loss_tmo = 0; - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - list_add(&sess->del_list_entry, &tgt->del_sess_list); - } else { - sess->deleted = QLA_SESS_DELETION_PENDING; - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - } + if (sess->deleted == QLA_SESS_DELETED) + sess->logout_on_delete = 0; - sess->expires = jiffies + dev_loss_tmo * HZ; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" - " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, - dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, - sess->generation); + ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + "Scheduling sess %p for deletion\n", sess); - if (immediate) - mod_delayed_work(system_wq, &tgt->sess_del_work, 0); - else - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - jiffies); + schedule_work(&sess->del_work); +} + +void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) +{ + unsigned long flags; + struct qla_hw_data *ha = sess->vha->hw; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + qlt_schedule_sess_for_deletion(sess, 1); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* 
ha->tgt.sess_lock supposed to be held on entry */ static void qlt_clear_tgt_db(struct qla_tgt *tgt) { - struct qla_tgt_sess *sess; + struct fc_port *sess; + scsi_qla_host_t *vha = tgt->vha; - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) - qlt_schedule_sess_for_deletion(sess, true); + list_for_each_entry(sess, &vha->vp_fcports, list) { + if (sess->se_sess) + qlt_schedule_sess_for_deletion(sess, 1); + } /* At this point tgt could be already dead */ } @@ -830,240 +1244,84 @@ out_free_id_list: return res; } -/* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_undelete_sess(struct qla_tgt_sess *sess) -{ - BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - - list_del_init(&sess->del_list_entry); - sess->deleted = 0; -} - -static void qlt_del_sess_work_fn(struct delayed_work *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, - sess_del_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; - unsigned long flags, elapsed; - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - while (!list_empty(&tgt->del_sess_list)) { - sess = list_entry(tgt->del_sess_list.next, typeof(*sess), - del_list_entry); - elapsed = jiffies; - if (time_after_eq(elapsed, sess->expires)) { - /* No turning back */ - list_del_init(&sess->del_list_entry); - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, - "Timeout: sess %p about to be deleted\n", - sess); - if (sess->se_sess) - ha->tgt.tgt_ops->shutdown_sess(sess); - qlt_put_sess(sess); - } else { - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - elapsed); - break; - } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -} - /* * Adds an extra ref to allow to drop hw lock after adding sess to the list. * Caller must put it. 
*/ -static struct qla_tgt_sess *qlt_create_sess( +static struct fc_port *qlt_create_sess( struct scsi_qla_host *vha, fc_port_t *fcport, bool local) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; - /* Check to avoid double sessions */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, - sess_list_entry) { - if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, - "Double sess %p found (s_id %x:%x:%x, " - "loop_id %d), updating to d_id %x:%x:%x, " - "loop_id %d", sess, sess->s_id.b.domain, - sess->s_id.b.al_pa, sess->s_id.b.area, - sess->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.al_pa, fcport->d_id.b.area, - fcport->loop_id); - - /* Cannot undelete at this point */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, - flags); - return NULL; - } - - if (sess->deleted) - qlt_undelete_sess(sess); - - if (!sess->se_sess) { - if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, - &sess->port_name[0], sess) < 0) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return NULL; - } - } - - kref_get(&sess->sess_kref); - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - - if (sess->local && !local) - sess->local = 0; - - qlt_do_generation_tick(vha, &sess->generation); - - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (vha->vha_tgt.qla_tgt->tgt_stop) + return NULL; - return sess; + if (fcport->se_sess) { + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, - "qla_target(%u): session allocation failed, all commands " - "from port %8phC will be refused", vha->vp_idx, - fcport->port_name); - - return NULL; + return fcport; } sess->tgt = vha->vha_tgt.qla_tgt; - sess->vha = vha; - sess->s_id = fcport->d_id; - sess->loop_id = fcport->loop_id; sess->local = local; - kref_init(&sess->sess_kref); - INIT_LIST_HEAD(&sess->del_list_entry); - /* Under normal circumstances we want to logout from firmware when + /* + * Under normal circumstances we want to logout from firmware when * session eventually ends and release corresponding nport handle. * In the exception cases (e.g. when new PLOGI is waiting) corresponding - * code will adjust these flags as necessary. */ + * code will adjust these flags as necessary. 
+ */ sess->logout_on_delete = 1; sess->keep_nport_handle = 0; + sess->logout_completed = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, - "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", - sess, vha->vha_tgt.qla_tgt); - - sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); - BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); - memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); - vha->vha_tgt.qla_tgt->sess_count++; - qlt_do_generation_tick(vha, &sess->generation); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, - "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " - "s_id %x:%x:%x, confirmed completion %ssupported) added\n", - vha->vp_idx, local ? "local " : "", fcport->port_name, - fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, - sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - - /* - * Determine if this fc_port->port_name is allowed to access - * target mode using explict NodeACLs+MappedLUNs, or using - * TPG demo mode. If this is successful a target mode FC nexus - * is created. - */ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, &fcport->port_name[0], sess) < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "(%d) %8phC check_initiator_node_acl failed\n", + vha->vp_idx, fcport->port_name); return NULL; } else { + kref_init(&fcport->sess_kref); /* - * Take an extra reference to ->sess_kref here to handle qla_tgt_sess - * access across ->tgt.sess_lock reaquire. + * Take an extra reference to ->sess_kref here to handle + * fc_port access across ->tgt.sess_lock reacquire. */ - kref_get(&sess->sess_kref); - } - - return sess; -} - -/* - * Called from qla2x00_reg_remote_port() - */ -void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; - unsigned long flags; - - if (!vha->hw->tgt.tgt_ops) - return; - - if (!tgt || (fcport->port_type != FCT_INITIATOR)) - return; + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; + } - if (qla_ini_mode_enabled(vha)) - return; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (!IS_SW_RESV_ADDR(sess->d_id)) + vha->vha_tgt.qla_tgt->sess_count++; - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - if (tgt->tgt_stop) { + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - mutex_lock(&vha->vha_tgt.tgt_mutex); - sess = qlt_create_sess(vha, fcport, false); - mutex_unlock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - /* Point of no return */ - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; - } else { - kref_get(&sess->sess_kref); - - if (sess->deleted) { - qlt_undelete_sess(sess); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, - "qla_target(%u): %ssession for port %8phC " - "(loop ID %d) reappeared\n", vha->vp_idx, - sess->local ? 
"local " : "", sess->port_name, - sess->loop_id); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, + "Adding sess %p se_sess %p to tgt %p sess_count %d\n", + sess, sess->se_sess, vha->vha_tgt.qla_tgt, + vha->vha_tgt.qla_tgt->sess_count); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, - "Reappeared sess %p\n", sess); - } - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - if (sess && sess->local) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, - "qla_target(%u): local session for " - "port %8phC (loop ID %d) became global\n", vha->vp_idx, - fcport->port_name, sess->loop_id); - sess->local = 0; - } - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return sess; } /* @@ -1074,7 +1332,7 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; if (!vha->hw->tgt.tgt_ops) @@ -1088,8 +1346,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { + if (!sess->se_sess) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } @@ -1120,12 +1377,12 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) * We need to protect against race, when tgt is freed before or * inside wake_up() */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, - "tgt %p, empty(sess_list)=%d sess_count=%d\n", - tgt, list_empty(&tgt->sess_list), tgt->sess_count); + "tgt %p, sess_count=%d\n", + tgt, tgt->sess_count); res = (tgt->sess_count == 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; } @@ -1173,8 +1430,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt) mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); - flush_delayed_work(&tgt->sess_del_work); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, "Waiting for sess works (tgt %p)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); @@ -1186,14 +1441,13 @@ int qlt_stop_phase1(struct qla_tgt *tgt) spin_unlock_irqrestore(&tgt->sess_work_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, - "Waiting for tgt %p: list_empty(sess_list)=%d " - "sess_count=%d\n", tgt, list_empty(&tgt->sess_list), - tgt->sess_count); + "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); /* Big hammer */ - if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) + if (!ha->flags.host_shutting_down && + (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) qlt_disable_vha(vha); /* Wait for sessions to clear out (just in case) */ @@ -1320,6 +1574,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; + nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags 
= ntfy->u.isp24.flags & @@ -1489,6 +1744,14 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) } } + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { if (tag == cmd->atio.u.isp24.exchange_addr) { cmd->aborted = 1; @@ -1525,6 +1788,18 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, if (op_key == key && op_lun == lun) op->aborted = true; } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key; + u64 op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key; uint32_t cmd_lun; @@ -1540,7 +1815,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, - struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) + struct abts_recv_from_24xx *abts, struct fc_port *sess) { struct qla_hw_data *ha = vha->hw; struct se_session *se_sess = sess->se_sess; @@ -1549,8 +1824,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, u32 lun = 0; int rc; bool found_lun = false; + unsigned long flags; - spin_lock(&se_sess->sess_cmd_lock); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); @@ -1560,7 +1836,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, break; } } - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); /* cmd not in LIO lists, look in qla list */ if (!found_lun) { @@ -1592,8 +1868,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_ABTS; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, @@ -1613,7 +1890,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t tag = abts->exchange_addr_to_abort; uint8_t s_id[3]; int rc; @@ -1665,7 +1942,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); return; } @@ -1763,10 +2040,23 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) return; } - if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) - qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, - 0, 0, 0, 0, 0, 0); - else { + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { + if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_LOGO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_PRLO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_TPRLO) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "TM response logo %phC status %#x state 
%#x", + mcmd->sess->port_name, mcmd->fc_tm_rsp, + mcmd->flags); + qlt_schedule_sess_for_deletion_lock(mcmd->sess); + } else { + qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, + 0, 0, 0, 0, 0, 0); + } + } else { if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); @@ -2182,95 +2472,6 @@ static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, cmd->conf_compl_supported; } -#ifdef CONFIG_QLA_TGT_DEBUG_SRR -/* - * Original taken from the XFS code - */ -static unsigned long qlt_srr_random(void) -{ - static int Inited; - static unsigned long RandomValue; - static DEFINE_SPINLOCK(lock); - /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ - register long rv; - register long lo; - register long hi; - unsigned long flags; - - spin_lock_irqsave(&lock, flags); - if (!Inited) { - RandomValue = jiffies; - Inited = 1; - } - rv = RandomValue; - hi = rv / 127773; - lo = rv % 127773; - rv = 16807 * lo - 2836 * hi; - if (rv <= 0) - rv += 2147483647; - RandomValue = rv; - spin_unlock_irqrestore(&lock, flags); - return rv; -} - -static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{ -#if 0 /* This is not a real status packets lost, so it won't lead to SRR */ - if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) - == 50) { - *xmit_type &= ~QLA_TGT_XMIT_STATUS; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, - "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); - } -#endif - /* - * It's currently not possible to simulate SRRs for FCP_WRITE without - * a physical link layer failure, so don't even try here.. - */ - if (cmd->dma_data_direction != DMA_FROM_DEVICE) - return; - - if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && - ((qlt_srr_random() % 100) == 20)) { - int i, leave = 0; - unsigned int tot_len = 0; - - while (leave == 0) - leave = qlt_srr_random() % cmd->sg_cnt; - - for (i = 0; i < leave; i++) - tot_len += cmd->sg[i].length; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, - "Cutting cmd %p (tag %d) buffer" - " tail to len %d, sg_cnt %d (cmd->bufflen %d," - " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave, - cmd->bufflen, cmd->sg_cnt); - - cmd->bufflen = tot_len; - cmd->sg_cnt = leave; - } - - if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { - unsigned int offset = qlt_srr_random() % cmd->bufflen; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, - "Cutting cmd %p (tag %d) buffer head " - "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset, - cmd->bufflen); - if (offset == 0) - *xmit_type &= ~QLA_TGT_XMIT_DATA; - else if (qlt_set_data_offset(cmd, offset)) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, - "qlt_set_data_offset() failed (tag %d)", se_cmd->tag); - } - } -} -#else -static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{} -#endif - static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, struct qla_tgt_prm *prm) { @@ -2288,7 +2489,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, int i; if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { - if (prm->cmd->se_cmd.scsi_status != 0) { + if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, "Skipping EXPLICIT_CONFORM and " "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " @@ -2672,7 +2873,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, int res; spin_lock_irqsave(&ha->hardware_lock, flags); - if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (cmd->sess && 
cmd->sess->deleted) { cmd->state = QLA_TGT_STATE_PROCESSED; if (cmd->sess->logout_completed) /* no need to terminate. FW already freed exchange. */ @@ -2685,7 +2886,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_unlock_irqrestore(&ha->hardware_lock, flags); memset(&prm, 0, sizeof(prm)); - qlt_check_srr_debug(cmd, &xmit_type); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", @@ -2848,7 +3048,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || - (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { + (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -3296,7 +3496,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) return EIO; } cmd->aborted = 1; - cmd->cmd_flags |= BIT_6; + cmd->trc_flags |= TRC_ABORT; spin_unlock_irqrestore(&cmd->cmd_lock, flags); qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); @@ -3306,7 +3506,7 @@ EXPORT_SYMBOL(qlt_abort_cmd); void qlt_free_cmd(struct qla_tgt_cmd *cmd) { - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, "%s: se_cmd[%p] ox_id %04x\n", @@ -3335,90 +3535,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) } EXPORT_SYMBOL(qlt_free_cmd); -/* ha->hardware_lock supposed to be held on entry */ -static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd, void *ctio) -{ - struct qla_tgt_srr_ctio *sc; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_imm *imm; - - tgt->ctio_srr_id++; - cmd->cmd_flags |= BIT_15; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, - "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); - - if (!ctio) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, - "qla_target(%d): SRR CTIO, but ctio is NULL\n", - vha->vp_idx); - return -EINVAL; - } - - sc = kzalloc(sizeof(*sc), GFP_ATOMIC); - if (sc != NULL) { - sc->cmd = cmd; - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - sc->srr_id = tgt->ctio_srr_id; - list_add_tail(&sc->srr_list_entry, - &tgt->srr_ctio_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, - "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(imm, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == sc->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR IMM, deleting CTIO " - "SRR %p\n", vha->vp_idx, - tgt->ctio_srr_id, sc); - list_del(&sc->srr_list_entry); - spin_unlock(&tgt->srr_lock); - - kfree(sc); - return -EINVAL; - } - } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_imm *ti; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, - "qla_target(%d): Unable to allocate SRR CTIO entry\n", - vha->vp_idx); - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == tgt->ctio_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, - "IMM SRR %p deleted (id %d)\n", - imm, imm->srr_id); - list_del(&imm->srr_list_entry); - qlt_reject_free_srr_imm(vha, imm, 1); - } - } - spin_unlock(&tgt->srr_lock); - - return -ENOMEM; - } - - 
return 0; -} - /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ @@ -3527,7 +3643,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_17; + cmd->trc_flags |= TRC_FLUSH; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3632,20 +3748,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ cmd->sess->logout_on_delete = 0; cmd->sess->send_els_logo = 1; - qlt_schedule_sess_for_deletion(cmd->sess, true); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, cmd->sess->port_name); + + qlt_schedule_sess_for_deletion_lock(cmd->sess); } break; } - case CTIO_SRR_RECEIVED: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, - "qla_target(%d): CTIO with SRR_RECEIVED" - " status %x received (state %x, se_cmd %p)\n", - vha->vp_idx, status, cmd->state, se_cmd); - if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) - break; - else - return; - case CTIO_DIF_ERROR: { struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; @@ -3693,7 +3803,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (!cmd->aborted)) { - cmd->cmd_flags |= BIT_13; + cmd->trc_flags |= TRC_CTIO_ERR; if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; } @@ -3701,7 +3811,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - cmd->cmd_flags |= BIT_12; + cmd->trc_flags |= TRC_CTIO_DONE; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; @@ -3711,11 +3821,11 @@ skip_term: ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->aborted) { - cmd->cmd_flags |= BIT_18; + cmd->trc_flags |= TRC_CTIO_ABORTED; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { - cmd->cmd_flags |= BIT_19; + cmd->trc_flags |= TRC_CTIO_STRANGE; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3762,7 +3872,7 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, return fcp_task_attr; } -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, uint8_t *); /* * Process context for I/O path into tcm_qla2xxx code @@ -3772,7 +3882,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -3780,7 +3890,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) int ret, fcp_task_attr, data_dir, bidi = 0; cmd->cmd_in_wq = 0; - cmd->cmd_flags |= BIT_1; + cmd->trc_flags |= TRC_DO_WORK; if (tgt->tgt_stop) goto out_term; @@ -3822,7 +3932,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; @@ -3832,7 +3942,7 @@ out_term: * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
*/ - cmd->cmd_flags |= BIT_2; + cmd->trc_flags |= TRC_DO_WORK_ERR; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); @@ -3841,7 +3951,7 @@ out_term: spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -3859,7 +3969,7 @@ static void qlt_do_work(struct work_struct *work) } static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, - struct qla_tgt_sess *sess, + struct fc_port *sess, struct atio_from_isp *atio) { struct se_session *se_sess = sess->se_sess; @@ -3883,7 +3993,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; - cmd->cmd_flags = 0; + cmd->trc_flags = 0; cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = vha->hw->chip_reset; @@ -3900,7 +4010,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) struct qla_tgt_sess_op, work); scsi_qla_host_t *vha = op->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; @@ -3941,11 +4051,12 @@ static void qlt_create_sess_from_atio(struct work_struct *work) if (!cmd) { spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); return; } + /* * __qlt_do_work() will call qlt_put_sess() to release * the extra reference taken above by qlt_make_local_sess() @@ -3953,13 +4064,11 @@ static void qlt_create_sess_from_atio(struct work_struct *work) __qlt_do_work(cmd); kfree(op); return; - out_term: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); - } /* ha->hardware_lock supposed to be held on entry */ @@ -3968,8 +4077,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; + unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x3061, @@ -3998,7 +4108,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* Another WWN used to have our s_id. Our PLOGI scheduled its * session deletion, but it's still in sess_del_work wq */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { ql_dbg(ql_dbg_io, vha, 0x3061, "New command while old session %p is being deleted\n", sess); @@ -4008,24 +4118,32 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
*/ - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "%s: kref_get fail, %8phC oxid %x \n", + __func__, sess->port_name, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + return -EFAULT; + } cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - qlt_put_sess(sess); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -ENOMEM; } cmd->cmd_in_wq = 1; - cmd->cmd_flags |= BIT_0; + cmd->trc_flags |= TRC_NEW_CMD; cmd->se_cmd.cpuid = ha->msix_count ? ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&cmd->work, qlt_do_work); if (ha->msix_count) { @@ -4043,7 +4161,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, } /* ha->hardware_lock supposed to be held on entry */ -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags) { struct scsi_qla_host *vha = sess->vha; @@ -4051,7 +4169,6 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct qla_tgt_mgmt_cmd *mcmd; struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; - uint8_t tmr_func; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (!mcmd) { @@ -4073,74 +4190,12 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, mcmd->reset_count = vha->hw->chip_reset; switch (fn) { - case QLA_TGT_CLEAR_ACA: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, - "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_ACA; - break; - - case QLA_TGT_TARGET_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, - "qla_target(%d): TARGET_RESET received\n", - sess->vha->vp_idx); - tmr_func = TMR_TARGET_WARM_RESET; - break; - case QLA_TGT_LUN_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, - "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); - tmr_func = TMR_LUN_RESET; - abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - break; - - case QLA_TGT_CLEAR_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, - "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_TASK_SET; - break; - - case QLA_TGT_ABORT_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, - "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_ABORT_TASK_SET; - break; -#if 0 - case QLA_TGT_ABORT_ALL: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, - "qla_target(%d): Doing ABORT_ALL_TASKS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_ABORT_ALL_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, - "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, - "qla_target(%d): Doing NEXUS_LOSS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, - "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); - tmr_func = 0; - break; -#endif - default: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, - "qla_target(%d): Unknown task mgmt fn 0x%x\n", - sess->vha->vp_idx, fn); - mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); - return -ENOSYS; + 
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); + break; } - res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); + res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); if (res != 0) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", @@ -4158,7 +4213,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t lun, unpacked_lun; int fn; unsigned long flags; @@ -4183,7 +4238,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + if (sess->deleted) return -EFAULT; return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); @@ -4191,7 +4246,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) /* ha->hardware_lock supposed to be held on entry */ static int __qlt_abort_task(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) + struct imm_ntfy_from_isp *iocb, struct fc_port *sess) { struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; @@ -4215,8 +4270,9 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, lun = a->u.isp24.fcp_cmnd.lun; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, le16_to_cpu(iocb->u.isp2x.seq_id)); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, @@ -4234,7 +4290,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; int loop_id; unsigned long flags; @@ -4257,22 +4313,20 @@ static int qlt_abort_task(struct scsi_qla_host *vha, void qlt_logo_completion_handler(fc_port_t *fcport, int rc) { - if (fcport->tgt_session) { - if (rc != MBS_COMMAND_COMPLETE) { - ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, - "%s: se_sess %p / sess %p from" - " port %8phC loop_id %#04x s_id %02x:%02x:%02x" - " LOGO failed: %#x\n", - __func__, - fcport->tgt_session->se_sess, - fcport->tgt_session, - fcport->port_name, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, rc); - } - - fcport->tgt_session->logout_completed = 1; + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->se_sess, + fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); } + + fcport->logout_completed = 1; } /* @@ -4282,16 +4336,16 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc) * deletion. Returns existing session with matching wwn if present. * Null otherwise. 
*/ -static struct qla_tgt_sess * -qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, - port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess) +struct fc_port * +qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, + port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) { - struct qla_tgt_sess *sess = NULL, *other_sess; + struct fc_port *sess = NULL, *other_sess; uint64_t other_wwn; *conflict_sess = NULL; - list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(other_sess, &vha->vp_fcports, list) { other_wwn = wwn_to_u64(other_sess->port_name); @@ -4302,9 +4356,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport_id collision */ - if (port_id.b24 == other_sess->s_id.b24) { + if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4320,6 +4374,11 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + other_sess->keep_nport_handle = 1; *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, @@ -4329,8 +4388,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport handle collision */ - if (loop_id == other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + if ((loop_id == other_sess->loop_id) && + (loop_id != FC_NO_LOOP_ID)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4358,11 +4418,21 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) spin_lock(&vha->cmd_list_lock); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { op->aborted = true; count++; } } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { + op->aborted = true; + count++; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); if (cmd_key == key) { @@ -4383,13 +4453,13 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL; + struct fc_port *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id; uint16_t wd3_lo; int res = 0; - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; unsigned long flags; wwn = wwn_to_u64(iocb->u.isp24.port_name); @@ -4401,9 +4471,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, - "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", - vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + ql_dbg(ql_dbg_disc, vha, 0xf026, + "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", + vha->vp_idx, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], 
iocb->u.isp24.port_id[0], + iocb->u.isp24.status_subcode, loop_id, + iocb->u.isp24.port_name); /* res = 1 means ack at the end of thread * res = 0 means ack async/later. @@ -4416,12 +4489,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, - port_id, loop_id, &conflict_sess); + sess = qlt_find_sess_invalidate_other(vha, wwn, + port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } - if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) { + if (IS_SW_RESV_ADDR(port_id)) { res = 1; break; } @@ -4429,42 +4502,66 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); if (!pla) { qlt_send_term_imm_notif(vha, iocb, 1); - - res = 0; break; } res = 0; - if (conflict_sess) + if (conflict_sess) { + conflict_sess->login_gen++; qlt_plogi_ack_link(vha, pla, conflict_sess, - QLT_PLOGI_LINK_CONFLICT); + QLT_PLOGI_LINK_CONFLICT); + } - if (!sess) + if (!sess) { + pla->ref_count++; + qla24xx_post_newsess_work(vha, &port_id, + iocb->u.isp24.port_name, pla); + res = 0; break; + } qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); - /* - * Under normal circumstances we want to release nport handle - * during LOGO process to avoid nport handle leaks inside FW. - * The exception is when LOGO is done while another PLOGI with - * the same nport handle is waiting as might be the case here. - * Note: there is always a possibily of a race where session - * deletion has already started for other reasons (e.g. ACL - * removal) and now PLOGI arrives: - * 1. if PLOGI arrived in FW after nport handle has been freed, - * FW must have assigned this PLOGI a new/same handle and we - * can proceed ACK'ing it as usual when session deletion - * completes. - * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT - * bit reached it, the handle has now been released. We'll - * get an error when we ACK this PLOGI. Nothing will be sent - * back to initiator. Initiator should eventually retry - * PLOGI and situation will correct itself. - */ - sess->keep_nport_handle = ((sess->loop_id == loop_id) && - (sess->s_id.b24 == port_id.b24)); - qlt_schedule_sess_for_deletion(sess, true); + sess->fw_login_state = DSC_LS_PLOGI_PEND; + sess->d_id = port_id; + sess->login_gen++; + + switch (sess->disc_state) { + case DSC_DELETED: + qlt_plogi_ack_unref(vha, pla); + break; + + default: + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. 
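Condensing the rule spelled out in the comment above into one predicate: the nport handle is kept across the LOGO only when the pending PLOGI reuses both the existing loop ID and the existing 24-bit port ID. A standalone restatement with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Keep the firmware N_Port handle only if the new PLOGI arrived on the
     * same handle and the same port id as the session being torn down. */
    bool keep_nport_handle(uint16_t cur_loop_id, uint32_t cur_port_id24,
                           uint16_t new_loop_id, uint32_t new_port_id24)
    {
        return cur_loop_id == new_loop_id && cur_port_id24 == new_port_id24;
    }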
+ */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->d_id.b24 == port_id.b24)); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, sess->port_name); + + + qlt_schedule_sess_for_deletion_lock(sess); + break; + } + break; case ELS_PRLI: @@ -4472,8 +4569,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, - loop_id, &conflict_sess); + sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, + loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } @@ -4487,7 +4584,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } if (sess != NULL) { - if (sess->deleted) { + if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { /* * Impatient initiator sent PRLI before last * PLOGI could finish. Will force him to re-try, @@ -4511,11 +4608,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, sess->local = 0; sess->loop_id = loop_id; - sess->s_id = port_id; + sess->d_id = port_id; + sess->fw_login_state = DSC_LS_PRLI_PEND; if (wd3_lo & BIT_7) sess->conf_compl_supported = 1; + if ((wd3_lo & BIT_4) == 0) + sess->port_type = FCT_INITIATOR; + else + sess->port_type = FCT_TARGET; } res = 1; /* send notify ack */ @@ -4525,15 +4627,61 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { - /* todo: else - create sess here. */ - res = 1; /* send notify ack */ - } + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; + } + } break; + + case ELS_TPRLO: + if (le16_to_cpu(iocb->u.isp24.flags) & + NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { + loop_id = 0xFFFF; + qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); + res = 1; + break; + } + /* drop through */ case ELS_LOGO: case ELS_PRLO: + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = qla2x00_find_fcport_by_loopid(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (sess) { + sess->login_gen++; + sess->fw_login_state = DSC_LS_LOGO_PEND; + sess->logo_ack_needed = 1; + memcpy(sess->iocb, iocb, IOCB_SIZE); + } + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: logo %llx res %d sess %p ", + __func__, wwn, res, sess); + if (res == 0) { + /* + * cmd went upper layer, look for qlt_xmit_tm_rsp() + * for LOGO_ACK & sess delete + */ + BUG_ON(!sess); + res = 0; + } else { + /* cmd did not go to upper layer. */ + if (sess) { + qlt_schedule_sess_for_deletion_lock(sess); + res = 0; + } + /* else logo will be ack */ + } break; case ELS_PDISC: case ELS_ADISC: @@ -4544,6 +4692,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } + + sess = qla2x00_find_fcport_by_wwpn(vha, + iocb->u.isp24.port_name, 1); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "sess %p lid %d|%d DS %d LS %d\n", + sess, sess->loop_id, loop_id, + sess->disc_state, sess->fw_login_state); + } + res = 1; /* send notify ack */ break; } @@ -4560,451 +4718,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, return res; } -static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) -{ -#if 1 - /* - * FIXME: Reject non zero SRR relative offset until we can test - * this code properly. 
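The PRLI branch above derives the remote port's role and confirmed-completion support from service-parameter word 3. A small standalone decode mirroring those checks; the macro and struct names here are illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    #define PRLI_W3_TARGET_FUNC  (1u << 4)  /* bit 4 set => target function      */
    #define PRLI_W3_CONF_COMPL   (1u << 7)  /* bit 7 set => confirmed completion */

    struct prli_roles {
        bool is_target;               /* otherwise treated as an initiator */
        bool conf_compl_supported;
    };

    /* Decode the low 16 bits of PRLI service-parameter word 3 (wd3_lo). */
    struct prli_roles decode_prli_wd3(uint16_t wd3_lo)
    {
        struct prli_roles r;

        r.is_target            = (wd3_lo & PRLI_W3_TARGET_FUNC) != 0;
        r.conf_compl_supported = (wd3_lo & PRLI_W3_CONF_COMPL)  != 0;
        return r;
    }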
- */ - pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); - return -1; -#else - struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; - size_t first_offset = 0, rem_offset = offset, tmp = 0; - int i, sg_srr_cnt, bufflen = 0; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, - "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " - "cmd->sg_cnt: %u, direction: %d\n", - cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, - "Missing cmd->sg or zero cmd->sg_cnt in" - " qla_tgt_set_data_offset\n"); - return -EINVAL; - } - /* - * Walk the current cmd->sg list until we locate the new sg_srr_start - */ - for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, - "sg[%d]: %p page: %p, length: %d, offset: %d\n", - i, sg, sg_page(sg), sg->length, sg->offset); - - if ((sg->length + tmp) > offset) { - first_offset = rem_offset; - sg_srr_start = sg; - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, - "Found matching sg[%d], using %p as sg_srr_start, " - "and using first_offset: %zu\n", i, sg, - first_offset); - break; - } - tmp += sg->length; - rem_offset -= sg->length; - } - - if (!sg_srr_start) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, - "Unable to locate sg_srr_start for offset: %u\n", offset); - return -EINVAL; - } - sg_srr_cnt = (cmd->sg_cnt - i); - - sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); - if (!sg_srr) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, - "Unable to allocate sgp\n"); - return -ENOMEM; - } - sg_init_table(sg_srr, sg_srr_cnt); - sgp = &sg_srr[0]; - /* - * Walk the remaining list for sg_srr_start, mapping to the newly - * allocated sg_srr taking first_offset into account. - */ - for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { - if (first_offset) { - sg_set_page(sgp, sg_page(sg), - (sg->length - first_offset), first_offset); - first_offset = 0; - } else { - sg_set_page(sgp, sg_page(sg), sg->length, 0); - } - bufflen += sgp->length; - - sgp = sg_next(sgp); - if (!sgp) - break; - } - - cmd->sg = sg_srr; - cmd->sg_cnt = sg_srr_cnt; - cmd->bufflen = bufflen; - cmd->offset += offset; - cmd->free_sg = 1; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", - cmd->sg_cnt); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", - cmd->bufflen); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", - cmd->offset); - - if (cmd->sg_cnt < 0) - BUG(); - - if (cmd->bufflen < 0) - BUG(); - - return 0; -#endif -} - -static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, - uint32_t srr_rel_offs, int *xmit_type) -{ - int res = 0, rel_offs; - - rel_offs = srr_rel_offs - cmd->offset; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", - srr_rel_offs, rel_offs); - - *xmit_type = QLA_TGT_XMIT_ALL; - - if (rel_offs < 0) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, - "qla_target(%d): SRR rel_offs (%d) < 0", - cmd->vha->vp_idx, rel_offs); - res = -1; - } else if (rel_offs == cmd->bufflen) - *xmit_type = QLA_TGT_XMIT_STATUS; - else if (rel_offs > 0) - res = qlt_set_data_offset(cmd, rel_offs); - - return res; -} - -/* No locks, thread context */ -static void qlt_handle_srr(struct scsi_qla_host *vha, - struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) -{ - struct imm_ntfy_from_isp *ntfy = - (struct imm_ntfy_from_isp *)&imm->imm_ntfy; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_cmd *cmd = sctio->cmd; - struct se_cmd *se_cmd = &cmd->se_cmd; - 
unsigned long flags; - int xmit_type = 0, resp = 0; - uint32_t offset; - uint16_t srr_ui; - - offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); - srr_ui = ntfy->u.isp24.srr_ui; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", - cmd, srr_ui); - - switch (srr_ui) { - case SRR_IU_STATUS: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - xmit_type = QLA_TGT_XMIT_STATUS; - resp = 1; - break; - case SRR_IU_DATA_IN: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, - "Unable to process SRR_IU_DATA_IN due to" - " missing cmd->sg, state: %d\n", cmd->state); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - ql_dbg(ql_dbg_tgt, vha, 0xe02a, - "Rejecting SRR_IU_DATA_IN with non GOOD " - "scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - resp = 1; - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, - "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, - cmd->se_cmd.scsi_status); - goto out_reject; - } - break; - case SRR_IU_DATA_OUT: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, - "Unable to process SRR_IU_DATA_OUT due to" - " missing cmd->sg\n"); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - ql_dbg(ql_dbg_tgt, vha, 0xe02b, - "Rejecting SRR_IU_DATA_OUT" - " with non GOOD scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (xmit_type & QLA_TGT_XMIT_DATA) { - cmd->cmd_flags |= BIT_8; - qlt_rdy_to_xfer(cmd); - } - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, - "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); - goto out_reject; - } - break; - default: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, - "qla_target(%d): Unknown srr_ui value %x", - vha->vp_idx, srr_ui); - goto out_reject; - } - - /* Transmit response in case of status and data-in cases */ - if (resp) { - cmd->cmd_flags |= BIT_7; - qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); - } - - return; - -out_reject: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - cmd->state = QLA_TGT_STATE_DATA_IN; - dump_stack(); - } else { - cmd->cmd_flags |= BIT_9; - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -} - -static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, - struct qla_tgt_srr_imm *imm, int ha_locked) -{ - struct qla_hw_data *ha = vha->hw; - unsigned long flags = 0; - -#ifndef __CHECKER__ - if (!ha_locked) - 
spin_lock_irqsave(&ha->hardware_lock, flags); -#endif - - qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); - -#ifndef __CHECKER__ - if (!ha_locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); -#endif - - kfree(imm); -} - -static void qlt_handle_srr_work(struct work_struct *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_tgt_srr_ctio *sctio; - unsigned long flags; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", - tgt); - -restart: - spin_lock_irqsave(&tgt->srr_lock, flags); - list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { - struct qla_tgt_srr_imm *imm, *i, *ti; - struct qla_tgt_cmd *cmd; - struct se_cmd *se_cmd; - - imm = NULL; - list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (i->srr_id == sctio->srr_id) { - list_del(&i->srr_list_entry); - if (imm) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, - "qla_target(%d): There must be " - "only one IMM SRR per CTIO SRR " - "(IMM SRR %p, id %d, CTIO %p\n", - vha->vp_idx, i, i->srr_id, sctio); - qlt_reject_free_srr_imm(tgt->vha, i, 0); - } else - imm = i; - } - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, - "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, - sctio->srr_id); - - if (imm == NULL) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, - "Not found matching IMM for SRR CTIO (id %d)\n", - sctio->srr_id); - continue; - } else - list_del(&sctio->srr_list_entry); - - spin_unlock_irqrestore(&tgt->srr_lock, flags); - - cmd = sctio->cmd; - /* - * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow - * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() - * logic.. - */ - cmd->offset = 0; - if (cmd->free_sg) { - kfree(cmd->sg); - cmd->sg = NULL; - cmd->free_sg = 0; - } - se_cmd = &cmd->se_cmd; - - cmd->sg_cnt = se_cmd->t_data_nents; - cmd->sg = se_cmd->t_data_sg; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, - "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", - cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? 
- se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); - - qlt_handle_srr(vha, sctio, imm); - - kfree(imm); - kfree(sctio); - goto restart; - } - spin_unlock_irqrestore(&tgt->srr_lock, flags); -} - -/* ha->hardware_lock supposed to be held on entry */ -static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb) -{ - struct qla_tgt_srr_imm *imm; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_ctio *sctio; - - tgt->imm_srr_id++; - - ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n", - vha->vp_idx); - - imm = kzalloc(sizeof(*imm), GFP_ATOMIC); - if (imm != NULL) { - memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - imm->srr_id = tgt->imm_srr_id; - list_add_tail(&imm->srr_list_entry, - &tgt->srr_imm_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, - "IMM NTFY SRR %p added (id %d, ui %x)\n", - imm, imm->srr_id, iocb->u.isp24.srr_ui); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(sctio, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == imm->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR CTIO, deleting IMM " - "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, - imm); - list_del(&imm->srr_list_entry); - - kfree(imm); - - spin_unlock(&tgt->srr_lock); - goto out_reject; - } - } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_ctio *ts; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, - "qla_target(%d): Unable to allocate SRR IMM " - "entry, SRR request will be rejected\n", vha->vp_idx); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == tgt->imm_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, - "CTIO SRR %p deleted (id %d)\n", - sctio, sctio->srr_id); - list_del(&sctio->srr_list_entry); - qlt_send_term_exchange(vha, sctio->cmd, - &sctio->cmd->atio, 1, 0); - kfree(sctio); - } - } - spin_unlock(&tgt->srr_lock); - goto out_reject; - } - - return; - -out_reject: - qlt_send_notify_ack(vha, iocb, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); -} - /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ @@ -5126,12 +4839,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, if (qlt_24xx_handle_els(vha, iocb) == 0) send_notify_ack = 0; break; - - case IMM_NTFY_SRR: - qlt_prepare_srr_imm(vha, iocb); - send_notify_ack = 0; - break; - default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, "qla_target(%d): Received unknown immediate " @@ -5153,7 +4860,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); @@ -5214,7 +4921,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_session *se_sess; struct qla_tgt_cmd *cmd; int tag; @@ -5756,6 +5463,32 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; + case MBA_REJECTED_FCP_CMD: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "qla_target(%d): Async event LS_REJECT occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + + if (le16_to_cpu(mailbox[3]) == 1) { + /* exchange starvation. */ + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation-. Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } + break; + case MBA_PORT_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, "qla_target(%d): Port update async event %#x " @@ -5765,14 +5498,14 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); login_code = le16_to_cpu(mailbox[2]); - if (login_code == 0x4) + if (login_code == 0x4) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); - else if (login_code == 0x7) + vha->hw->exch_starvation = 0; + } else if (login_code == 0x7) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, "Async MB 2: Port Logged Out\n"); break; - default: break; } @@ -5783,8 +5516,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *fcport, *tfcp, *del; int rc; + unsigned long flags; + u8 newfcport = 0; fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); if (!fcport) { @@ -5806,18 +5541,82 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, return NULL; } + del = NULL; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); + + if (tfcp) { + tfcp->d_id = fcport->d_id; + tfcp->port_type = fcport->port_type; + tfcp->supported_classes = fcport->supported_classes; + tfcp->flags |= fcport->flags; + + del = fcport; + fcport = tfcp; + } else { + if (vha->hw->current_topology == ISP_CFG_F) + fcport->flags |= FCF_FABRIC_DEVICE; + + list_add_tail(&fcport->list, &vha->vp_fcports); + if (!IS_SW_RESV_ADDR(fcport->d_id)) + vha->fcport_count++; + fcport->login_gen++; + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->login_succ = 1; + newfcport = 1; + } + + fcport->deleted = 0; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, 
flags); + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (newfcport) { + if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_gpsc_work(vha, fcport); + } + } + break; + + case MODE_TARGET: + default: + break; + } + if (del) + qla2x00_free_fcport(del); + return fcport; } /* Must be called under tgt_mutex */ -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, uint8_t *s_id) { - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; + if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { + /* + * This is Domain Controller, so it should be + * OK to drop SCSI commands from it. + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, + "Unable to find initiator with S_ID %x:%x:%x", + s_id[0], s_id[1], s_id[2]); + return NULL; + } + mutex_lock(&vha->vha_tgt.tgt_mutex); retry: @@ -5828,21 +5627,11 @@ retry: if (rc != 0) { mutex_unlock(&vha->vha_tgt.tgt_mutex); - if ((s_id[0] == 0xFF) && - (s_id[1] == 0xFC)) { - /* - * This is Domain Controller, so it should be - * OK to drop SCSI commands from it. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, - "Unable to find initiator with S_ID %x:%x:%x", - s_id[0], s_id[1], s_id[2]); - } else - ql_log(ql_log_info, vha, 0xf071, - "qla_target(%d): Unable to find " - "initiator with S_ID %x:%x:%x", - vha->vp_idx, s_id[0], s_id[1], - s_id[2]); + ql_log(ql_log_info, vha, 0xf071, + "qla_target(%d): Unable to find " + "initiator with S_ID %x:%x:%x", + vha->vp_idx, s_id[0], s_id[1], + s_id[2]); if (rc == -ENOENT) { qlt_port_logo_t logo; @@ -5875,7 +5664,6 @@ retry: mutex_unlock(&vha->vha_tgt.tgt_mutex); - kfree(fcport); return sess; } @@ -5884,7 +5672,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; uint32_t be_s_id; uint8_t s_id[3]; @@ -5911,12 +5699,18 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term2; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; goto out_term2; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC \n", + __func__, sess->port_name); + sess = NULL; + goto out_term2; + } } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -5928,8 +5722,8 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (rc != 0) goto out_term; spin_unlock_irqrestore(&ha->hardware_lock, flags); - - qlt_put_sess(sess); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; @@ -5940,7 +5734,8 @@ out_term: qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); - qlt_put_sess(sess); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } @@ -5950,7 +5745,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, struct atio_from_isp *a = 
&prm->tm_iocb2; struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; @@ -5975,12 +5770,18 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; goto out_term; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC\n", + __func__, sess->port_name); + sess = NULL; + goto out_term; + } } iocb = a; @@ -5992,13 +5793,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (rc != 0) goto out_term; - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -6075,17 +5876,10 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->ha = ha; tgt->vha = base_vha; init_waitqueue_head(&tgt->waitQ); - INIT_LIST_HEAD(&tgt->sess_list); INIT_LIST_HEAD(&tgt->del_sess_list); - INIT_DELAYED_WORK(&tgt->sess_del_work, - (void (*)(struct work_struct *))qlt_del_sess_work_fn); spin_lock_init(&tgt->sess_work_lock); INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); INIT_LIST_HEAD(&tgt->sess_works_list); - spin_lock_init(&tgt->srr_lock); - INIT_LIST_HEAD(&tgt->srr_ctio_list); - INIT_LIST_HEAD(&tgt->srr_imm_list); - INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); atomic_set(&tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; @@ -6251,29 +6045,25 @@ EXPORT_SYMBOL(qlt_lport_deregister); /* Must be called under HW lock */ static void qlt_set_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: case QLA2XXX_INI_MODE_EXCLUSIVE: vha->host->active_mode = MODE_TARGET; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode |= MODE_TARGET; + vha->host->active_mode = MODE_UNKNOWN; + break; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_DUAL; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* Must be called under HW lock */ static void qlt_clear_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: vha->host->active_mode = MODE_UNKNOWN; @@ -6282,14 +6072,12 @@ static void qlt_clear_mode(struct scsi_qla_host *vha) vha->host->active_mode = MODE_INITIATOR; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode &= ~MODE_TARGET; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_INITIATOR; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* @@ -6377,9 +6165,6 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) void qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) { - if (!qla_tgt_mode_enabled(vha)) - return; - vha->vha_tgt.qla_tgt = NULL; mutex_init(&vha->vha_tgt.tgt_mutex); @@ -6405,13 +6190,11 @@ qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) * FC-4 Feature bit 0 indicates target functionality to the name server. 
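With the new dual mode, qlt_set_mode() and qlt_clear_mode() above assign a single active mode per qlini_mode setting instead of OR-ing mode bits. A compact sketch of the mapping applied while target mode is enabled, using illustrative enum names:

    enum ini_setting { INI_EXCLUSIVE, INI_DISABLED, INI_ENABLED, INI_DUAL };
    enum host_role   { ROLE_UNKNOWN, ROLE_INITIATOR, ROLE_TARGET, ROLE_DUAL };

    /* Active role while target mode is switched on (cf. qlt_set_mode above). */
    enum host_role role_when_target_enabled(enum ini_setting m)
    {
        switch (m) {
        case INI_DISABLED:
        case INI_EXCLUSIVE: return ROLE_TARGET;   /* target only              */
        case INI_ENABLED:   return ROLE_UNKNOWN;  /* initiator keeps the port */
        case INI_DUAL:      return ROLE_DUAL;     /* both roles at once       */
        }
        return ROLE_UNKNOWN;
    }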
*/ if (qla_tgt_mode_enabled(vha)) { - if (qla_ini_mode_enabled(vha)) - ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; - else - ct_req->req.rff_id.fc4_feature = BIT_0; + ct_req->req.rff_id.fc4_feature = BIT_0; } else if (qla_ini_mode_enabled(vha)) { ct_req->req.rff_id.fc4_feature = BIT_1; - } + } else if (qla_dual_mode_enabled(vha)) + ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; } /* @@ -6430,7 +6213,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha) uint16_t cnt; struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; - if (!qla_tgt_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha)) return; for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { @@ -6523,8 +6306,10 @@ void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6537,13 +6322,30 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ @@ -6622,11 +6424,13 @@ void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; if (!QLA_TGT_MODE_ENABLED()) return; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6639,13 +6443,29 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); @@ -6749,10 +6569,12 @@ void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { - if (qla_tgt_mode_enabled(vha)) + /* enable target mode. 
Bit5 = 1 => disable */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_5; - /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + + /* Disable ini mode, if requested. bit4 = 1 => disable */ + if (qla_tgt_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_4; } @@ -6772,6 +6594,11 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) mutex_init(&base_vha->vha_tgt.tgt_mutex); mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); + + INIT_LIST_HEAD(&base_vha->unknown_atio_list); + INIT_DELAYED_WORK(&base_vha->unknown_atio_work, + qlt_unknown_atio_work_fn); + qlt_clear_mode(base_vha); } @@ -6906,6 +6733,8 @@ static int __init qlt_parse_ini_mode(void) ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; else return false; @@ -6935,9 +6764,8 @@ int __init qlt_init(void) } qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", - sizeof(qlt_plogi_ack_t), - __alignof__(qlt_plogi_ack_t), - 0, NULL); + sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), + 0, NULL); if (!qla_tgt_plogi_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 0824a8164a24..a7f90dcaae37 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -45,10 +45,12 @@ #define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" #define QLA2XXX_INI_MODE_STR_DISABLED "disabled" #define QLA2XXX_INI_MODE_STR_ENABLED "enabled" +#define QLA2XXX_INI_MODE_STR_DUAL "dual" #define QLA2XXX_INI_MODE_EXCLUSIVE 0 #define QLA2XXX_INI_MODE_DISABLED 1 #define QLA2XXX_INI_MODE_ENABLED 2 +#define QLA2XXX_INI_MODE_DUAL 3 #define QLA2XXX_COMMAND_COUNT_INIT 250 #define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 @@ -118,84 +120,6 @@ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ : (uint16_t)(iocb)->u.isp2x.target.id.standard) -#ifndef IMMED_NOTIFY_TYPE -#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ -/* - * ISP queue - immediate notify entry structure definition. - * This is sent by the ISP to the Target driver. - * This IOCB would have report of events sent by the - * initiator, that needs to be handled by the target - * driver immediately. - */ -struct imm_ntfy_from_isp { - uint8_t entry_type; /* Entry type. */ - uint8_t entry_count; /* Entry count. */ - uint8_t sys_define; /* System defined. */ - uint8_t entry_status; /* Entry Status. */ - union { - struct { - uint32_t sys_define_2; /* System defined. 
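Both NVRAM stage-1 hooks above carve out only ql_dm_tgt_ex_pct percent of the firmware exchange resources for target mode when running dual, clamping out-of-range percentages and capping the result to the 16-bit field. A small worked sketch of that arithmetic (function and parameter names are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    /* pct > 100 falls back to 50, exactly 100 leaves 5% headroom for the
     * firmware, and the result is capped to what a 16-bit field can hold. */
    uint16_t dual_mode_exchange_count(uint32_t fw_xcb_count, uint32_t pct)
    {
        uint32_t tmp;

        if (pct > 100)
            pct = 50;
        else if (pct == 100)
            pct = 95;

        tmp = fw_xcb_count * pct / 100;
        if (tmp > 0xffff)
            tmp = 0xffff;
        return (uint16_t)tmp;
    }

    int main(void)
    {
        /* e.g. 4096 firmware exchanges at the 50% default -> 2048 for target mode */
        printf("%u\n", dual_mode_exchange_count(4096, 50));
        return 0;
    }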
*/ - target_id_t target; - uint16_t lun; - uint8_t target_id; - uint8_t reserved_1; - uint16_t status_modifier; - uint16_t status; - uint16_t task_flags; - uint16_t seq_id; - uint16_t srr_rx_id; - uint32_t srr_rel_offs; - uint16_t srr_ui; -#define SRR_IU_DATA_IN 0x1 -#define SRR_IU_DATA_OUT 0x5 -#define SRR_IU_STATUS 0x7 - uint16_t srr_ox_id; - uint8_t reserved_2[28]; - } isp2x; - struct { - uint32_t reserved; - uint16_t nport_handle; - uint16_t reserved_2; - uint16_t flags; -#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 -#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 - uint16_t srr_rx_id; - uint16_t status; - uint8_t status_subcode; - uint8_t fw_handle; - uint32_t exchange_address; - uint32_t srr_rel_offs; - uint16_t srr_ui; - uint16_t srr_ox_id; - union { - struct { - uint8_t node_name[8]; - } plogi; /* PLOGI/ADISC/PDISC */ - struct { - /* PRLI word 3 bit 0-15 */ - uint16_t wd3_lo; - uint8_t resv0[6]; - } prli; - struct { - uint8_t port_id[3]; - uint8_t resv1; - uint16_t nport_handle; - uint16_t resv2; - } req_els; - } u; - uint8_t port_name[8]; - uint8_t resv3[3]; - uint8_t vp_index; - uint32_t reserved_5; - uint8_t port_id[3]; - uint8_t reserved_6; - } isp24; - } u; - uint16_t reserved_7; - uint16_t ox_id; -} __packed; -#endif - #ifndef NOTIFY_ACK_TYPE #define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */ /* @@ -731,7 +655,7 @@ struct abts_resp_from_24xx_fw { \********************************************************************/ struct qla_tgt_mgmt_cmd; -struct qla_tgt_sess; +struct fc_port; /* * This structure provides a template of function calls that the @@ -744,21 +668,22 @@ struct qla_tgt_func_tmpl { unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); void (*handle_dif_err)(struct qla_tgt_cmd *); - int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, + int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); - void (*free_session)(struct qla_tgt_sess *); + void (*free_session)(struct fc_port *); int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, - struct qla_tgt_sess *); - void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); - struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, + struct fc_port *); + void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool); + struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, const uint16_t); - struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, + struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, const uint8_t *); - void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); - void (*shutdown_sess)(struct qla_tgt_sess *); + void (*clear_nacl_from_fcport_map)(struct fc_port *); + void (*put_sess)(struct fc_port *); + void (*shutdown_sess)(struct fc_port *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -795,6 +720,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_ABORT_ALL 0xFFFE #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD #define QLA_TGT_NEXUS_LOSS 0xFFFC +#define QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA /* Notify Acknowledge flags */ #define NOTIFY_ACK_RES_COUNT BIT_8 @@ -872,12 +799,8 @@ struct qla_tgt { /* Count of sessions refering qla_tgt. Protected by hardware_lock. */ int sess_count; - /* Protected by hardware_lock. Addition also protected by tgt_mutex. 
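The handle_tmr() template above now takes a uint16_t function code because the new internal markers QLA_TGT_ABTS (0xFFFB) and QLA_TGT_2G_ABORT_TASK (0xFFFA) no longer fit in the old uint8_t. A tiny standalone demonstration of the truncation the wider type avoids:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t wide   = 0xFFFB;         /* e.g. the ABTS marker value      */
        uint8_t  narrow = (uint8_t)wide;  /* what an 8-bit field would keep  */

        /* prints "wide=0xfffb narrow=0xfb": the distinguishing bits are lost */
        printf("wide=0x%x narrow=0x%x\n", wide, narrow);
        return 0;
    }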
*/ - struct list_head sess_list; - /* Protected by hardware_lock */ struct list_head del_sess_list; - struct delayed_work sess_del_work; spinlock_t sess_work_lock; struct list_head sess_works_list; @@ -888,16 +811,7 @@ struct qla_tgt { int notify_ack_expected; int abts_resp_expected; int modify_lun_expected; - - int ctio_srr_id; - int imm_srr_id; - spinlock_t srr_lock; - struct list_head srr_ctio_list; - struct list_head srr_imm_list; - struct work_struct srr_work; - atomic_t tgt_global_resets_count; - struct list_head tgt_list_entry; }; @@ -910,92 +824,32 @@ struct qla_tgt_sess_op { bool aborted; }; -enum qla_sess_deletion { - QLA_SESS_DELETION_NONE = 0, - QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of - * this one */ - QLA_SESS_DELETION_IN_PROGRESS = 2, -}; - -typedef enum { - QLT_PLOGI_LINK_SAME_WWN, - QLT_PLOGI_LINK_CONFLICT, - QLT_PLOGI_LINK_MAX -} qlt_plogi_link_t; - -typedef struct { - struct list_head list; - struct imm_ntfy_from_isp iocb; - port_id_t id; - int ref_count; -} qlt_plogi_ack_t; - -/* - * Equivilant to IT Nexus (Initiator-Target) - */ -struct qla_tgt_sess { - uint16_t loop_id; - port_id_t s_id; - - unsigned int conf_compl_supported:1; - unsigned int deleted:2; - unsigned int local:1; - unsigned int logout_on_delete:1; - unsigned int keep_nport_handle:1; - unsigned int send_els_logo:1; - - unsigned char logout_completed; - - int generation; - - struct se_session *se_sess; - struct kref sess_kref; - struct scsi_qla_host *vha; - struct qla_tgt *tgt; - - struct list_head sess_list_entry; - unsigned long expires; - struct list_head del_list_entry; - - uint8_t port_name[WWN_SIZE]; - struct work_struct free_work; - - qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; +enum trace_flags { + TRC_NEW_CMD = BIT_0, + TRC_DO_WORK = BIT_1, + TRC_DO_WORK_ERR = BIT_2, + TRC_XFR_RDY = BIT_3, + TRC_XMIT_DATA = BIT_4, + TRC_XMIT_STATUS = BIT_5, + TRC_SRR_RSP = BIT_6, + TRC_SRR_XRDY = BIT_7, + TRC_SRR_TERM = BIT_8, + TRC_SRR_CTIO = BIT_9, + TRC_FLUSH = BIT_10, + TRC_CTIO_ERR = BIT_11, + TRC_CTIO_DONE = BIT_12, + TRC_CTIO_ABORTED = BIT_13, + TRC_CTIO_STRANGE= BIT_14, + TRC_CMD_DONE = BIT_15, + TRC_CMD_CHK_STOP = BIT_16, + TRC_CMD_FREE = BIT_17, + TRC_DATA_IN = BIT_18, + TRC_ABORT = BIT_19, }; -typedef enum { - /* - * BIT_0 - Atio Arrival / schedule to work - * BIT_1 - qlt_do_work - * BIT_2 - qlt_do work failed - * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending - * BIT_4 - read respond/tcm_qla2xx_queue_data_in - * BIT_5 - status respond / tcm_qla2xx_queue_status - * BIT_6 - tcm request to abort/Term exchange. - * pre_xmit_response->qlt_send_term_exchange - * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) - * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) - * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) - * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data - - * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd - * BIT_13 - Bad completion - - * qlt_ctio_do_completion --> qlt_term_ctio_exchange - * BIT_14 - Back end data received/sent. 
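The new enum trace_flags above replaces the anonymous BIT_x bookkeeping in cmd_flags, so command lifecycle tracing reads as named events. A minimal sketch of the pattern with just a few of the flags (standalone, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    enum trc_demo {                       /* small subset of the real enum */
        TRC_NEW_CMD     = 1u << 0,
        TRC_XFR_RDY     = 1u << 3,
        TRC_XMIT_STATUS = 1u << 5,
    };

    int main(void)
    {
        uint32_t trc = 0;

        trc |= TRC_NEW_CMD;               /* ATIO arrived                  */
        trc |= TRC_XFR_RDY;               /* write-pending issued          */

        if (trc & TRC_XMIT_STATUS)        /* guard against a double status */
            printf("status already sent\n");
        trc |= TRC_XMIT_STATUS;

        printf("trace mask: 0x%x\n", trc);
        return 0;
    }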
- * BIT_15 - SRR prepare ctio - * BIT_16 - complete free - * BIT_17 - flush - qlt_abort_cmd_on_host_reset - * BIT_18 - completion w/abort status - * BIT_19 - completion w/unknown status - * BIT_20 - tcm_qla2xxx_free_cmd - */ - CMD_FLAG_DATA_WORK = BIT_11, - CMD_FLAG_DATA_WORK_FREE = BIT_21, -} cmd_flags_t; - struct qla_tgt_cmd { struct se_cmd se_cmd; - struct qla_tgt_sess *sess; + struct fc_port *sess; int state; struct work_struct free_work; struct work_struct work; @@ -1014,6 +868,8 @@ struct qla_tgt_cmd { unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; unsigned int aborted:1; + unsigned int data_work:1; + unsigned int data_work_free:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -1038,7 +894,7 @@ struct qla_tgt_cmd { uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; - cmd_flags_t cmd_flags; + enum trace_flags trc_flags; }; struct qla_tgt_sess_work_param { @@ -1056,9 +912,9 @@ struct qla_tgt_sess_work_param { }; struct qla_tgt_mgmt_cmd { - uint8_t tmr_func; + uint16_t tmr_func; uint8_t fc_tm_rsp; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; @@ -1090,18 +946,6 @@ struct qla_tgt_prm { uint16_t tot_dsds; }; -struct qla_tgt_srr_imm { - struct list_head srr_list_entry; - int srr_id; - struct imm_ntfy_from_isp imm_ntfy; -}; - -struct qla_tgt_srr_ctio { - struct list_head srr_list_entry; - int srr_id; - struct qla_tgt_cmd *cmd; -}; - /* Check for Switch reserved address */ #define IS_SW_RESV_ADDR(_s_id) \ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) @@ -1121,7 +965,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_lport_register(void *, u64, u64, u64, int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); -void qlt_put_sess(struct qla_tgt_sess *sess); +extern void qlt_unreg_sess(struct fc_port *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); @@ -1133,24 +977,22 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int); * is not set. Right now, ha value is ignored. 
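IS_SW_RESV_ADDR() above singles out N_Port IDs with domain 0xFF and area 0xFC, the fabric-reserved range (domain controllers) that the login and command paths skip. A standalone check over a packed 24-bit port ID, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* A 24-bit FC N_Port ID laid out as domain(8) | area(8) | al_pa(8). */
    bool is_switch_reserved(uint32_t port_id24)
    {
        uint8_t domain = (port_id24 >> 16) & 0xff;
        uint8_t area   = (port_id24 >> 8)  & 0xff;

        return domain == 0xff && area == 0xfc;   /* e.g. 0xFFFCxx */
    }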
*/ #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) + extern int ql2x_ini_mode; static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_TARGET; + return ha->host->active_mode == MODE_TARGET; } static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_INITIATOR; + return ha->host->active_mode == MODE_INITIATOR; } -static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) +static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha) { - if (ha->host->active_mode & MODE_INITIATOR) - ha->host->active_mode &= ~MODE_INITIATOR; - else - ha->host->active_mode |= MODE_INITIATOR; + return (ha->host->active_mode == MODE_DUAL); } static inline uint32_t sid_to_key(const uint8_t *s_id) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3084983c1287..c2f8c3580880 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -282,10 +282,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) cmd->cmd_in_wq = 0; - WARN_ON(cmd->cmd_flags & BIT_16); + WARN_ON(cmd->trc_flags & TRC_CMD_FREE); cmd->vha->tgt_counters.qla_core_ret_sta_ctio++; - cmd->cmd_flags |= BIT_16; + cmd->trc_flags |= TRC_CMD_FREE; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -299,8 +299,8 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) cmd->vha->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; - BUG_ON(cmd->cmd_flags & BIT_20); - cmd->cmd_flags |= BIT_20; + WARN_ON(cmd->trc_flags & TRC_CMD_DONE); + cmd->trc_flags |= TRC_CMD_DONE; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); @@ -315,7 +315,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - cmd->cmd_flags |= BIT_14; + cmd->trc_flags |= TRC_CMD_CHK_STOP; } return target_put_sess_cmd(se_cmd); @@ -339,9 +339,26 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) qlt_free_cmd(cmd); } +static void tcm_qla2xxx_release_session(struct kref *kref) +{ + struct fc_port *sess = container_of(kref, + struct fc_port, sess_kref); + + qlt_unreg_sess(sess); +} + +static void tcm_qla2xxx_put_sess(struct fc_port *sess) +{ + if (!sess) + return; + + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); + kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); +} + static void tcm_qla2xxx_close_session(struct se_session *se_sess) { - struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; + struct fc_port *sess = se_sess->fabric_sess_ptr; struct scsi_qla_host *vha; unsigned long flags; @@ -350,7 +367,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - qlt_put_sess(sess); + tcm_qla2xxx_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -377,7 +394,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->se_cmd.se_cmd_flags); return 0; } - cmd->cmd_flags |= BIT_3; + cmd->trc_flags |= TRC_XFR_RDY; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -441,7 +458,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, { struct se_cmd *se_cmd = &cmd->se_cmd; struct se_session *se_sess; - struct qla_tgt_sess *sess; + struct fc_port *sess; #ifdef 
CONFIG_TCM_QLA2XXX_DEBUG struct se_portal_group *se_tpg; struct tcm_qla2xxx_tpg *tpg; @@ -456,7 +473,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, sess = cmd->sess; if (!sess) { - pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); + pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); return -EINVAL; } @@ -493,9 +510,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) cmd->cmd_in_wq = 0; spin_lock_irqsave(&cmd->cmd_lock, flags); - cmd->cmd_flags |= CMD_FLAG_DATA_WORK; + cmd->data_work = 1; if (cmd->aborted) { - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); tcm_qla2xxx_free_cmd(cmd); @@ -532,7 +549,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) */ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) { - cmd->cmd_flags |= BIT_10; + cmd->trc_flags |= TRC_DATA_IN; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); @@ -563,13 +580,49 @@ static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) * Called from qla_target.c:qlt_issue_task_mgmt() */ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, - uint8_t tmr_func, uint32_t tag) + uint16_t tmr_func, uint32_t tag) { - struct qla_tgt_sess *sess = mcmd->sess; + struct fc_port *sess = mcmd->sess; struct se_cmd *se_cmd = &mcmd->se_cmd; + int transl_tmr_func = 0; + + switch (tmr_func) { + case QLA_TGT_ABTS: + pr_debug("%ld: ABTS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_2G_ABORT_TASK: + pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_CLEAR_ACA: + pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_ACA; + break; + case QLA_TGT_TARGET_RESET: + pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_TARGET_WARM_RESET; + break; + case QLA_TGT_LUN_RESET: + pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_LUN_RESET; + break; + case QLA_TGT_CLEAR_TS: + pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_TASK_SET; + break; + case QLA_TGT_ABORT_TS: + pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK_SET; + break; + default: + pr_debug("%ld: Unknown task mgmt fn 0x%x\n", + sess->vha->host_no, tmr_func); + return -ENOSYS; + } return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, - tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); + transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); } static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) @@ -591,7 +644,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) return 0; } - cmd->cmd_flags |= BIT_4; + cmd->trc_flags |= TRC_XMIT_DATA; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -622,11 +675,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - if (cmd->cmd_flags & BIT_5) { - pr_crit("Bit_5 already set for cmd = %p.\n", cmd); + if (cmd->trc_flags & TRC_XMIT_STATUS) { + pr_crit("Multiple calls for status = %p.\n", cmd); dump_stack(); } - cmd->cmd_flags |= BIT_5; + cmd->trc_flags |= TRC_XMIT_STATUS; if 
(se_cmd->data_direction == DMA_FROM_DEVICE) { /* @@ -682,10 +735,7 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } - -#define DATA_WORK_NOT_FREE(_flags) \ - (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ - CMD_FLAG_DATA_WORK) +#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, @@ -697,13 +747,13 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) spin_lock_irqsave(&cmd->cmd_lock, flags); if ((cmd->state == QLA_TGT_STATE_NEW)|| - ((cmd->state == QLA_TGT_STATE_DATA_IN) && - DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) { - - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + ((cmd->state == QLA_TGT_STATE_DATA_IN) && + DATA_WORK_NOT_FREE(cmd))) { + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); - /* Cmd have not reached firmware. - * Use this trigger to free it. */ + /* + * cmd has not reached fw, Use this trigger to free it. + */ tcm_qla2xxx_free_cmd(cmd); return; } @@ -713,11 +763,11 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, - struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); + struct tcm_qla2xxx_nacl *, struct fc_port *); /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) { struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct se_portal_group *se_tpg = se_nacl->se_tpg; @@ -756,7 +806,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); } -static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); @@ -1141,7 +1191,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, const uint8_t *s_id) { @@ -1169,12 +1219,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( se_nacl, se_nacl->initiatorname); nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1185,7 +1235,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint8_t *s_id) { u32 key; @@ -1209,22 +1259,22 @@ static void tcm_qla2xxx_set_sess_by_s_id( pr_debug("Wiping nonexisting fc_port entry\n"); } - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); btree_remove32(&lport->lport_fcport_map, key); - 
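tcm_qla2xxx_aborted_task() above now decides on the two explicit bitfields (data_work, data_work_free) instead of the old flag arithmetic: an aborted command whose data work has run but has not taken over the free is released from the abort path. A condensed, approximate sketch of that decision with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_cmd {
        bool is_new;           /* still in the NEW state               */
        bool data_work;        /* handle_data work has already run     */
        bool data_work_free;   /* ...and that work will free the cmd   */
    };

    /* Free from the abort path only if nobody else will. */
    bool abort_should_free_now(const struct demo_cmd *cmd)
    {
        if (cmd->is_new)
            return true;
        return cmd->data_work && !cmd->data_work_free;
    }

    int main(void)
    {
        struct demo_cmd c = { .data_work = true };
        printf("%s\n", abort_should_free_now(&c) ? "free now" : "defer");
        return 0;
    }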
nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } @@ -1234,19 +1284,19 @@ static void tcm_qla2xxx_set_sess_by_s_id( return; } - pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { @@ -1274,12 +1324,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1290,7 +1340,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint16_t loop_id) { struct se_node_acl *saved_nacl; @@ -1305,27 +1355,27 @@ static void tcm_qla2xxx_set_sess_by_loop_id( if (!saved_nacl) { pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = NULL; - nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } @@ -1335,29 +1385,29 @@ static void tcm_qla2xxx_set_sess_by_loop_id( return; } - pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); fc_loopid->se_nacl = new_se_nacl; - if 
(qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Should always be called with qla_hw_data->tgt.sess_lock held. */ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, - struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) + struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; unsigned char be_sid[3]; - be_sid[0] = sess->s_id.b.domain; - be_sid[1] = sess->s_id.b.area; - be_sid[2] = sess->s_id.b.al_pa; + be_sid[0] = sess->d_id.b.domain; + be_sid[1] = sess->d_id.b.area; + be_sid[2] = sess->d_id.b.al_pa; tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, sess, be_sid); @@ -1365,7 +1415,7 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, sess, sess->loop_id); } -static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_free_session(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; @@ -1377,7 +1427,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) se_sess = sess->se_sess; if (!se_sess) { - pr_err("struct qla_tgt_sess->se_sess is NULL\n"); + pr_err("struct fc_port->se_sess is NULL\n"); dump_stack(); return; } @@ -1404,14 +1454,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl = se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - struct qla_tgt_sess *qlat_sess = p; + struct fc_port *qlat_sess = p; uint16_t loop_id = qlat_sess->loop_id; unsigned long flags; unsigned char be_sid[3]; - be_sid[0] = qlat_sess->s_id.b.domain; - be_sid[1] = qlat_sess->s_id.b.area; - be_sid[2] = qlat_sess->s_id.b.al_pa; + be_sid[0] = qlat_sess->d_id.b.domain; + be_sid[1] = qlat_sess->d_id.b.area; + be_sid[2] = qlat_sess->d_id.b.al_pa; /* * And now setup se_nacl and session pointers into HW lport internal @@ -1434,7 +1484,7 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, static int tcm_qla2xxx_check_initiator_node_acl( scsi_qla_host_t *vha, unsigned char *fc_wwpn, - struct qla_tgt_sess *qlat_sess) + struct fc_port *qlat_sess) { struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; @@ -1478,7 +1528,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( return 0; } -static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, +static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, uint16_t loop_id, bool conf_compl_supported) { struct qla_tgt *tgt = sess->tgt; @@ -1491,11 +1541,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, u32 key; - if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) + if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", sess, sess->port_name, - sess->loop_id, loop_id, sess->s_id.b.domain, - sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, + sess->loop_id, loop_id, 
sess->d_id.b.domain, + sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { @@ -1515,18 +1565,20 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, sess->loop_id = loop_id; } - if (sess->s_id.b24 != s_id.b24) { - key = (((u32) sess->s_id.b.domain << 16) | - ((u32) sess->s_id.b.area << 8) | - ((u32) sess->s_id.b.al_pa)); + if (sess->d_id.b24 != s_id.b24) { + key = (((u32) sess->d_id.b.domain << 16) | + ((u32) sess->d_id.b.area << 8) | + ((u32) sess->d_id.b.al_pa)); if (btree_lookup32(&lport->lport_fcport_map, key)) - WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, - "Found wrong se_nacl when updating s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + WARN(btree_remove32(&lport->lport_fcport_map, key) != + se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); else WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); key = (((u32) s_id.b.domain << 16) | ((u32) s_id.b.area << 8) | @@ -1537,10 +1589,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, s_id.b.domain, s_id.b.area, s_id.b.al_pa); btree_update32(&lport->lport_fcport_map, key, se_nacl); } else { - btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); + btree_insert32(&lport->lport_fcport_map, key, se_nacl, + GFP_ATOMIC); } - sess->s_id = s_id; + sess->d_id = s_id; nacl->nport_id = key; } @@ -1567,6 +1620,7 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, + .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, }; @@ -1690,7 +1744,7 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; struct fc_vport_identifiers vport_id; - if (!qla_tgt_mode_enabled(base_vha)) { + if (qla_ini_mode_enabled(base_vha)) { pr_err("qla2xxx base_vha not enabled for target mode\n"); return -EPERM; } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index cf8430be183b..071035dfa99a 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -20,8 +20,8 @@ struct tcm_qla2xxx_nacl { u64 nport_wwnn; /* ASCII formatted WWPN for FC Initiator Nport */ char nport_name[TCM_QLA2XXX_NAMELEN]; - /* Pointer to qla_tgt_sess */ - struct qla_tgt_sess *qla_tgt_sess; + /* Pointer to fc_port */ + struct fc_port *fc_port; /* Pointer to TCM FC nexus */ struct se_session *nport_nexus; }; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index f94535130a34..cdbb293aca08 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1475,7 +1475,7 @@ static void sas_end_device_release(struct device *dev) } /** - * sas_rphy_initialize - common rphy intialization + * sas_rphy_initialize - common rphy initialization * @rphy: rphy to initialise * * Used by both sas_end_device_alloc() and sas_expander_alloc() to diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 81212d4bd9bf..e5ef78a6848e 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -23,7 +23,7 @@ static const 
char *verstr = "20160209"; #include <linux/fs.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/string.h> diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 585e54f6512c..638e5f427c90 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -280,7 +280,7 @@ static const struct vmstor_protocol vmstor_protocols[] = { /* - * This structure is sent during the intialization phase to get the different + * This structure is sent during the initialization phase to get the different * properties of the channel. */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index c680d7641311..939c47df73fa 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> +#include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> @@ -29,6 +30,7 @@ #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <linux/seqlock.h> +#include <linux/blk-mq-virtio.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 @@ -108,7 +110,6 @@ struct virtio_scsi { bool affinity_hint_set; struct hlist_node node; - struct hlist_node node_dead; /* Protected by event_vq lock */ bool stop_events; @@ -118,7 +119,6 @@ struct virtio_scsi { struct virtio_scsi_vq req_vqs[]; }; -static enum cpuhp_state virtioscsi_online; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; @@ -766,6 +766,13 @@ static void virtscsi_target_destroy(struct scsi_target *starget) kfree(tgt); } +static int virtscsi_map_queues(struct Scsi_Host *shost) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + + return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2); +} + static struct scsi_host_template virtscsi_host_template_single = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -801,6 +808,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, + .map_queues = virtscsi_map_queues, .track_queue_depth = 1, }; @@ -817,80 +825,6 @@ static struct scsi_host_template virtscsi_host_template_multi = { virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) -static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - int i; - int cpu; - - /* In multiqueue mode, when the number of cpu is equal - * to the number of request queues, we let the qeueues - * to be private to one cpu by setting the affinity hint - * to eliminate the contention. 
- */ - if ((vscsi->num_queues == 1 || - vscsi->num_queues != num_online_cpus()) && affinity) { - if (vscsi->affinity_hint_set) - affinity = false; - else - return; - } - - if (affinity) { - i = 0; - for_each_online_cpu(cpu) { - virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); - i++; - } - - vscsi->affinity_hint_set = true; - } else { - for (i = 0; i < vscsi->num_queues; i++) { - if (!vscsi->req_vqs[i].vq) - continue; - - virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); - } - - vscsi->affinity_hint_set = false; - } -} - -static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - get_online_cpus(); - __virtscsi_set_affinity(vscsi, affinity); - put_online_cpus(); -} - -static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi, - node); - __virtscsi_set_affinity(vscsi, true); - return 0; -} - -static int virtscsi_cpu_notif_add(struct virtio_scsi *vi) -{ - int ret; - - ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node); - if (ret) - return ret; - - ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead); - if (ret) - cpuhp_state_remove_instance(virtioscsi_online, &vi->node); - return ret; -} - -static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi) -{ - cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node); - cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD, - &vi->node_dead); -} - static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, struct virtqueue *vq) { @@ -900,14 +834,8 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, static void virtscsi_remove_vqs(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_set_affinity(vscsi, false); - /* Stop all the virtqueues. */ vdev->config->reset(vdev); - vdev->config->del_vqs(vdev); } @@ -920,6 +848,7 @@ static int virtscsi_init(struct virtio_device *vdev, vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; + struct irq_affinity desc = { .pre_vectors = 2 }; num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); @@ -941,7 +870,8 @@ static int virtscsi_init(struct virtio_device *vdev, } /* Discover virtqueues and write information to configuration. 
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -1007,10 +937,6 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; - err = virtscsi_cpu_notif_add(vscsi); - if (err) - goto scsi_add_host_failed; - cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; @@ -1065,9 +991,6 @@ static void virtscsi_remove(struct virtio_device *vdev) virtscsi_cancel_event_work(vscsi); scsi_remove_host(shost); - - virtscsi_cpu_notif_remove(vscsi); - virtscsi_remove_vqs(vdev); scsi_host_put(shost); } @@ -1075,10 +998,6 @@ static void virtscsi_remove(struct virtio_device *vdev) #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_cpu_notif_remove(vscsi); virtscsi_remove_vqs(vdev); return 0; } @@ -1093,11 +1012,6 @@ static int virtscsi_restore(struct virtio_device *vdev) if (err) return err; - err = virtscsi_cpu_notif_add(vscsi); - if (err) { - vdev->config->del_vqs(vdev); - return err; - } virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) @@ -1152,16 +1066,6 @@ static int __init init(void) pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "scsi/virtio:online", - virtscsi_cpu_online, NULL); - if (ret < 0) - goto error; - virtioscsi_online = ret; - ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead", - NULL, virtscsi_cpu_online); - if (ret) - goto error; ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; @@ -1177,17 +1081,12 @@ error: kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; } - if (virtioscsi_online) - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); return ret; } static void __exit fini(void) { unregister_virtio_driver(&virtio_scsi_driver); - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index 2eaf3184f61d..2ce394aa4c95 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -36,6 +36,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/of.h> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 44222ef9471e..90b5b2efafbf 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -33,6 +33,7 @@ #include <linux/pm_domain.h> #include <linux/export.h> #include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/ioport.h> diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 2c3ffbcbd621..f45115fce4eb 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -36,6 +36,7 @@ #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> +#include <linux/sched/task.h> #include "ion.h" #include "ion_priv.h" diff --git 
a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 4e5c0f17f579..c69d0bd53693 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -20,6 +20,7 @@ #include <linux/mm.h> #include <linux/rtmutex.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> #include "ion.h" diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index ec3b66561412..054660049395 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -37,7 +37,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/oom.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/swap.h> #include <linux/rcupdate.h> #include <linux/profile.h> diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 57e8599b54e6..8deac8d9225d 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mm.h> diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c index c63e591631f6..c3b8fc54883d 100644 --- a/drivers/staging/dgnc/dgnc_tty.c +++ b/drivers/staging/dgnc/dgnc_tty.c @@ -19,7 +19,7 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> /* For jiffies, task states */ +#include <linux/sched/signal.h> /* For jiffies, task states, etc. */ #include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */ #include <linux/module.h> #include <linux/ctype.h> diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c index 95272f4765fc..6f59240024d1 100644 --- a/drivers/staging/dgnc/dgnc_utils.c +++ b/drivers/staging/dgnc/dgnc_utils.c @@ -1,5 +1,5 @@ #include <linux/tty.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "dgnc_utils.h" /* diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c index c4bf3298ba07..f0404bc37123 100644 --- a/drivers/staging/greybus/pwm.c +++ b/drivers/staging/greybus/pwm.c @@ -284,7 +284,6 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev, pwm->ops = &gb_pwm_ops; pwm->base = -1; /* Allocate base dynamically */ pwm->npwm = pwmc->pwm_max + 1; - pwm->can_sleep = true; /* FIXME */ ret = pwmchip_add(pwm); if (ret) { diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index ab0dbf5cab5a..43255e2e9276 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -14,7 +14,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/uaccess.h> diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h index 7b8cc3a25214..cd1eb2c4c940 100644 --- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h +++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h @@ -39,7 +39,7 @@ struct fpgaimage { const struct firmware *fw_entry; /* - * the followings can be read from bitstream, + * the following can be read from bitstream, * but other image format should have as well */ char filename[MAX_STR]; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c 
b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index cf902154f0aa..bcf9f3dd0310 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -34,7 +34,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs_struct.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include "../../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index b0eb80d70c23..60b827eeefe2 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -1704,7 +1704,7 @@ struct ost_lvb { * lquota data structures */ -/* The lquota_id structure is an union of all the possible identifier types that +/* The lquota_id structure is a union of all the possible identifier types that * can be used with quota, this includes: * - 64-bit user ID * - 64-bit group ID diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 21aec0ca9ad3..7d8628ce0d3b 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -44,6 +44,7 @@ #ifdef __KERNEL__ # include <linux/quota.h> +# include <linux/sched/signal.h> # include <linux/string.h> /* snprintf() */ # include <linux/version.h> #else /* !__KERNEL__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h index 300e96fb032a..da9ce195c52e 100644 --- a/drivers/staging/lustre/lustre/include/lustre_compat.h +++ b/drivers/staging/lustre/lustre/include/lustre_compat.h @@ -35,6 +35,7 @@ #include <linux/fs_struct.h> #include <linux/namei.h> +#include <linux/cred.h> #include "lustre_patchless_compat.h" diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 27f3148c4344..b04d613846ee 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -42,7 +42,7 @@ * @{ */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/types.h> #include "../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index aaedec7d793c..dace6591a0a4 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -34,6 +34,8 @@ #define _OBD_SUPPORT #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "../../include/linux/libcfs/libcfs.h" #include "lustre_compat.h" #include "lprocfs_status.h" diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 10adfcdd7035..481c0d01d4c6 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -2952,15 +2952,16 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) return rc; } -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat) +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(de); + struct inode *inode = d_inode(path->dentry); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = 
ll_i2info(inode); int res; - res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LOOKUP); + res = ll_inode_revalidate(path->dentry, + MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP); ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1); if (res) diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index ecdfd0c29b7f..55f68acd85d1 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -750,7 +750,8 @@ int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_release_openhandle(struct inode *, struct lookup_intent *); int ll_md_real_close(struct inode *inode, fmode_t fmode); -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); struct posix_acl *ll_get_acl(struct inode *inode, int type); int ll_migrate(struct inode *parent, struct file *file, int mdtidx, const char *name, int namelen); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index e860df7c45a2..366f2ce20f5e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -38,7 +38,9 @@ #include "../../include/linux/libcfs/libcfs.h" #include <linux/crypto.h> +#include <linux/cred.h> #include <linux/key.h> +#include <linux/sched/task.h> #include "../include/obd.h" #include "../include/obd_class.h" diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c index c75ae43095ba..c6c3de94adaa 100644 --- a/drivers/staging/media/lirc/lirc_sir.c +++ b/drivers/staging/media/lirc/lirc_sir.c @@ -36,7 +36,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fs.h> diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 34aac3e2eb87..e4a533b6beb3 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -42,7 +42,7 @@ #include <linux/module.h> #include <linux/kmod.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/string.h> diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h index ee3f5ee06529..9e390648d93e 100644 --- a/drivers/staging/rtl8188eu/include/osdep_service.h +++ b/drivers/staging/rtl8188eu/include/osdep_service.h @@ -37,7 +37,7 @@ #include <linux/io.h> #include <linux/mutex.h> #include <linux/sem.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index 0d247058bce4..097147071df0 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -1953,7 +1953,7 @@ struct ieee80211_device { /* ask to the driver to retune the radio . * This function can sleep. the driver should ensure - * the radio has been swithced before return. + * the radio has been switched before return. 
*/ void (*set_chan)(struct net_device *dev, short ch); @@ -1964,7 +1964,7 @@ struct ieee80211_device { * The syncro version is similar to the start_scan but * does not return until all channels has been scanned. * this is called in user context and should sleep, - * it is called in a work_queue when swithcing to ad-hoc mode + * it is called in a work_queue when switching to ad-hoc mode * or in behalf of iwlist scan when the card is associated * and root user ask for a scan. * the function stop_scan should stop both the syncro and diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index 1bff0e91cc0c..0ea90aae4283 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -2364,7 +2364,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work) // if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK)) if (ieee->state == IEEE80211_NOLINK) ieee->current_network.channel = 6; - /* if not then the state is not linked. Maybe the user swithced to + /* if not then the state is not linked. Maybe the user switched to * ad-hoc mode just after being in monitor mode, or just after * being very few time in managed mode (so the card have had no * time to scan all the chans..) or we have just run up the iface diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index b8a170978434..5d33020554cd 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h @@ -33,7 +33,7 @@ #include <linux/interrupt.h> #include <linux/semaphore.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sem.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index f19b6b27aa71..5346c657485d 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/netdevice.h> diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index ff68a384f9c2..d2ff0afd685a 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -22,7 +22,7 @@ #include <linux/unistd.h> #include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */ #include <linux/poll.h> /* for poll_wait() */ -#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ +#include <linux/sched/signal.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" #include "speakup.h" diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 1dc8627e65b0..8a0d214f6e9b 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -34,6 +34,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/cdev.h> @@ -1875,8 +1876,8 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) ** ** VC_RESUME_IDLE - Initialise the resume completion at the same time. 
The ** resume completion is in it's 'done' state whenever -** videcore is running. Therfore, the VC_RESUME_IDLE state -** implies that videocore is suspended. +** videcore is running. Therefore, the VC_RESUME_IDLE +** state implies that videocore is suspended. ** Hence, any thread which needs to wait until videocore is ** running can wait on this completion - it will only block ** if videocore is suspended. diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h index 4055d4bf9f74..e63964f5a18a 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h @@ -47,7 +47,7 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/random.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/time.h> /* for time_t */ diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 9ab43935869e..2eebc6215cac 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -213,7 +213,7 @@ static void deinit_irq(struct net_device *dev) vif = netdev_priv(dev); wilc = vif->wilc; - /* Deintialize IRQ */ + /* Deinitialize IRQ */ if (wilc->dev_irq_num) { free_irq(wilc->dev_irq_num, wilc); gpio_free(wilc->gpio); diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index f7ce47cac2aa..7961d1c56847 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -2357,7 +2357,7 @@ int wilc_deinit_host_int(struct net_device *net) del_timer_sync(&wilc_during_ip_timer); if (s32Error) - netdev_err(net, "Error while deintializing host interface\n"); + netdev_err(net, "Error while deinitializing host interface\n"); return s32Error; } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 2fb1bf1a26c5..37a05185dcbe 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -872,7 +872,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, goto out; csk->mtu = ndev->mtu; csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; @@ -907,7 +908,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, port_id = cxgb4_port_idx(ndev); csk->mtu = dst_mtu(dst); csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nports; csk->txq_idx = (port_id * step) + @@ -1066,6 +1068,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) struct sk_buff *skb; const struct tcphdr *tcph; struct cpl_t5_pass_accept_rpl *rpl5; + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; unsigned int len = roundup(sizeof(*rpl5), 16); unsigned int mtu_idx; u64 opt0; @@ -1111,6 +1114,9 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); + if (!is_t5(lldi->adapter_type)) + opt2 |= 
RX_FC_DISABLE_F; + if (req->tcpopt.tstamp) opt2 |= TSTAMPS_EN_F; if (req->tcpopt.sack) @@ -1119,8 +1125,13 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 |= WND_SCALE_EN_F; hlen = ntohl(req->hdr_len); - tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + - IP_HDR_LEN_G(hlen); + + if (is_t5(lldi->adapter_type)) + tcph = (struct tcphdr *)((u8 *)(req + 1) + + ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen)); + else + tcph = (struct tcphdr *)((u8 *)(req + 1) + + T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen)); if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); @@ -1726,7 +1737,7 @@ static bool cxgbit_credit_err(const struct cxgbit_sock *csk) } while (skb) { - credit += skb->csum; + credit += (__force u32)skb->csum; skb = cxgbit_skcb_tx_wr_next(skb); } @@ -1753,6 +1764,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) while (credits) { struct sk_buff *p = cxgbit_sock_peek_wr(csk); + const u32 csum = (__force u32)p->csum; if (unlikely(!p)) { pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", @@ -1761,17 +1773,17 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) break; } - if (unlikely(credits < p->csum)) { + if (unlikely(credits < csum)) { pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", csk, csk->tid, credits, csk->wr_cred, csk->wr_una_cred, - p->csum); - p->csum -= credits; + csum); + p->csum = (__force __wsum)(csum - credits); break; } cxgbit_sock_dequeue_wr(csk); - credits -= p->csum; + credits -= csum; kfree_skb(p); } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h index 28c11bd1b930..dcaed3a1d23f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_lro.h +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h @@ -31,8 +31,9 @@ enum cxgbit_pducb_flags { PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ - PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */ - PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */ + PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */ + PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */ + PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */ }; struct cxgbit_lro_pdu_cb { diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 96eedfc49c94..4fd775ace541 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -165,29 +165,24 @@ static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) } static void -cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl, - struct cxgbit_lro_pdu_cb *pdu_cb) +cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb, + u32 ddpvld) { - unsigned int status = ntohl(cpl->ddpvld); - pdu_cb->flags |= PDUCBF_RX_STATUS; - pdu_cb->ddigest = ntohl(cpl->ulp_crc); - pdu_cb->pdulen = ntohs(cpl->len); - - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= 
PDUCBF_RX_DCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) - pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) + pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld); - if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && + if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && (!(pdu_cb->flags & PDUCBF_RX_DATA))) { pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; } @@ -201,13 +196,17 @@ cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) lro_cb->pdu_idx); struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); - cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb); + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld)); + + pdu_cb->flags |= PDUCBF_RX_STATUS; + pdu_cb->ddigest = ntohl(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); if (pdu_cb->flags & PDUCBF_RX_HDR) pdu_cb->complete = true; - lro_cb->complete = true; lro_cb->pdu_totallen += pdu_cb->pdulen; + lro_cb->complete = true; lro_cb->pdu_idx++; } @@ -257,7 +256,7 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) cxgbit_skcb_flags(skb) = 0; lro_cb->complete = false; - } else { + } else if (op == CPL_ISCSI_DATA) { struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; offset = sizeof(struct cpl_iscsi_data); @@ -267,6 +266,36 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) pdu_cb->doffset = lro_cb->offset; pdu_cb->nr_dfrags = gl->nfrags; pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; + lro_cb->complete = false; + } else { + struct cpl_rx_iscsi_cmp *cpl; + + cpl = (struct cpl_rx_iscsi_cmp *)gl->va; + offset = sizeof(struct cpl_rx_iscsi_cmp); + pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS); + len = be16_to_cpu(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, + be32_to_cpu(cpl->ddpvld)); + + if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) { + pdu_cb->flags |= PDUCBF_RX_DDP_CMP; + pdu_cb->complete = true; + } else if (pdu_cb->flags & PDUCBF_RX_DATA) { + pdu_cb->complete = true; + } + + lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen; + lro_cb->complete = true; + lro_cb->pdu_idx++; } cxgbit_copy_frags(skb, gl, offset); @@ -413,6 +442,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, switch (op) { case CPL_ISCSI_HDR: case CPL_ISCSI_DATA: + case CPL_RX_ISCSI_CMP: case CPL_RX_ISCSI_DDP: case CPL_FW4_ACK: lro_flush = false; @@ -454,12 +484,13 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, if (unlikely(op != *(u8 *)gl->va)) { pr_info("? 
FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", gl->va, be64_to_cpu(*rsp), - be64_to_cpu(*(u64 *)gl->va), + get_unaligned_be64(gl->va), gl->tot_len); return 0; } - if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) { + if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) || + (op == CPL_RX_ISCSI_CMP)) { if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, napi)) return 0; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 8bcb9b71f764..bdcc8b4c522a 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -8,6 +8,8 @@ #include <linux/workqueue.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> + #include <asm/unaligned.h> #include <net/tcp.h> #include <target/target_core_base.h> @@ -162,12 +164,14 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, u32 len, u32 credits, u32 compl) { struct fw_ofld_tx_data_wr *req; + const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; u32 submode = cxgbit_skcb_submode(skb); u32 wr_ulp_mode = 0; u32 hdr_size = sizeof(*req); u32 opcode = FW_OFLD_TX_DATA_WR; u32 immlen = 0; - u32 force = TX_FORCE_V(!submode); + u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) : + T6_TX_FORCE_F; if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { opcode = FW_ISCSI_TX_DATA_WR; @@ -243,7 +247,7 @@ void cxgbit_push_tx_frames(struct cxgbit_sock *csk) } __skb_unlink(skb, &csk->txq); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); - skb->csum = credits_needed + flowclen16; + skb->csum = (__force __wsum)(credits_needed + flowclen16); csk->wr_cred -= credits_needed; csk->wr_una_cred += credits_needed; @@ -651,26 +655,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) u32 max_npdu, max_iso_npdu; if (conn->login->leading_connection) { - param = iscsi_find_param_from_key(DATASEQUENCEINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATASEQUENCEINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - - param = iscsi_find_param_from_key(DATAPDUINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATAPDUINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - param = iscsi_find_param_from_key(MAXBURSTLENGTH, conn->param_list); if (!param) { @@ -681,11 +665,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) if (kstrtou32(param->value, 0, &mbl) < 0) return -1; } else { - if (!conn->sess->sess_ops->DataSequenceInOrder) - return 0; - if (!conn->sess->sess_ops->DataPDUInOrder) - return 0; - mbl = conn->sess->sess_ops->MaxBurstLength; } @@ -704,6 +683,53 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) return 0; } +/* + * cxgbit_seq_pdu_inorder() + * @csk: pointer to cxgbit socket structure + * + * This function checks whether data sequence and data + * pdu are in order. + * + * Return: returns -1 on error, 0 if data sequence and + * data pdu are in order, 1 if data sequence or data pdu + * is not in order. 
+ */ +static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct iscsi_param *param; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(DATASEQUENCEINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATASEQUENCEINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + param = iscsi_find_param_from_key(DATAPDUINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATAPDUINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + } else { + if (!conn->sess->sess_ops->DataSequenceInOrder) + return 1; + if (!conn->sess->sess_ops->DataPDUInOrder) + return 1; + } + + return 0; +} + static int cxgbit_set_params(struct iscsi_conn *conn) { struct cxgbit_sock *csk = conn->context; @@ -730,11 +756,24 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } if (!erl) { + int ret; + + ret = cxgbit_seq_pdu_inorder(csk); + if (ret < 0) { + return -1; + } else if (ret > 0) { + if (is_t5(cdev->lldi.adapter_type)) + goto enable_ddp; + else + goto enable_digest; + } + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { if (cxgbit_set_iso_npdu(csk)) return -1; } +enable_ddp: if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { if (cxgbit_setup_conn_pgidx(csk, ppm->tformat.pgsz_idx_dflt)) @@ -743,6 +782,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } } +enable_digest: if (cxgbit_set_digest(csk)) return -1; @@ -983,11 +1023,36 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) int rc, sg_nents, sg_off; bool dcrc_err = false; - rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); - if (rc < 0) - return rc; - else if (!cmd) - return 0; + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) { + u32 offset = be32_to_cpu(hdr->offset); + u32 ddp_data_len; + u32 payload_length = ntoh24(hdr->dlength); + bool success = false; + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0); + if (!cmd) + return 0; + + ddp_data_len = offset - cmd->write_data_done; + atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets); + + cmd->write_data_done = offset; + cmd->next_burst_len = ddp_data_len; + cmd->data_sn = be32_to_cpu(hdr->datasn); + + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, + cmd, payload_length, &success); + if (rc < 0) + return rc; + else if (!success) + return 0; + } else { + rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + } if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { pr_err("ITT: 0x%08x, Offset: %u, Length: %u," @@ -1351,6 +1416,9 @@ static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) for (i = 0; i < ssi->nr_frags; i++) put_page(skb_frag_page(&ssi->frags[i])); ssi->nr_frags = 0; + skb->data_len = 0; + skb->truesize -= skb->len; + skb->len = 0; } static void @@ -1364,39 +1432,42 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) unsigned int len = 0; if (pdu_cb->flags & PDUCBF_RX_HDR) { - hpdu_cb->flags = pdu_cb->flags; + u8 hfrag_idx = hssi->nr_frags; + + hpdu_cb->flags |= pdu_cb->flags; hpdu_cb->seq = pdu_cb->seq; hpdu_cb->hdr = pdu_cb->hdr; hpdu_cb->hlen = pdu_cb->hlen; - memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx], + memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[0])); - hssi->nr_frags = 1; - hpdu_cb->frags = 1; - hpdu_cb->hfrag_idx = 0; + get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + 
hssi->nr_frags++; + hpdu_cb->frags++; + hpdu_cb->hfrag_idx = hfrag_idx; - len = hssi->frags[0].size; - hskb->len = len; - hskb->data_len = len; - hskb->truesize = len; + len = hssi->frags[hfrag_idx].size; + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; } if (pdu_cb->flags & PDUCBF_RX_DATA) { - u8 hfrag_idx = 1, i; + u8 dfrag_idx = hssi->nr_frags, i; hpdu_cb->flags |= pdu_cb->flags; + hpdu_cb->dfrag_idx = dfrag_idx; len = 0; - for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) { - memcpy(&hssi->frags[hfrag_idx], + for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) { + memcpy(&hssi->frags[dfrag_idx], &ssi->frags[pdu_cb->dfrag_idx + i], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + get_page(skb_frag_page(&hssi->frags[dfrag_idx])); - len += hssi->frags[hfrag_idx].size; + len += hssi->frags[dfrag_idx].size; hssi->nr_frags++; hpdu_cb->frags++; @@ -1405,7 +1476,6 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) hpdu_cb->dlen = pdu_cb->dlen; hpdu_cb->doffset = hpdu_cb->hlen; hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; - hpdu_cb->dfrag_idx = 1; hskb->len += len; hskb->data_len += len; hskb->truesize += len; @@ -1490,10 +1560,15 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) { + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; int ret = -1; - if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) - ret = cxgbit_rx_lro_skb(csk, skb); + if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { + if (is_t5(lldi->adapter_type)) + ret = cxgbit_rx_lro_skb(csk, skb); + else + ret = cxgbit_process_lro_skb(csk, skb); + } __kfree_skb(skb); return ret; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index da2c73a255de..a91802432f2f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -24,6 +24,7 @@ #include <linux/vmalloc.h> #include <linux/idr.h> #include <linux/delay.h> +#include <linux/sched/signal.h> #include <asm/unaligned.h> #include <net/ipv6.h> #include <scsi/scsi_proto.h> @@ -1431,36 +1432,17 @@ static void iscsit_do_crypto_hash_buf( } int -iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, - struct iscsi_cmd **out_cmd) +__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd *cmd, u32 payload_length, + bool *success) { - struct iscsi_data *hdr = (struct iscsi_data *)buf; - struct iscsi_cmd *cmd = NULL; + struct iscsi_data *hdr = buf; struct se_cmd *se_cmd; - u32 payload_length = ntoh24(hdr->dlength); int rc; - if (!payload_length) { - pr_warn("DataOUT payload is ZERO, ignoring.\n"); - return 0; - } - /* iSCSI write */ atomic_long_add(payload_length, &conn->sess->rx_data_octets); - if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { - pr_err("DataSegmentLength: %u is greater than" - " MaxXmitDataSegmentLength: %u\n", payload_length, - conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, - buf); - } - - cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, - payload_length); - if (!cmd) - return 0; - pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), @@ -1545,7 +1527,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, } } /* - * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and + * Perform DataSN, 
DataSequenceInOrder, DataPDUInOrder, and * within-command recovery checks before receiving the payload. */ rc = iscsit_check_pre_dataout(cmd, buf); @@ -1553,10 +1535,44 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, return 0; else if (rc == DATAOUT_CANNOT_RECOVER) return -1; - - *out_cmd = cmd; + *success = true; return 0; } +EXPORT_SYMBOL(__iscsit_check_dataout_hdr); + +int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd) +{ + struct iscsi_data *hdr = buf; + struct iscsi_cmd *cmd; + u32 payload_length = ntoh24(hdr->dlength); + int rc; + bool success = false; + + if (!payload_length) { + pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n"); + return 0; + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err_ratelimited("DataSegmentLength: %u is greater than" + " MaxXmitDataSegmentLength: %u\n", payload_length, + conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length); + if (!cmd) + return 0; + + rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success); + + if (success) + *out_cmd = cmd; + + return rc; +} EXPORT_SYMBOL(iscsit_check_dataout_hdr); static int @@ -1920,6 +1936,28 @@ out: return ret; } +static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf) +{ + switch (iscsi_tmf) { + case ISCSI_TM_FUNC_ABORT_TASK: + return TMR_ABORT_TASK; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + return TMR_ABORT_TASK_SET; + case ISCSI_TM_FUNC_CLEAR_ACA: + return TMR_CLEAR_ACA; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + return TMR_CLEAR_TASK_SET; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + return TMR_LUN_RESET; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + return TMR_TARGET_WARM_RESET; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + return TMR_TARGET_COLD_RESET; + default: + return TMR_UNKNOWN; + } +} + int iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, unsigned char *buf) @@ -1929,7 +1967,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; bool sess_ref = false; - u8 function; + u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; @@ -1975,54 +2013,27 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - - u8 tcm_function; - int ret; - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); target_get_sess_cmd(&cmd->se_cmd, true); sess_ref = true; - - switch (function) { - case ISCSI_TM_FUNC_ABORT_TASK: - tcm_function = TMR_ABORT_TASK; - break; - case ISCSI_TM_FUNC_ABORT_TASK_SET: - tcm_function = TMR_ABORT_TASK_SET; - break; - case ISCSI_TM_FUNC_CLEAR_ACA: - tcm_function = TMR_CLEAR_ACA; - break; - case ISCSI_TM_FUNC_CLEAR_TASK_SET: - tcm_function = TMR_CLEAR_TASK_SET; - break; - case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: - tcm_function = TMR_LUN_RESET; - break; - case ISCSI_TM_FUNC_TARGET_WARM_RESET: - tcm_function = TMR_TARGET_WARM_RESET; - break; - case ISCSI_TM_FUNC_TARGET_COLD_RESET: - tcm_function = TMR_TARGET_COLD_RESET; - break; - default: + tcm_function = iscsit_convert_tmf(function); + if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" " 0x%02x\n", function); return iscsit_add_reject_cmd(cmd, 
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } - - ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, - tcm_function, GFP_KERNEL); - if (ret < 0) - return iscsit_add_reject_cmd(cmd, + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; - } + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = ISTATE_SEND_TASKMGTRSP; @@ -4136,7 +4147,7 @@ int iscsit_close_connection( /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands - * for realligence. + * for reallegiance. * * During normal operation clear the out of order commands (but * do not free the struct iscsi_ooo_cmdsn's) and release all @@ -4144,7 +4155,7 @@ int iscsit_close_connection( */ if (atomic_read(&conn->connection_recovery)) { iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); - iscsit_prepare_cmds_for_realligance(conn); + iscsit_prepare_cmds_for_reallegiance(conn); } else { iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index b54e72c7ab0f..9a96e17bf7cd 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -17,6 +17,8 @@ * GNU General Public License for more details. ******************************************************************************/ +#include <linux/sched/signal.h> + #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> @@ -44,10 +46,8 @@ void iscsit_set_dataout_sequence_values( */ if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; - cmd->seq_end_offset = (cmd->write_data_done + - ((cmd->se_cmd.data_length > - conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length)); + cmd->seq_end_offset = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); return; } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index faf9ae014b30..8df9c90f3db3 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -312,7 +312,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) return 0; } -int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) +int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn) { u32 cmd_count = 0; struct iscsi_cmd *cmd, *cmd_tmp; @@ -347,7 +347,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { - pr_debug("Not performing realligence on" + pr_debug("Not performing reallegiance on" " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x," " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); @@ -382,7 +382,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd_count++; pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x," " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for" - " realligence.\n", cmd->iscsi_opcode, + " reallegiance.\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h index 7965f1e86506..634d01e13652 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.h +++ b/drivers/target/iscsi/iscsi_target_erl2.h @@ -19,7 +19,7 @@ extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *, struct iscsi_session *); extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32); extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *); -extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *); +extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *); extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *); #endif /*** ISCSI_TARGET_ERL2_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index eab274d17b5c..ad8f3011bdc2 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/idr.h> #include <linux/tcp.h> /* TCP_NODELAY */ #include <net/ipv6.h> /* ipv6_addr_v4mapped() */ @@ -223,7 +224,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) return 0; pr_debug("%s iSCSI Session SID %u is still active for %s," - " preforming session reinstatement.\n", (sessiontype) ? + " performing session reinstatement.\n", (sessiontype) ? 
"Discovery" : "Normal", sess->sid, sess->sess_ops->InitiatorName); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 46388c9e08da..7ccc9c1cbfd1 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -19,6 +19,7 @@ #include <linux/ctype.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <net/sock.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> @@ -1249,16 +1250,16 @@ int iscsi_target_start_negotiation( { int ret; - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (conn->sock) { + struct sock *sk = conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } - ret = iscsi_target_do_login(conn, login); - if (ret < 0) { + ret = iscsi_target_do_login(conn, login); + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 3d637055c36f..cb231c907d51 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -440,14 +440,14 @@ static int iscsit_task_reassign_complete( break; default: pr_err("Illegal iSCSI Opcode 0x%02x during" - " command realligence\n", cmd->iscsi_opcode); + " command reallegiance\n", cmd->iscsi_opcode); return -1; } if (ret != 0) return ret; - pr_debug("Completed connection realligence for Opcode: 0x%02x," + pr_debug("Completed connection reallegiance for Opcode: 0x%02x," " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode, cmd->init_task_tag, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b5a1b4ccba12..5041a9c8bdcb 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -417,6 +417,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( return NULL; } +EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump); struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct iscsi_conn *conn, @@ -1304,39 +1305,6 @@ static int iscsit_do_rx_data( return total_rx; } -static int iscsit_do_tx_data( - struct iscsi_conn *conn, - struct iscsi_data_count *count) -{ - int ret, iov_len; - struct kvec *iov_p; - struct msghdr msg; - - if (!conn || !conn->sock || !conn->conn_ops) - return -1; - - if (count->data_length <= 0) { - pr_err("Data length is: %d\n", count->data_length); - return -1; - } - - memset(&msg, 0, sizeof(struct msghdr)); - - iov_p = count->iov; - iov_len = count->iov_count; - - ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, - count->data_length); - if (ret != count->data_length) { - pr_err("Unexpected ret: %d send data %d\n", - ret, count->data_length); - return -EPIPE; - } - pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); - - return ret; -} - int rx_data( struct iscsi_conn *conn, struct kvec *iov, @@ -1363,45 +1331,35 @@ int tx_data( int iov_count, int data) { - struct iscsi_data_count c; + struct msghdr msg; + int total_tx = 0; if (!conn || !conn->sock || !conn->conn_ops) return -1; - memset(&c, 0, sizeof(struct iscsi_data_count)); - c.iov = iov; - c.iov_count = iov_count; - c.data_length = data; - c.type = ISCSI_TX_DATA; + 
if (data <= 0) { + pr_err("Data length is: %d\n", data); + return -1; + } - return iscsit_do_tx_data(conn, &c); -} + memset(&msg, 0, sizeof(struct msghdr)); -static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y) -{ - switch (x->ss_family) { - case AF_INET: { - struct sockaddr_in *sinx = (struct sockaddr_in *)x; - struct sockaddr_in *siny = (struct sockaddr_in *)y; - if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) - return false; - if (sinx->sin_port != siny->sin_port) - return false; - break; - } - case AF_INET6: { - struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; - struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; - if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) - return false; - if (sinx->sin6_port != siny->sin6_port) - return false; - break; - } - default: - return false; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, + iov, iov_count, data); + + while (msg_data_left(&msg)) { + int tx_loop = sock_sendmsg(conn->sock, &msg); + if (tx_loop <= 0) { + pr_debug("tx_loop: %d total_tx %d\n", + tx_loop, total_tx); + return tx_loop; + } + total_tx += tx_loop; + pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", + tx_loop, total_tx, data); } - return true; + + return total_tx; } void iscsit_collect_login_stats( @@ -1420,13 +1378,6 @@ void iscsit_collect_login_stats( ls = &tiqn->login_stats; spin_lock(&ls->lock); - if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) && - ((get_jiffies_64() - ls->last_fail_time) < 10)) { - /* We already have the failure info for this login */ - spin_unlock(&ls->lock); - return; - } - if (status_class == ISCSI_STATUS_CLS_SUCCESS) ls->accepts++; else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { @@ -1471,10 +1422,10 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn) { struct iscsi_portal_group *tpg; - if (!conn || !conn->sess) + if (!conn) return NULL; - tpg = conn->sess->tpg; + tpg = conn->tpg; if (!tpg) return NULL; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 26929c44d703..c754ae33bf7b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -78,12 +78,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) &deve->read_bytes); se_lun = rcu_dereference(deve->se_lun); + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + se_cmd->se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; - - percpu_ref_get(&se_lun->lun_ref); se_cmd->lun_ref_active = true; if ((se_cmd->data_direction == DMA_TO_DEVICE) && @@ -97,6 +101,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) goto ref_dev; } } +out_unlock: rcu_read_unlock(); if (!se_lun) { @@ -163,7 +168,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if (deve) { - se_tmr->tmr_lun = rcu_dereference(deve->se_lun); se_cmd->se_lun = rcu_dereference(deve->se_lun); se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; @@ -816,6 +820,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) xcopy_lun = &dev->xcopy_lun; rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); init_completion(&xcopy_lun->lun_ref_comp); + init_completion(&xcopy_lun->lun_shutdown_comp); INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index df7b6e95c019..68d8aef7ab78 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -604,7 +604,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); __target_execute_cmd(cmd, false); diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 1a39033d2bff..8038255b21e8 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -158,12 +158,28 @@ static ssize_t target_stat_tgt_resets_show(struct config_item *item, atomic_long_read(&to_stat_tgt_dev(item)->num_resets)); } +static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete)); +} + +static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task)); +} + CONFIGFS_ATTR_RO(target_stat_tgt_, inst); CONFIGFS_ATTR_RO(target_stat_tgt_, indx); CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, status); CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, resets); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task); static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_inst, @@ -172,6 +188,8 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_status, &target_stat_tgt_attr_non_access_lus, &target_stat_tgt_attr_resets, + &target_stat_tgt_attr_aborts_complete, + &target_stat_tgt_attr_aborts_no_task, NULL, }; @@ -795,16 +813,34 @@ static ssize_t target_stat_transport_dev_name_show(struct config_item *item, return ret; } +static ssize_t target_stat_transport_proto_id_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id); + rcu_read_unlock(); + return ret; +} + CONFIGFS_ATTR_RO(target_stat_transport_, inst); CONFIGFS_ATTR_RO(target_stat_transport_, device); CONFIGFS_ATTR_RO(target_stat_transport_, indx); CONFIGFS_ATTR_RO(target_stat_transport_, dev_name); +CONFIGFS_ATTR_RO(target_stat_transport_, proto_id); static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { &target_stat_transport_attr_inst, &target_stat_transport_attr_device, &target_stat_transport_attr_indx, &target_stat_transport_attr_dev_name, + &target_stat_transport_attr_proto_id, NULL, }; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4f229e711e1c..dce1e1b47316 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -175,10 +175,9 @@ void core_tmr_abort_task( printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); - if (!__target_check_io_state(se_cmd, 
se_sess, 0)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - goto out; - } + if (!__target_check_io_state(se_cmd, se_sess, 0)) + continue; + list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -191,14 +190,15 @@ void core_tmr_abort_task( printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); tmr->response = TMR_FUNCTION_COMPLETE; + atomic_long_inc(&dev->aborts_complete); return; } spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -out: printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", tmr->ref_task_tag); tmr->response = TMR_TASK_DOES_NOT_EXIST; + atomic_long_inc(&dev->aborts_no_task); } static void core_tmr_drain_tmr_list( @@ -217,13 +217,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { - /* - * Allow the received TMR to return with FUNCTION_COMPLETE. - */ - if (tmr_p == tmr) - continue; - cmd = tmr_p->task_cmd; if (!cmd) { pr_err("Unable to locate struct se_cmd for TMR\n"); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4a8b180c478b..c0dbfa016575 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) { struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); - complete(&lun->lun_ref_comp); + complete(&lun->lun_shutdown_comp); } /* Does not change se_wwn->priv. */ @@ -572,6 +572,7 @@ struct se_lun *core_tpg_alloc_lun( lun->lun_link_magic = SE_LUN_LINK_MAGIC; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_ref_comp); + init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_deve_list); INIT_LIST_HEAD(&lun->lun_dev_link); atomic_set(&lun->lun_tg_pt_secondary_offline, 0); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 437591bc7c08..434d9d693989 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -593,9 +593,6 @@ static void target_remove_from_state_list(struct se_cmd *cmd) if (!dev) return; - if (cmd->transport_state & CMD_T_BUSY) - return; - spin_lock_irqsave(&dev->execute_task_lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); @@ -604,24 +601,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - bool write_pending) +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { unsigned long flags; - if (remove_from_lists) { - target_remove_from_state_list(cmd); + target_remove_from_state_list(cmd); - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; - } + /* + * Clear struct se_cmd->se_lun before the handoff to FE. + */ + cmd->se_lun = NULL; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (write_pending) - cmd->t_state = TRANSPORT_WRITE_PENDING; - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
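Both target_core_transport.c hunks in this area reduce the old transport_cmd_check_stop() to the same core check: take t_state_lock, test CMD_T_STOP, and if it is set, complete t_transport_stop_comp so a fabric caller blocked in transport_wait_for_tasks() can make progress; otherwise CMD_T_ACTIVE is cleared. A minimal sketch of that check, using the field names visible in the diff (illustrative only, not the actual kernel helper):

	static bool cmd_stop_requested(struct se_cmd *cmd)
	{
		unsigned long flags;
		bool stopped = false;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		if (cmd->transport_state & CMD_T_STOP)
			stopped = true;		/* frontend asked us to stop */
		else
			cmd->transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		/* Wake up anyone waiting in transport_wait_for_tasks() */
		if (stopped)
			complete_all(&cmd->t_transport_stop_comp);

		return stopped;
	}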
@@ -635,31 +626,18 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, complete_all(&cmd->t_transport_stop_comp); return 1; } - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) { - /* - * Some fabric modules like tcm_loop can release - * their internally allocated I/O reference now and - * struct se_cmd now. - * - * Fabric modules are expected to return '1' here if the - * se_cmd being passed is released at this point, - * or zero if not being released. - */ - if (cmd->se_tfo->check_stop_free != NULL) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return cmd->se_tfo->check_stop_free(cmd); - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; -} -static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) -{ - return transport_cmd_check_stop(cmd, true, false); + /* + * Some fabric modules like tcm_loop can release their internally + * allocated I/O reference and struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the se_cmd being + * passed is released at this point, or zero if not being released. + */ + return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd) + : 0; } static void transport_lun_remove_cmd(struct se_cmd *cmd) @@ -733,7 +711,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) spin_lock_irqsave(&cmd->t_state_lock, flags); - cmd->transport_state &= ~CMD_T_BUSY; if (dev && dev->transport->transport_complete) { dev->transport->transport_complete(cmd, @@ -1246,7 +1223,6 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); spin_lock_init(&cmd->t_state_lock); kref_init(&cmd->cmd_kref); - cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1671,6 +1647,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; + if (transport_check_aborted_status(cmd, 1)) + return; + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", @@ -1801,7 +1780,7 @@ void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) return; err: spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, ret); @@ -1829,7 +1808,7 @@ static int target_write_prot_action(struct se_cmd *cmd) sectors, 0, cmd->t_prot_sg, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; @@ -1918,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); if (target_write_prot_action(cmd)) @@ -1926,7 +1905,7 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); return; } @@ -1979,8 +1958,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { atomic_dec_mb(&dev->simple_cmds); dev->dev_cur_ordered_id++; - 
pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", - dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", @@ -2387,6 +2364,7 @@ EXPORT_SYMBOL(target_alloc_sgl); sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { + unsigned long flags; int ret = 0; bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); @@ -2452,7 +2430,24 @@ transport_generic_new_cmd(struct se_cmd *cmd) target_execute_cmd(cmd); return 0; } - transport_cmd_check_stop(cmd, false, true); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. + */ + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 0; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); if (ret == -EAGAIN || ret == -ENOMEM) @@ -2595,39 +2590,38 @@ static void target_release_cmd_kref(struct kref *kref) unsigned long flags; bool fabric_stop; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess) { + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - spin_lock(&se_cmd->t_state_lock); - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && - (se_cmd->transport_state & CMD_T_ABORTED); - spin_unlock(&se_cmd->t_state_lock); + spin_lock(&se_cmd->t_state_lock); + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && + (se_cmd->transport_state & CMD_T_ABORTED); + spin_unlock(&se_cmd->t_state_lock); - if (se_cmd->cmd_wait_set || fabric_stop) { + if (se_cmd->cmd_wait_set || fabric_stop) { + list_del_init(&se_cmd->se_cmd_list); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - target_free_cmd_mem(se_cmd); - complete(&se_cmd->cmd_wait_comp); - return; } - list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); } -/* target_put_sess_cmd - Check for active I/O shutdown via kref_put - * @se_cmd: command descriptor to drop +/** + * target_put_sess_cmd - decrease the command reference count + * @se_cmd: command to drop a reference from + * + * Returns 1 if and only if this target_put_sess_cmd() call caused the + * refcount to drop to zero. Returns zero otherwise. 
*/ int target_put_sess_cmd(struct se_cmd *se_cmd) { - struct se_session *se_sess = se_cmd->se_sess; - - if (!se_sess) { - target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return 1; - } return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2706,10 +2700,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) } EXPORT_SYMBOL(target_wait_for_sess_cmds); +static void target_lun_confirm(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + void transport_clear_lun_ref(struct se_lun *lun) { - percpu_ref_kill(&lun->lun_ref); + /* + * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop + * the initial reference and schedule confirm kill to be + * executed after one full RCU grace period has completed. + */ + percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); + /* + * The first completion waits for percpu_ref_switch_to_atomic_rcu() + * to call target_lun_confirm after lun->lun_ref has been marked + * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t + * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref + * fails for all new incoming I/O. + */ wait_for_completion(&lun->lun_ref_comp); + /* + * The second completion waits for percpu_ref_put_many() to + * invoke ->release() after lun->lun_ref has switched to + * atomic_t mode, and lun->lun_ref.count has reached zero. + * + * At this point all target-core lun->lun_ref references have + * been dropped via transport_lun_remove_cmd(), and it's safe + * to proceed with the remaining LUN shutdown. + */ + wait_for_completion(&lun->lun_shutdown_comp); } static bool @@ -2765,11 +2788,8 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, } /** - * transport_wait_for_tasks - wait for completion to occur - * @cmd: command to wait - * - * Called from frontend fabric context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. 
+ * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp + * @cmd: command to wait on */ bool transport_wait_for_tasks(struct se_cmd *cmd) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 5c1cb2df3a54..c3adefe95e50 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); spin_lock(&udev->commands_lock); - cmd = idr_find(&udev->commands, entry->hdr.cmd_id); - if (cmd) - idr_remove(&udev->commands, cmd->cmd_id); + cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); spin_unlock(&udev->commands_lock); if (!cmd) { diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9af7842b8178..ec372860106f 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -83,14 +83,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; - struct fc_lport *lport; struct ft_sess *sess; if (!cmd) return; sess = cmd->sess; fp = cmd->req_frame; - lport = fr_dev(fp); if (fr_seq(fp)) fc_seq_release(fr_seq(fp)); fc_frame_free(fp); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c2c056cc7ea5..776b34396144 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -245,6 +245,15 @@ config RCAR_THERMAL Enable this to plug the R-Car thermal sensor driver into the Linux thermal framework. +config RCAR_GEN3_THERMAL + tristate "Renesas R-Car Gen3 thermal driver" + depends on ARCH_RENESAS || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + help + Enable this to plug the R-Car Gen3 thermal sensor driver into the Linux + thermal framework. + config KIRKWOOD_THERMAL tristate "Temperature sensor on Marvell Kirkwood SoCs" depends on MACH_KIRKWOOD || COMPILE_TEST @@ -436,4 +445,12 @@ depends on (ARCH_QCOM && OF) || COMPILE_TEST source "drivers/thermal/qcom/Kconfig" endmenu +config ZX2967_THERMAL + tristate "Thermal sensors on zx2967 SoC" + depends on ARCH_ZX || COMPILE_TEST + help + Enable the zx2967 thermal sensors driver, which supports + the primitive temperature sensor embedded in zx2967 SoCs. + This sensor generates the real time die temperature. 
+ endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 6a3d7b573036..7adae2029355 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o +obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o obj-y += samsung/ obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o @@ -56,3 +57,4 @@ obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o +obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index ed5dd0e88657..56711c25584d 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c @@ -65,42 +65,7 @@ struct clock_cooling_device { }; #define to_clock_cooling_device(x) \ container_of(x, struct clock_cooling_device, clk_rate_change_nb) -static DEFINE_IDR(clock_idr); -static DEFINE_MUTEX(cooling_clock_lock); - -/** - * clock_cooling_get_idr - function to get an unique id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int clock_cooling_get_idr(int *id) -{ - int ret; - - mutex_lock(&cooling_clock_lock); - ret = idr_alloc(&clock_idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&cooling_clock_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @id: int value representing the unique id. 
- */ -static void release_idr(int id) -{ - mutex_lock(&cooling_clock_lock); - idr_remove(&clock_idr, id); - mutex_unlock(&cooling_clock_lock); -} +static DEFINE_IDA(clock_ida); /* Below code defines functions to be used for clock as cooling device */ @@ -432,16 +397,17 @@ clock_cooling_register(struct device *dev, const char *clock_name) if (IS_ERR(ccdev->clk)) return ERR_CAST(ccdev->clk); - ret = clock_cooling_get_idr(&ccdev->id); - if (ret) - return ERR_PTR(-EINVAL); + ret = ida_simple_get(&clock_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + return ERR_PTR(ret); + ccdev->id = ret; snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id); cdev = thermal_cooling_device_register(dev_name, ccdev, &clock_cooling_ops); if (IS_ERR(cdev)) { - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); return ERR_PTR(-EINVAL); } ccdev->cdev = cdev; @@ -450,7 +416,7 @@ clock_cooling_register(struct device *dev, const char *clock_name) /* Assuming someone has already filled the opp table for this device */ ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table); if (ret) { - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); return ERR_PTR(ret); } ccdev->clock_state = 0; @@ -481,6 +447,6 @@ void clock_cooling_unregister(struct thermal_cooling_device *cdev) dev_pm_opp_free_cpufreq_table(ccdev->dev, &ccdev->freq_table); thermal_cooling_device_unregister(ccdev->cdev); - release_idr(ccdev->id); + ida_simple_remove(&clock_ida, ccdev->id); } EXPORT_SYMBOL_GPL(clock_cooling_unregister); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 85fdbf762fa0..91048eeca28b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -26,6 +26,7 @@ #include <linux/thermal.h> #include <linux/cpufreq.h> #include <linux/err.h> +#include <linux/idr.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/cpu.h> @@ -104,50 +105,13 @@ struct cpufreq_cooling_device { struct device *cpu_dev; get_static_t plat_get_static_power; }; -static DEFINE_IDR(cpufreq_idr); -static DEFINE_MUTEX(cooling_cpufreq_lock); +static DEFINE_IDA(cpufreq_ida); static unsigned int cpufreq_dev_count; static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_dev_list); -/** - * get_idr - function to get a unique id. - * @idr: struct idr * handle used to create a id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int get_idr(struct idr *idr, int *id) -{ - int ret; - - mutex_lock(&cooling_cpufreq_lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&cooling_cpufreq_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @idr: struct idr * handle used for creating the id. - * @id: int value representing the unique id. 
- */ -static void release_idr(struct idr *idr, int id) -{ - mutex_lock(&cooling_cpufreq_lock); - idr_remove(idr, id); - mutex_unlock(&cooling_cpufreq_lock); -} - /* Below code defines functions to be used for cpufreq as cooling device */ /** @@ -645,31 +609,39 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, unsigned long state, u32 *power) { unsigned int freq, num_cpus; - cpumask_t cpumask; + cpumask_var_t cpumask; u32 static_power, dynamic_power; int ret; struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); - num_cpus = cpumask_weight(&cpumask); + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); + num_cpus = cpumask_weight(cpumask); /* None of our cpus are online, so no power */ if (num_cpus == 0) { *power = 0; - return 0; + ret = 0; + goto out; } freq = cpufreq_device->freq_table[state]; - if (!freq) - return -EINVAL; + if (!freq) { + ret = -EINVAL; + goto out; + } dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus; ret = get_static_power(cpufreq_device, tz, freq, &static_power); if (ret) - return ret; + goto out; *power = static_power + dynamic_power; - return 0; +out: + free_cpumask_var(cpumask); + return ret; } /** @@ -795,16 +767,20 @@ __cpufreq_cooling_register(struct device_node *np, struct cpufreq_cooling_device *cpufreq_dev; char dev_name[THERMAL_NAME_LENGTH]; struct cpufreq_frequency_table *pos, *table; - struct cpumask temp_mask; + cpumask_var_t temp_mask; unsigned int freq, i, num_cpus; int ret; struct thermal_cooling_device_ops *cooling_ops; - cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); - policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); + if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + + cpumask_and(temp_mask, clip_cpus, cpu_online_mask); + policy = cpufreq_cpu_get(cpumask_first(temp_mask)); if (!policy) { pr_debug("%s: CPUFreq policy not found\n", __func__); - return ERR_PTR(-EPROBE_DEFER); + cool_dev = ERR_PTR(-EPROBE_DEFER); + goto free_cpumask; } table = policy->freq_table; @@ -867,11 +843,12 @@ __cpufreq_cooling_register(struct device_node *np, cooling_ops = &cpufreq_cooling_ops; } - ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); - if (ret) { + ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { cool_dev = ERR_PTR(ret); goto free_power_table; } + cpufreq_dev->id = ret; /* Fill freq-table in descending order of frequencies */ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { @@ -891,27 +868,24 @@ __cpufreq_cooling_register(struct device_node *np, cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, cooling_ops); if (IS_ERR(cool_dev)) - goto remove_idr; + goto remove_ida; cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; cpufreq_dev->cool_dev = cool_dev; - mutex_lock(&cooling_cpufreq_lock); - mutex_lock(&cooling_list_lock); list_add(&cpufreq_dev->node, &cpufreq_dev_list); - mutex_unlock(&cooling_list_lock); /* Register the notifier for first cpufreq cooling device */ if (!cpufreq_dev_count++) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - mutex_unlock(&cooling_cpufreq_lock); + mutex_unlock(&cooling_list_lock); goto put_policy; -remove_idr: - release_idr(&cpufreq_idr, cpufreq_dev->id); +remove_ida: + ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); free_power_table: kfree(cpufreq_dev->dyn_power_table); free_table: @@ -924,7 +898,8 
@@ free_cdev: kfree(cpufreq_dev); put_policy: cpufreq_cpu_put(policy); - +free_cpumask: + free_cpumask_var(temp_mask); return cool_dev; } @@ -1052,20 +1027,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev = cdev->devdata; + mutex_lock(&cooling_list_lock); /* Unregister the notifier for the last cpufreq cooling device */ - mutex_lock(&cooling_cpufreq_lock); if (!--cpufreq_dev_count) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - mutex_lock(&cooling_list_lock); list_del(&cpufreq_dev->node); mutex_unlock(&cooling_list_lock); - mutex_unlock(&cooling_cpufreq_lock); - thermal_cooling_device_unregister(cpufreq_dev->cool_dev); - release_idr(&cpufreq_idr, cpufreq_dev->id); + ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); kfree(cpufreq_dev->dyn_power_table); kfree(cpufreq_dev->time_in_idle_timestamp); kfree(cpufreq_dev->time_in_idle); diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index ba7a5cd994dc..7743a78d4723 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -21,14 +21,14 @@ #include <linux/devfreq.h> #include <linux/devfreq_cooling.h> #include <linux/export.h> +#include <linux/idr.h> #include <linux/slab.h> #include <linux/pm_opp.h> #include <linux/thermal.h> #include <trace/events/thermal.h> -static DEFINE_MUTEX(devfreq_lock); -static DEFINE_IDR(devfreq_idr); +static DEFINE_IDA(devfreq_ida); /** * struct devfreq_cooling_device - Devfreq cooling device @@ -58,42 +58,6 @@ struct devfreq_cooling_device { }; /** - * get_idr - function to get a unique id. - * @idr: struct idr * handle used to create a id. - * @id: int * value generated by this function. - * - * This function will populate @id with an unique - * id, using the idr API. - * - * Return: 0 on success, an error code on failure. - */ -static int get_idr(struct idr *idr, int *id) -{ - int ret; - - mutex_lock(&devfreq_lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - mutex_unlock(&devfreq_lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - - return 0; -} - -/** - * release_idr - function to free the unique id. - * @idr: struct idr * handle used for creating the id. - * @id: int value representing the unique id. 
- */ -static void release_idr(struct idr *idr, int id) -{ - mutex_lock(&devfreq_lock); - idr_remove(idr, id); - mutex_unlock(&devfreq_lock); -} - -/** * partition_enable_opps() - disable all opps above a given state * @dfc: Pointer to devfreq we are operating on * @cdev_state: cooling device state we're setting @@ -489,9 +453,10 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, if (err) goto free_dfc; - err = get_idr(&devfreq_idr, &dfc->id); - if (err) + err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL); + if (err < 0) goto free_tables; + dfc->id = err; snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id); @@ -502,15 +467,15 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, dev_err(df->dev.parent, "Failed to register devfreq cooling device (%d)\n", err); - goto release_idr; + goto release_ida; } dfc->cdev = cdev; return cdev; -release_idr: - release_idr(&devfreq_idr, dfc->id); +release_ida: + ida_simple_remove(&devfreq_ida, dfc->id); free_tables: kfree(dfc->power_table); kfree(dfc->freq_table); @@ -558,7 +523,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) dfc = cdev->devdata; thermal_cooling_device_unregister(dfc->cdev); - release_idr(&devfreq_idr, dfc->id); + ida_simple_remove(&devfreq_ida, dfc->id); kfree(dfc->power_table); kfree(dfc->freq_table); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 06912f0602b7..fb648a45754e 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -489,6 +489,10 @@ static int imx_thermal_probe(struct platform_device *pdev) data->tempmon = map; data->socdata = of_device_get_match_data(&pdev->dev); + if (!data->socdata) { + dev_err(&pdev->dev, "no device match found\n"); + return -ENODEV; + } /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ if (data->socdata->version == TEMPMON_IMX6SX) { diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index df64692e9e64..d718cd179ddb 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -50,6 +50,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> #include <asm/nmi.h> #include <asm/msr.h> @@ -461,16 +462,13 @@ static void poll_pkg_cstate(struct work_struct *dummy) { static u64 msr_last; static u64 tsc_last; - static unsigned long jiffies_last; u64 msr_now; - unsigned long jiffies_now; u64 tsc_now; u64 val64; msr_now = pkg_state_counter(); tsc_now = rdtsc(); - jiffies_now = jiffies; /* calculate pkg cstate vs tsc ratio */ if (!msr_last || !tsc_last) @@ -485,7 +483,6 @@ static void poll_pkg_cstate(struct work_struct *dummy) /* update record */ msr_last = msr_now; - jiffies_last = jiffies_now; tsc_last = tsc_now; if (true == clamping) diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 34169c32d495..1aff7fde54b1 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -183,37 +183,37 @@ struct mtk_thermal { }; /* MT8173 thermal sensor data */ -const int mt8173_bank_data[MT8173_NUM_ZONES][3] = { +static const int mt8173_bank_data[MT8173_NUM_ZONES][3] = { { MT8173_TS2, MT8173_TS3 }, { MT8173_TS2, MT8173_TS4 }, { MT8173_TS1, MT8173_TS2, MT8173_TSABB }, { MT8173_TS2 }, }; -const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = { +static const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR2 }; -const int 
mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = { +static const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3 }; -const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 }; +static const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 }; /* MT2701 thermal sensor data */ -const int mt2701_bank_data[MT2701_NUM_SENSORS] = { +static const int mt2701_bank_data[MT2701_NUM_SENSORS] = { MT2701_TS1, MT2701_TS2, MT2701_TSABB }; -const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = { +static const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, TEMP_MSR1, TEMP_MSR2 }; -const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { +static const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2 }; -const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; +static const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 }; /** * The MT8173 thermal controller has four banks. Each bank can read up to diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c new file mode 100644 index 000000000000..d33c845244b1 --- /dev/null +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -0,0 +1,335 @@ +/* + * R-Car Gen3 THS thermal sensor driver + * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. + * + * Copyright (C) 2016 Renesas Electronics Corporation. + * Copyright (C) 2016 Sang Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/thermal.h> + +/* Register offsets */ +#define REG_GEN3_IRQSTR 0x04 +#define REG_GEN3_IRQMSK 0x08 +#define REG_GEN3_IRQCTL 0x0C +#define REG_GEN3_IRQEN 0x10 +#define REG_GEN3_IRQTEMP1 0x14 +#define REG_GEN3_IRQTEMP2 0x18 +#define REG_GEN3_IRQTEMP3 0x1C +#define REG_GEN3_CTSR 0x20 +#define REG_GEN3_THCTR 0x20 +#define REG_GEN3_TEMP 0x28 +#define REG_GEN3_THCODE1 0x50 +#define REG_GEN3_THCODE2 0x54 +#define REG_GEN3_THCODE3 0x58 + +/* CTSR bits */ +#define CTSR_PONM BIT(8) +#define CTSR_AOUT BIT(7) +#define CTSR_THBGR BIT(5) +#define CTSR_VMEN BIT(4) +#define CTSR_VMST BIT(1) +#define CTSR_THSST BIT(0) + +/* THCTR bits */ +#define THCTR_PONM BIT(6) +#define THCTR_THSST BIT(0) + +#define CTEMP_MASK 0xFFF + +#define MCELSIUS(temp) ((temp) * 1000) +#define GEN3_FUSE_MASK 0xFFF + +#define TSC_MAX_NUM 3 + +/* Structure for thermal temperature calculation */ +struct equation_coefs { + int a1; + int b1; + int a2; + int b2; +}; + +struct rcar_gen3_thermal_tsc { + void __iomem *base; + struct thermal_zone_device *zone; + struct equation_coefs coef; + struct mutex lock; +}; + +struct rcar_gen3_thermal_priv { + struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM]; +}; + +struct rcar_gen3_thermal_data { + void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc); +}; + +static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc, + u32 reg) +{ + return ioread32(tsc->base + reg); +} + +static inline void rcar_gen3_thermal_write(struct rcar_gen3_thermal_tsc *tsc, + u32 reg, u32 data) +{ + iowrite32(data, tsc->base + reg); +} + +/* + * Linear approximation for temperature + * + * [reg] = [temp] * a + b => [temp] = ([reg] - b) / a + * + * The constants a and b are calculated using two triplets of int values PTAT + * and THCODE. PTAT and THCODE can either be read from hardware or taken from + * hard-coded values in the driver. The formulas used to calculate a and b are + * taken from the BSP and are only sparsely documented and understood. + * + * Examining the linear formula and the formula used to calculate constants a + * and b, while knowing that the span for PTAT and THCODE values is between + * 0x000 and 0xfff, the largest integer possible is 0xfff * 0xfff == 0xffe001. + * The integer also needs to be signed, so that leaves 7 bits for binary + * fixed-point scaling.
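+ * + * For example, FIXPT_INT(41) == 41 << 7 == 5248, and converting back with + * FIXPT_TO_MCELSIUS(5248) == (5248 * 1000) >> 7 == 41000, i.e. 41 degrees + * Celsius.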
+ */ + +#define FIXPT_SHIFT 7 +#define FIXPT_INT(_x) ((_x) << FIXPT_SHIFT) +#define FIXPT_DIV(_a, _b) DIV_ROUND_CLOSEST(((_a) << FIXPT_SHIFT), (_b)) +#define FIXPT_TO_MCELSIUS(_x) ((_x) * 1000 >> FIXPT_SHIFT) + +#define RCAR3_THERMAL_GRAN 500 /* mili Celsius */ + +/* no idea where these constants come from */ +#define TJ_1 96 +#define TJ_3 -41 + +static void rcar_gen3_thermal_calc_coefs(struct equation_coefs *coef, + int *ptat, int *thcode) +{ + int tj_2; + + /* TODO: Find documentation and document constant calculation formula */ + + /* + * Division is not scaled in BSP and if scaled it might overflow + * the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled + */ + tj_2 = (FIXPT_INT((ptat[1] - ptat[2]) * 137) + / (ptat[0] - ptat[2])) - FIXPT_INT(41); + + coef->a1 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[2]), + tj_2 - FIXPT_INT(TJ_3)); + coef->b1 = FIXPT_INT(thcode[2]) - coef->a1 * TJ_3; + + coef->a2 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[0]), + tj_2 - FIXPT_INT(TJ_1)); + coef->b2 = FIXPT_INT(thcode[0]) - coef->a2 * TJ_1; +} + +static int rcar_gen3_thermal_round(int temp) +{ + int result, round_offs; + + round_offs = temp >= 0 ? RCAR3_THERMAL_GRAN / 2 : + -RCAR3_THERMAL_GRAN / 2; + result = (temp + round_offs) / RCAR3_THERMAL_GRAN; + return result * RCAR3_THERMAL_GRAN; +} + +static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) +{ + struct rcar_gen3_thermal_tsc *tsc = devdata; + int mcelsius, val1, val2; + u32 reg; + + /* Read register and convert to mili Celsius */ + mutex_lock(&tsc->lock); + + reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; + + val1 = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b1, tsc->coef.a1); + val2 = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b2, tsc->coef.a2); + mcelsius = FIXPT_TO_MCELSIUS((val1 + val2) / 2); + + mutex_unlock(&tsc->lock); + + /* Make sure we are inside specifications */ + if ((mcelsius < MCELSIUS(-40)) || (mcelsius > MCELSIUS(125))) + return -EIO; + + /* Round value to device granularity setting */ + *temp = rcar_gen3_thermal_round(mcelsius); + + return 0; +} + +static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { + .get_temp = rcar_gen3_thermal_get_temp, +}; + +static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc) +{ + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR); + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, 0x0); + + usleep_range(1000, 2000); + + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_PONM); + rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F); + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, + CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN); + + usleep_range(100, 200); + + rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, + CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN | + CTSR_VMST | CTSR_THSST); + + usleep_range(1000, 2000); +} + +static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc) +{ + u32 reg_val; + + reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR); + reg_val &= ~THCTR_PONM; + rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val); + + usleep_range(1000, 2000); + + rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F); + reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR); + reg_val |= THCTR_THSST; + rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val); +} + +static const struct rcar_gen3_thermal_data r8a7795_data = { + .thermal_init = r8a7795_thermal_init, +}; + +static const struct rcar_gen3_thermal_data r8a7796_data = { + .thermal_init = r8a7796_thermal_init, +}; + +static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { + { .compatible = 
"renesas,r8a7795-thermal", .data = &r8a7795_data}, + { .compatible = "renesas,r8a7796-thermal", .data = &r8a7796_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); + +static int rcar_gen3_thermal_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + +static int rcar_gen3_thermal_probe(struct platform_device *pdev) +{ + struct rcar_gen3_thermal_priv *priv; + struct device *dev = &pdev->dev; + struct resource *res; + struct thermal_zone_device *zone; + int ret, i; + const struct rcar_gen3_thermal_data *match_data = + of_device_get_match_data(dev); + + /* default values if FUSEs are missing */ + /* TODO: Read values from hardware on supported platforms */ + int ptat[3] = { 2351, 1509, 435 }; + int thcode[TSC_MAX_NUM][3] = { + { 3248, 2800, 2221 }, + { 3245, 2795, 2216 }, + { 3250, 2805, 2237 }, + }; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + for (i = 0; i < TSC_MAX_NUM; i++) { + struct rcar_gen3_thermal_tsc *tsc; + + tsc = devm_kzalloc(dev, sizeof(*tsc), GFP_KERNEL); + if (!tsc) { + ret = -ENOMEM; + goto error_unregister; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) + break; + + tsc->base = devm_ioremap_resource(dev, res); + if (IS_ERR(tsc->base)) { + ret = PTR_ERR(tsc->base); + goto error_unregister; + } + + priv->tscs[i] = tsc; + mutex_init(&tsc->lock); + + match_data->thermal_init(tsc); + rcar_gen3_thermal_calc_coefs(&tsc->coef, ptat, thcode[i]); + + zone = devm_thermal_zone_of_sensor_register(dev, i, tsc, + &rcar_gen3_tz_of_ops); + if (IS_ERR(zone)) { + dev_err(dev, "Can't register thermal zone\n"); + ret = PTR_ERR(zone); + goto error_unregister; + } + tsc->zone = zone; + } + + return 0; + +error_unregister: + rcar_gen3_thermal_remove(pdev); + + return ret; +} + +static struct platform_driver rcar_gen3_thermal_driver = { + .driver = { + .name = "rcar_gen3_thermal", + .of_match_table = rcar_gen3_thermal_dt_ids, + }, + .probe = rcar_gen3_thermal_probe, + .remove = rcar_gen3_thermal_remove, +}; +module_platform_driver(rcar_gen3_thermal_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("R-Car Gen3 THS thermal sensor driver"); +MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>"); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ad1186dd6132..7b8ef09d2b3c 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1168,7 +1168,6 @@ static int exynos_of_sensor_conf(struct device_node *np, pdata->default_temp_offset = (u8)value; of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type); - of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode); of_node_put(np); return 0; diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 440c7140b660..5149c2a3030c 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -70,7 +70,6 @@ struct exynos_tmu_platform_data { enum soc_type type; u32 cal_type; - u32 cal_mode; }; #endif /* _EXYNOS_TMU_H */ diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 655591316a88..11f0675cb7e5 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -36,9 +36,8 @@ MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs 
support"); MODULE_LICENSE("GPL v2"); -static DEFINE_IDR(thermal_tz_idr); -static DEFINE_IDR(thermal_cdev_idr); -static DEFINE_MUTEX(thermal_idr_lock); +static DEFINE_IDA(thermal_tz_ida); +static DEFINE_IDA(thermal_cdev_ida); static LIST_HEAD(thermal_tz_list); static LIST_HEAD(thermal_cdev_list); @@ -589,29 +588,6 @@ void thermal_zone_device_unbind_exception(struct thermal_zone_device *tz, * - thermal zone devices lifecycle: registration, unregistration, * binding, and unbinding. */ -static int get_idr(struct idr *idr, struct mutex *lock, int *id) -{ - int ret; - - if (lock) - mutex_lock(lock); - ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL); - if (lock) - mutex_unlock(lock); - if (unlikely(ret < 0)) - return ret; - *id = ret; - return 0; -} - -static void release_idr(struct idr *idr, struct mutex *lock, int id) -{ - if (lock) - mutex_lock(lock); - idr_remove(idr, id); - if (lock) - mutex_unlock(lock); -} /** * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone @@ -685,15 +661,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, dev->target = THERMAL_NO_TARGET; dev->weight = weight; - result = get_idr(&tz->idr, &tz->lock, &dev->id); - if (result) + result = ida_simple_get(&tz->ida, 0, 0, GFP_KERNEL); + if (result < 0) goto free_mem; + dev->id = result; sprintf(dev->name, "cdev%d", dev->id); result = sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name); if (result) - goto release_idr; + goto release_ida; sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); sysfs_attr_init(&dev->attr.attr); @@ -737,8 +714,8 @@ remove_trip_file: device_remove_file(&tz->device, &dev->attr); remove_symbol_link: sysfs_remove_link(&tz->device.kobj, dev->name); -release_idr: - release_idr(&tz->idr, &tz->lock, dev->id); +release_ida: + ida_simple_remove(&tz->ida, dev->id); free_mem: kfree(dev); return result; @@ -785,7 +762,7 @@ unbind: device_remove_file(&tz->device, &pos->weight_attr); device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); - release_idr(&tz->idr, &tz->lock, pos->id); + ida_simple_remove(&tz->ida, pos->id); kfree(pos); return 0; } @@ -925,12 +902,13 @@ __thermal_cooling_device_register(struct device_node *np, if (!cdev) return ERR_PTR(-ENOMEM); - result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); - if (result) { + result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); + if (result < 0) { kfree(cdev); return ERR_PTR(result); } + cdev->id = result; strlcpy(cdev->type, type ? 
: "", sizeof(cdev->type)); mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->thermal_instances); @@ -943,7 +921,7 @@ __thermal_cooling_device_register(struct device_node *np, dev_set_name(&cdev->device, "cooling_device%d", cdev->id); result = device_register(&cdev->device); if (result) { - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + ida_simple_remove(&thermal_cdev_ida, cdev->id); kfree(cdev); return ERR_PTR(result); } @@ -1070,7 +1048,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); + ida_simple_remove(&thermal_cdev_ida, cdev->id); device_unregister(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); @@ -1172,14 +1150,15 @@ thermal_zone_device_register(const char *type, int trips, int mask, return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tz->thermal_instances); - idr_init(&tz->idr); + ida_init(&tz->ida); mutex_init(&tz->lock); - result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); - if (result) { + result = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL); + if (result < 0) { kfree(tz); return ERR_PTR(result); } + tz->id = result; strlcpy(tz->type, type, sizeof(tz->type)); tz->ops = ops; tz->tzp = tzp; @@ -1201,7 +1180,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, dev_set_name(&tz->device, "thermal_zone%d", tz->id); result = device_register(&tz->device); if (result) { - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + ida_simple_remove(&thermal_tz_ida, tz->id); kfree(tz); return ERR_PTR(result); } @@ -1255,7 +1234,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, return tz; unregister: - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); + ida_simple_remove(&thermal_tz_ida, tz->id); device_unregister(&tz->device); return ERR_PTR(result); } @@ -1313,8 +1292,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); - release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); - idr_destroy(&tz->idr); + ida_simple_remove(&thermal_tz_ida, tz->id); + ida_destroy(&tz->ida); mutex_destroy(&tz->lock); device_unregister(&tz->device); } @@ -1514,9 +1493,8 @@ unregister_class: unregister_governors: thermal_unregister_governors(); error: - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); + ida_destroy(&thermal_tz_ida); + ida_destroy(&thermal_cdev_ida); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); return result; @@ -1529,9 +1507,8 @@ static void __exit thermal_exit(void) genetlink_exit(); class_unregister(&thermal_class); thermal_unregister_governors(); - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); + ida_destroy(&thermal_tz_ida); + ida_destroy(&thermal_cdev_ida); mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); } diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig index ea8283f08aa6..fe0e877f84d0 100644 --- a/drivers/thermal/ti-soc-thermal/Kconfig +++ b/drivers/thermal/ti-soc-thermal/Kconfig @@ -11,7 +11,6 @@ config TI_SOC_THERMAL config TI_THERMAL bool "Texas Instruments SoCs thermal framework support" depends on TI_SOC_THERMAL - depends on CPU_THERMAL help If you say yes here you want to get support for generic thermal framework for the Texas Instruments on die bandgap temperature sensor. 
diff --git a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h index 6b0f2b1160f7..a31e4b5e82cd 100644 --- a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h @@ -54,7 +54,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET 0x8 #define DRA752_TEMP_SENSOR_CORE_OFFSET 0x154 #define DRA752_BANDGAP_THRESHOLD_CORE_OFFSET 0x1ac -#define DRA752_BANDGAP_TSHUT_CORE_OFFSET 0x1b8 #define DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET 0x1c4 #define DRA752_DTEMP_CORE_0_OFFSET 0x208 #define DRA752_DTEMP_CORE_1_OFFSET 0x20c @@ -66,7 +65,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET 0x388 #define DRA752_TEMP_SENSOR_IVA_OFFSET 0x398 #define DRA752_BANDGAP_THRESHOLD_IVA_OFFSET 0x3a4 -#define DRA752_BANDGAP_TSHUT_IVA_OFFSET 0x3ac #define DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET 0x3b4 #define DRA752_DTEMP_IVA_0_OFFSET 0x3d0 #define DRA752_DTEMP_IVA_1_OFFSET 0x3d4 @@ -78,7 +76,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET 0x4 #define DRA752_TEMP_SENSOR_MPU_OFFSET 0x14c #define DRA752_BANDGAP_THRESHOLD_MPU_OFFSET 0x1a4 -#define DRA752_BANDGAP_TSHUT_MPU_OFFSET 0x1b0 #define DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET 0x1bc #define DRA752_DTEMP_MPU_0_OFFSET 0x1e0 #define DRA752_DTEMP_MPU_1_OFFSET 0x1e4 @@ -90,7 +87,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET 0x384 #define DRA752_TEMP_SENSOR_DSPEVE_OFFSET 0x394 #define DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET 0x3a0 -#define DRA752_BANDGAP_TSHUT_DSPEVE_OFFSET 0x3a8 #define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET 0x3b0 #define DRA752_DTEMP_DSPEVE_0_OFFSET 0x3bc #define DRA752_DTEMP_DSPEVE_1_OFFSET 0x3c0 @@ -102,7 +98,6 @@ #define DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET 0x0 #define DRA752_TEMP_SENSOR_GPU_OFFSET 0x150 #define DRA752_BANDGAP_THRESHOLD_GPU_OFFSET 0x1a8 -#define DRA752_BANDGAP_TSHUT_GPU_OFFSET 0x1b4 #define DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET 0x1c0 #define DRA752_DTEMP_GPU_0_OFFSET 0x1f4 #define DRA752_DTEMP_GPU_1_OFFSET 0x1f8 @@ -173,10 +168,6 @@ #define DRA752_BANDGAP_THRESHOLD_HOT_MASK (0x3ff << 16) #define DRA752_BANDGAP_THRESHOLD_COLD_MASK (0x3ff << 0) -/* DRA752.TSHUT_THRESHOLD */ -#define DRA752_TSHUT_THRESHOLD_MUXCTRL_MASK BIT(31) -#define DRA752_TSHUT_THRESHOLD_HOT_MASK (0x3ff << 16) -#define DRA752_TSHUT_THRESHOLD_COLD_MASK (0x3ff << 0) /* DRA752.BANDGAP_CUMUL_DTEMP_CORE */ #define DRA752_BANDGAP_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0) @@ -216,8 +207,6 @@ #define DRA752_GPU_MAX_TEMP 125000 #define DRA752_GPU_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_GPU_TSHUT_HOT 915 -#define DRA752_GPU_TSHUT_COLD 900 #define DRA752_GPU_T_HOT 800 #define DRA752_GPU_T_COLD 795 @@ -230,8 +219,6 @@ #define DRA752_MPU_MAX_TEMP 125000 #define DRA752_MPU_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_MPU_TSHUT_HOT 915 -#define DRA752_MPU_TSHUT_COLD 900 #define DRA752_MPU_T_HOT 800 #define DRA752_MPU_T_COLD 795 @@ -244,8 +231,6 @@ #define DRA752_CORE_MAX_TEMP 125000 #define DRA752_CORE_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_CORE_TSHUT_HOT 915 -#define DRA752_CORE_TSHUT_COLD 900 #define DRA752_CORE_T_HOT 800 #define DRA752_CORE_T_COLD 795 @@ -258,8 +243,6 @@ #define DRA752_DSPEVE_MAX_TEMP 125000 #define DRA752_DSPEVE_HYST_VAL 5000 /* interrupts thresholds */ -#define DRA752_DSPEVE_TSHUT_HOT 915 -#define DRA752_DSPEVE_TSHUT_COLD 900 #define DRA752_DSPEVE_T_HOT 800 #define DRA752_DSPEVE_T_COLD 795 @@ -272,8 +255,6 @@ #define DRA752_IVA_MAX_TEMP 125000 #define DRA752_IVA_HYST_VAL 5000 /* interrupts thresholds */ -#define 
DRA752_IVA_TSHUT_HOT 915 -#define DRA752_IVA_TSHUT_COLD 900 #define DRA752_IVA_T_HOT 800 #define DRA752_IVA_T_COLD 795 diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c index 58b5c6694cd4..118d7d847715 100644 --- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c @@ -49,9 +49,6 @@ dra752_core_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_CORE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_CORE_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK, @@ -85,9 +82,6 @@ dra752_iva_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_IVA_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_IVA_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_IVA_MASK, @@ -121,9 +115,6 @@ dra752_mpu_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_MPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_MPU_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_MPU_MASK, @@ -157,9 +148,6 @@ dra752_dspeve_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_DSPEVE_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_DSPEVE_MASK, @@ -193,9 +181,6 @@ dra752_gpu_temp_sensor_registers = { .bgap_threshold = DRA752_BANDGAP_THRESHOLD_GPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, - .tshut_threshold = DRA752_BANDGAP_TSHUT_GPU_OFFSET, - .tshut_hot_mask = DRA752_TSHUT_THRESHOLD_HOT_MASK, - .tshut_cold_mask = DRA752_TSHUT_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK, @@ -211,8 +196,6 @@ dra752_gpu_temp_sensor_registers = { /* Thresholds and limits for DRA752 MPU temperature sensor */ static struct temp_sensor_data dra752_mpu_temp_sensor_data = { - .tshut_hot = DRA752_MPU_TSHUT_HOT, - .tshut_cold = DRA752_MPU_TSHUT_COLD, .t_hot = DRA752_MPU_T_HOT, .t_cold = DRA752_MPU_T_COLD, .min_freq = DRA752_MPU_MIN_FREQ, @@ 
-226,8 +209,6 @@ static struct temp_sensor_data dra752_mpu_temp_sensor_data = { /* Thresholds and limits for DRA752 GPU temperature sensor */ static struct temp_sensor_data dra752_gpu_temp_sensor_data = { - .tshut_hot = DRA752_GPU_TSHUT_HOT, - .tshut_cold = DRA752_GPU_TSHUT_COLD, .t_hot = DRA752_GPU_T_HOT, .t_cold = DRA752_GPU_T_COLD, .min_freq = DRA752_GPU_MIN_FREQ, @@ -241,8 +222,6 @@ static struct temp_sensor_data dra752_gpu_temp_sensor_data = { /* Thresholds and limits for DRA752 CORE temperature sensor */ static struct temp_sensor_data dra752_core_temp_sensor_data = { - .tshut_hot = DRA752_CORE_TSHUT_HOT, - .tshut_cold = DRA752_CORE_TSHUT_COLD, .t_hot = DRA752_CORE_T_HOT, .t_cold = DRA752_CORE_T_COLD, .min_freq = DRA752_CORE_MIN_FREQ, @@ -256,8 +235,6 @@ static struct temp_sensor_data dra752_core_temp_sensor_data = { /* Thresholds and limits for DRA752 DSPEVE temperature sensor */ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = { - .tshut_hot = DRA752_DSPEVE_TSHUT_HOT, - .tshut_cold = DRA752_DSPEVE_TSHUT_COLD, .t_hot = DRA752_DSPEVE_T_HOT, .t_cold = DRA752_DSPEVE_T_COLD, .min_freq = DRA752_DSPEVE_MIN_FREQ, @@ -271,8 +248,6 @@ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = { /* Thresholds and limits for DRA752 IVA temperature sensor */ static struct temp_sensor_data dra752_iva_temp_sensor_data = { - .tshut_hot = DRA752_IVA_TSHUT_HOT, - .tshut_cold = DRA752_IVA_TSHUT_COLD, .t_hot = DRA752_IVA_T_HOT, .t_cold = DRA752_IVA_T_COLD, .min_freq = DRA752_IVA_MIN_FREQ, @@ -416,8 +391,7 @@ int dra752_adc_to_temp[DRA752_ADC_END_VALUE - DRA752_ADC_START_VALUE + 1] = { /* DRA752 data */ const struct ti_bandgap_data dra752_data = { - .features = TI_BANDGAP_FEATURE_TSHUT_CONFIG | - TI_BANDGAP_FEATURE_FREEZE_BIT | + .features = TI_BANDGAP_FEATURE_FREEZE_BIT | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_COUNTER_DELAY | TI_BANDGAP_FEATURE_HISTORY_BUFFER | diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c new file mode 100644 index 000000000000..a5670ad2cfc8 --- /dev/null +++ b/drivers/thermal/zx2967_thermal.c @@ -0,0 +1,258 @@ +/* + * ZTE's zx2967 family thermal sensor driver + * + * Copyright (C) 2017 ZTE Ltd. 
+ * + * Author: Baoyou Xie <baoyou.xie@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/thermal.h> + +/* Power Mode: 0->low 1->high */ +#define ZX2967_THERMAL_POWER_MODE 0 +#define ZX2967_POWER_MODE_LOW 0 +#define ZX2967_POWER_MODE_HIGH 1 + +/* DCF Control Register */ +#define ZX2967_THERMAL_DCF 0x4 +#define ZX2967_DCF_EN BIT(1) +#define ZX2967_DCF_FREEZE BIT(0) + +/* Selection Register */ +#define ZX2967_THERMAL_SEL 0x8 + +/* Control Register */ +#define ZX2967_THERMAL_CTRL 0x10 + +#define ZX2967_THERMAL_READY BIT(12) +#define ZX2967_THERMAL_TEMP_MASK GENMASK(11, 0) +#define ZX2967_THERMAL_ID_MASK 0x18 +#define ZX2967_THERMAL_ID 0x10 + +#define ZX2967_GET_TEMP_TIMEOUT_US (100 * 1024) + +/** + * struct zx2967_thermal_priv - zx2967 thermal sensor private structure + * @tzd: struct thermal_zone_device where the sensor is registered + * @lock: prevents read sensor in parallel + * @clk_topcrm: topcrm clk structure + * @clk_apb: apb clk structure + * @regs: pointer to base address of the thermal sensor + */ + +struct zx2967_thermal_priv { + struct thermal_zone_device *tzd; + struct mutex lock; + struct clk *clk_topcrm; + struct clk *clk_apb; + void __iomem *regs; + struct device *dev; +}; + +static int zx2967_thermal_get_temp(void *data, int *temp) +{ + void __iomem *regs; + struct zx2967_thermal_priv *priv = data; + u32 val; + int ret; + + if (!priv->tzd) + return -EAGAIN; + + regs = priv->regs; + mutex_lock(&priv->lock); + writel_relaxed(ZX2967_POWER_MODE_LOW, + regs + ZX2967_THERMAL_POWER_MODE); + writel_relaxed(ZX2967_DCF_EN, regs + ZX2967_THERMAL_DCF); + + val = readl_relaxed(regs + ZX2967_THERMAL_SEL); + val &= ~ZX2967_THERMAL_ID_MASK; + val |= ZX2967_THERMAL_ID; + writel_relaxed(val, regs + ZX2967_THERMAL_SEL); + + /* + * Must wait for a while, surely it's a bit odd. + * otherwise temperature value we got has a few deviation, even if + * the THERMAL_READY bit is set. + */ + usleep_range(100, 300); + ret = readx_poll_timeout(readl, regs + ZX2967_THERMAL_CTRL, + val, val & ZX2967_THERMAL_READY, 300, + ZX2967_GET_TEMP_TIMEOUT_US); + if (ret) { + dev_err(priv->dev, "Thermal sensor data timeout\n"); + goto unlock; + } + + writel_relaxed(ZX2967_DCF_FREEZE | ZX2967_DCF_EN, + regs + ZX2967_THERMAL_DCF); + val = readl_relaxed(regs + ZX2967_THERMAL_CTRL) + & ZX2967_THERMAL_TEMP_MASK; + writel_relaxed(ZX2967_POWER_MODE_HIGH, + regs + ZX2967_THERMAL_POWER_MODE); + + /* + * Calculate temperature + * In dts, slope is multiplied by 1000. 
+ */ + *temp = DIV_ROUND_CLOSEST(((s32)val + priv->tzd->tzp->offset) * 1000, + priv->tzd->tzp->slope); + +unlock: + mutex_unlock(&priv->lock); + return ret; +} + +static struct thermal_zone_of_device_ops zx2967_of_thermal_ops = { + .get_temp = zx2967_thermal_get_temp, +}; + +static int zx2967_thermal_probe(struct platform_device *pdev) +{ + struct zx2967_thermal_priv *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->clk_topcrm = devm_clk_get(&pdev->dev, "topcrm"); + if (IS_ERR(priv->clk_topcrm)) { + ret = PTR_ERR(priv->clk_topcrm); + dev_err(&pdev->dev, "failed to get topcrm clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(priv->clk_topcrm); + if (ret) { + dev_err(&pdev->dev, "failed to enable topcrm clock: %d\n", + ret); + return ret; + } + + priv->clk_apb = devm_clk_get(&pdev->dev, "apb"); + if (IS_ERR(priv->clk_apb)) { + ret = PTR_ERR(priv->clk_apb); + dev_err(&pdev->dev, "failed to get apb clock: %d\n", ret); + goto disable_clk_topcrm; + } + + ret = clk_prepare_enable(priv->clk_apb); + if (ret) { + dev_err(&pdev->dev, "failed to enable apb clock: %d\n", + ret); + goto disable_clk_topcrm; + } + + mutex_init(&priv->lock); + priv->tzd = thermal_zone_of_sensor_register(&pdev->dev, + 0, priv, &zx2967_of_thermal_ops); + + if (IS_ERR(priv->tzd)) { + ret = PTR_ERR(priv->tzd); + dev_err(&pdev->dev, "failed to register sensor: %d\n", ret); + goto disable_clk_all; + } + + if (priv->tzd->tzp->slope == 0) { + thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd); + dev_err(&pdev->dev, "coefficients of sensor is invalid\n"); + ret = -EINVAL; + goto disable_clk_all; + } + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + return 0; + +disable_clk_all: + clk_disable_unprepare(priv->clk_apb); +disable_clk_topcrm: + clk_disable_unprepare(priv->clk_topcrm); + return ret; +} + +static int zx2967_thermal_exit(struct platform_device *pdev) +{ + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + + thermal_zone_of_sensor_unregister(&pdev->dev, priv->tzd); + clk_disable_unprepare(priv->clk_topcrm); + clk_disable_unprepare(priv->clk_apb); + + return 0; +} + +static const struct of_device_id zx2967_thermal_id_table[] = { + { .compatible = "zte,zx296718-thermal" }, + {} +}; +MODULE_DEVICE_TABLE(of, zx2967_thermal_id_table); + +#ifdef CONFIG_PM_SLEEP +static int zx2967_thermal_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + + if (priv && priv->clk_topcrm) + clk_disable_unprepare(priv->clk_topcrm); + + if (priv && priv->clk_apb) + clk_disable_unprepare(priv->clk_apb); + + return 0; +} + +static int zx2967_thermal_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zx2967_thermal_priv *priv = platform_get_drvdata(pdev); + int error; + + error = clk_prepare_enable(priv->clk_topcrm); + if (error) + return error; + + error = clk_prepare_enable(priv->clk_apb); + if (error) { + clk_disable_unprepare(priv->clk_topcrm); + return error; + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(zx2967_thermal_pm_ops, + zx2967_thermal_suspend, zx2967_thermal_resume); + +static struct platform_driver zx2967_thermal_driver = { + .probe = zx2967_thermal_probe, + .remove 
= zx2967_thermal_exit, + .driver = { + .name = "zx2967_thermal", + .of_match_table = zx2967_thermal_id_table, + .pm = &zx2967_thermal_pm_ops, + }, +}; +module_platform_driver(zx2967_thermal_driver); + +MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>"); +MODULE_DESCRIPTION("ZTE zx2967 thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f3932baed07d..55577cf9b6a4 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -39,7 +39,7 @@ #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/ctype.h> diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index eb278832f5ce..1bacbc3b19a0 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -667,7 +667,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, struct n_hdlc_buf *tbuf; if (debuglevel >= DEBUG_LEVEL_INFO) - printk("%s(%d)n_hdlc_tty_write() called count=%Zd\n", + printk("%s(%d)n_hdlc_tty_write() called count=%zd\n", __FILE__,__LINE__,count); /* Verify pointers */ diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index a23fa5ed1d67..66b59a15780d 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -12,7 +12,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/major.h> #include <linux/mm.h> diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index e92c23470e51..59a2a7e18b5a 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -12,7 +12,7 @@ static char *serial_version = "$Revision: 1.25 $"; #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c index 6ad26f802b51..f96bcf9bee25 100644 --- a/drivers/tty/serial/ioc4_serial.c +++ b/drivers/tty/serial/ioc4_serial.c @@ -210,7 +210,7 @@ #define IOC4_SSCR_PAUSE_STATE 0x40000000 /* Sets when PAUSE takes effect */ #define IOC4_SSCR_RESET 0x80000000 /* Reset DMA channels */ -/* All producer/comsumer pointers are the same bitfield */ +/* All producer/consumer pointers are the same bitfield */ #define IOC4_PROD_CONS_PTR_4K 0x00000ff8 /* For 4K buffers */ #define IOC4_PROD_CONS_PTR_1K 0x000003f8 /* For 1K buffers */ #define IOC4_PROD_CONS_PTR_OFF 3 diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 793395451982..ca54ce074a5f 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -29,6 +29,7 @@ #include <linux/tty_flip.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> +#include <uapi/linux/sched/types.h> #define SC16IS7XX_NAME "sc16is7xx" #define SC16IS7XX_MAX_DEVS 8 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9939c3d9912b..3fe56894974a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -24,6 +24,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/console.h> #include <linux/of.h> diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 71136742e606..c6fc7141d7b2 100644 --- a/drivers/tty/sysrq.c +++ 
b/drivers/tty/sysrq.c @@ -14,8 +14,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/rt.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/fs.h> diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index a1fd3f7d487a..e6d1a6510886 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -69,7 +69,8 @@ #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_driver.h> diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index f27fc0f14c11..a9a978731c5b 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -9,7 +9,7 @@ #include <linux/types.h> #include <linux/termios.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/tty.h> diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 9229de43e19d..52b7baef4f7a 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -32,6 +32,8 @@ #include <linux/atomic.h> #include <linux/tty.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 5cd3cd932293..1d21a9c1d33e 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -11,7 +11,7 @@ #include <linux/timer.h> #include <linux/string.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/delay.h> diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 397e1509fe51..c5f0fc906136 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -26,7 +26,9 @@ #include <linux/consolemap.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/mm.h> diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9d3ce505e7ab..5c4933bb4b53 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -72,7 +72,7 @@ #include <linux/module.h> #include <linux/types.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/kernel.h> diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a56edf2d58eb..0cbfe1ff6f6c 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -10,7 +10,7 @@ #include <linux/types.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/kernel.h> diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 31d95dc9c202..60ce7fd54e89 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -20,7 +20,7 @@ #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/cdev.h> diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 5a59da0dc98a..3e80aa3b917a 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -74,7 
+74,7 @@ #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/stat.h> diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 235e305f8473..d5388938bc7a 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -32,6 +32,7 @@ #undef VERBOSE_DEBUG #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 071964c7847f..cc61055fb9be 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -49,7 +49,7 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/slab.h> diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 52747b6ac89a..cfc3cff6e8d5 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -36,6 +36,7 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> @@ -2335,7 +2336,7 @@ static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg) if (copy_from_user(&data, arg, sizeof(data))) return -EFAULT; - /* This is an one way operation. Once privileges are + /* This is a one way operation. Once privileges are * dropped, you cannot regain them. You may however reissue * this ioctl to shrink the allowed interfaces mask. */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a56c75e09786..f0dd08198d74 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/completion.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/ioctl.h> diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 87fccf611b69..a5b7cd615698 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <linux/hid.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/uio.h> #include <asm/unaligned.h> diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 8f3659b65f53..4c8aacc232c0 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -207,6 +207,7 @@ #include <linux/fs.h> #include <linux/kref.h> #include <linux/kthread.h> +#include <linux/sched/signal.h> #include <linux/limits.h> #include <linux/rwsem.h> #include <linux/slab.h> diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 6bde4396927c..a2615d64d07c 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1848,7 +1848,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) fail: spin_unlock_irq (&dev->lock); - pr_debug ("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev); + pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); dev->buf = NULL; return value; diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 2e41ef36b944..b76fcdb763a0 
100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -520,7 +520,7 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num, /* Setup qh structure and ep register for ep0. */ static void ep0_setup(struct fsl_udc *udc) { - /* the intialization of an ep includes: fields in QH, Regs, + /* the initialization of an ep includes: fields in QH, Regs, * fsl_ep struct */ struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL, USB_MAX_CTRL_PAYLOAD, 0, 0); @@ -2349,7 +2349,7 @@ static int struct_ep_setup(struct fsl_udc *udc, unsigned char index, } /* Driver probe function - * all intialization operations implemented here except enabling usb_intr reg + * all initialization operations implemented here except enabling usb_intr reg * board setup should have been done in the platform code */ static int fsl_udc_probe(struct platform_device *pdev) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index fb8fc34827ab..2218f91e92a6 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1791,7 +1791,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev, dev_dbg(dev, "%s: num_usb3_eps = %d\n", __func__, usb3->num_usb3_eps); /* - * This driver prepares pipes as the followings: + * This driver prepares pipes as follows: * - odd pipes = IN pipe * - even pipes = OUT pipe (except pipe 0) */ @@ -1841,7 +1841,7 @@ static void renesas_usb3_init_ram(struct renesas_usb3 *usb3, struct device *dev, memset(basead, 0, sizeof(basead)); /* - * This driver prepares pipes as the followings: + * This driver prepares pipes as follows: * - all pipes = the same size as "ramsize_per_pipe" * Please refer to the "Method of Specifying RAM Mapping" */ diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 063064801ceb..ac2c4eab478d 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1322,7 +1322,7 @@ static int __init ehci_hcd_init(void) printk(KERN_WARNING "Warning! ehci_hcd should always be loaded" " before uhci_hcd and ohci_hcd, not after\n"); - pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n", + pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd sitd %zd\n", hcd_name, sizeof(struct ehci_qh), sizeof(struct ehci_qtd), sizeof(struct ehci_itd), sizeof(struct ehci_sitd)); diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 9d0b0518290a..1c5b34b74860 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -5697,7 +5697,7 @@ static int __init fotg210_hcd_init(void) test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) pr_warn("Warning! 
fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n"); - pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n", + pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n", hcd_name, sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd), sizeof(struct fotg210_itd)); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 8685cf3e6292..b6daf2e69989 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -1252,7 +1252,7 @@ static int __init ohci_hcd_mod_init(void) return -ENODEV; printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name); - pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name, + pr_debug ("%s: block sizes: ed %zd td %zd\n", hcd_name, sizeof (struct ed), sizeof (struct td)); set_bit(USB_OHCI_LOADED, &usb_hcds_loaded); diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 5cf2633cdb04..e92540a21b6b 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -85,7 +85,7 @@ * (20/10/1999) */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/errno.h> diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index a540e4f206c4..db9a9e6ff6be 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -21,6 +21,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> @@ -563,20 +564,20 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, } dev_dbg(&dev->udev->dev, - "%s : in progress, count = %Zd\n", + "%s : in progress, count = %zd\n", __func__, count); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&dev->write_wait, &waita); - dev_dbg(&dev->udev->dev, "%s : sending, count = %Zd\n", + dev_dbg(&dev->udev->dev, "%s : sending, count = %zd\n", __func__, count); /* write the data into interrupt_out_buffer from userspace */ buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); bytes_to_write = count > buffer_size ? 
buffer_size : count; dev_dbg(&dev->udev->dev, - "%s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd\n", + "%s : buffer_size = %zd, count = %zd, bytes_to_write = %zd\n", __func__, buffer_size, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) { diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index debc1fd74b0d..8b9fd7534f69 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index b10e26c74a90..322a042d6e59 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -673,7 +673,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t /* write the data into interrupt_out_buffer from userspace */ bytes_to_write = min_t(int, count, write_buffer_size); - dev_dbg(&dev->udev->dev, "%s: count = %Zd, bytes_to_write = %Zd\n", + dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n", __func__, count, bytes_to_write); if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) { diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index fc329c98a6e8..b106ce76997b 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -31,7 +31,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/random.h> diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 356d312add57..e45a3a680db8 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -50,6 +50,7 @@ #include <linux/completion.h> #include <linux/kref.h> #include <linux/slab.h> +#include <linux/sched/signal.h> /* * Version Information @@ -526,7 +527,7 @@ static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf, return 0; i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buf, length, &rlen, 20000); if (i) - printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen %u\n", buf, length, rlen); + printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buf, length, rlen); change_mode(pp, ECR_PS2); return rlen; #endif @@ -587,7 +588,7 @@ static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buff return 0; i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000); if (i) - printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen %u\n", buffer, len, rlen); + printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buffer, len, rlen); change_mode(pp, ECR_PS2); return rlen; } @@ -605,7 +606,7 @@ static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, siz return 0; i = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), buffer, len, &rlen, 20000); if (i) - printk(KERN_ERR "uss720: recvbulk ep 2 buf %p len %Zu rlen %u\n", buffer, len, rlen); + printk(KERN_ERR "uss720: recvbulk ep 2 buf %p len %zu rlen %u\n", buffer, len, rlen); change_mode(pp, ECR_PS2); return rlen; } @@ -638,7 +639,7 @@ static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer return 0; i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000); if (i) - printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %Zu rlen 
%u\n", buffer, len, rlen); + printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buffer, len, rlen); change_mode(pp, ECR_PS2); return rlen; } diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 9fb8b1e6ecc2..b6d8bf475c92 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -8,6 +8,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/cdev.h> diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index db1a4abf2806..19c416d69eb9 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -8,6 +8,7 @@ #include <linux/list.h> #include <linux/usb.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/ktime.h> #include <linux/export.h> diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index eb433922598c..ab78111e0968 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -27,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/usb/serial.h> /* Defines */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 944de657a07a..49ce2be90fa0 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/sysrq.h> diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 8b232290be6b..cab2b71a80d0 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -327,13 +327,11 @@ EXPORT_SYMBOL_GPL(usbip_dump_header); int usbip_recv(struct socket *sock, void *buf, int size) { int result; - struct msghdr msg; - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; + struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; - /* for blocks of if (usbip_dbg_flag_xmit) */ - char *bp = buf; - int osize = size; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); @@ -344,26 +342,18 @@ int usbip_recv(struct socket *sock, void *buf, int size) } do { + int sz = msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = MSG_NOSIGNAL; - - result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); + + result = sock_recvmsg(sock, &msg, MSG_WAITALL); if (result <= 0) { pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf, size, result, total); + sock, buf + total, sz, result, total); goto err; } - size -= result; - buf += result; total += result; - } while (size > 0); + } while (msg_data_left(&msg)); if (usbip_dbg_flag_xmit) { if (!in_interrupt()) @@ -372,9 +362,9 @@ int usbip_recv(struct socket *sock, void *buf, int size) pr_debug("interrupt :"); pr_debug("receiving....\n"); - usbip_dump_buffer(bp, osize); - pr_debug("received, osize %d ret %d size %d total %d\n", - osize, result, size, total); + usbip_dump_buffer(buf, size); + pr_debug("received, osize %d ret %d size %zd total %d\n", + size, result, msg_data_left(&msg), total); } return total; @@ -707,7 +697,7 @@ void usbip_pad_iso(struct usbip_device *ud, struct urb *urb) return; /* - * loop over all packets from last to first (to 
prevent overwritting + * loop over all packets from last to first (to prevent overwriting * memory when padding) and move them into the proper place */ for (i = np-1; i > 0; i--) { diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 9f490375ac92..f8573a52e41a 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/usb.h> #include <linux/wait.h> +#include <linux/sched/task.h> #include <uapi/linux/usbip.h> #define USBIP_VERSION "1.0.0" diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 59b3f62a2d64..cf3de91fbfe7 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -20,6 +20,9 @@ #include <linux/err.h> #include <linux/vfio.h> #include <linux/vmalloc.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> + #include <asm/iommu.h> #include <asm/tce.h> #include <asm/mmu_context.h> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bd6f293c4ebd..c26fa1f3ed86 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -31,7 +31,8 @@ #include <linux/module.h> #include <linux/mm.h> #include <linux/rbtree.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vfio.h> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2fe35354f20e..9b519897cc17 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -17,6 +17,8 @@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/slab.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/net.h> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 4269e621e254..f0ba362d4c10 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -27,6 +27,8 @@ #include <linux/cgroup.h> #include <linux/module.h> #include <linux/sort.h> +#include <linux/sched/mm.h> +#include <linux/sched/signal.h> #include <linux/interval_tree_generic.h> #include "vhost.h" @@ -282,6 +284,22 @@ void vhost_poll_queue(struct vhost_poll *poll) } EXPORT_SYMBOL_GPL(vhost_poll_queue); +static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) +{ + int j; + + for (j = 0; j < VHOST_NUM_ADDRS; j++) + vq->meta_iotlb[j] = NULL; +} + +static void vhost_vq_meta_reset(struct vhost_dev *d) +{ + int i; + + for (i = 0; i < d->nvqs; ++i) + __vhost_vq_meta_reset(d->vqs[i]); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -312,6 +330,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; + __vhost_vq_meta_reset(vq); } static int vhost_worker(void *data) @@ -691,6 +710,18 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, return 1; } +static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, + u64 addr, unsigned int size, + int type) +{ + const struct vhost_umem_node *node = vq->meta_iotlb[type]; + + if (!node) + return NULL; + + return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); +} + /* Can we switch to this memory table? 
*/ /* Caller should have device mutex but not vq mutex */ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, @@ -733,8 +764,14 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. */ - /* TODO: more fast path */ struct iov_iter t; + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)to, size, + VHOST_ADDR_DESC); + + if (uaddr) + return __copy_to_user(uaddr, from, size); + ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_WO); @@ -762,8 +799,14 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. */ - /* TODO: more fast path */ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)from, size, + VHOST_ADDR_DESC); struct iov_iter f; + + if (uaddr) + return __copy_from_user(to, uaddr, size); + ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -783,17 +826,12 @@ out: return ret; } -static void __user *__vhost_get_user(struct vhost_virtqueue *vq, - void __user *addr, unsigned size) +static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, + void __user *addr, unsigned int size, + int type) { int ret; - /* This function should be called after iotlb - * prefetch, which means we're sure that vq - * could be access through iotlb. So -EAGAIN should - * not happen in this case. - */ - /* TODO: more fast path */ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -814,14 +852,32 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, return vq->iotlb_iov[0].iov_base; } -#define vhost_put_user(vq, x, ptr) \ +/* This function should be called after iotlb + * prefetch, which means we're sure that vq + * could be access through iotlb. So -EAGAIN should + * not happen in this case. 
+ */ +static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, + void *addr, unsigned int size, + int type) +{ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)addr, size, type); + if (uaddr) + return uaddr; + + return __vhost_get_user_slow(vq, addr, size, type); +} + +#define vhost_put_user(vq, x, ptr) \ ({ \ int ret = -EFAULT; \ if (!vq->iotlb) { \ ret = __put_user(x, ptr); \ } else { \ __typeof__(ptr) to = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), VHOST_ADDR_USED); \ if (to != NULL) \ ret = __put_user(x, to); \ else \ @@ -830,14 +886,16 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) -#define vhost_get_user(vq, x, ptr) \ +#define vhost_get_user(vq, x, ptr, type) \ ({ \ int ret; \ if (!vq->iotlb) { \ ret = __get_user(x, ptr); \ } else { \ __typeof__(ptr) from = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), \ + type); \ if (from != NULL) \ ret = __get_user(x, from); \ else \ @@ -846,6 +904,12 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) +#define vhost_get_avail(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL) + +#define vhost_get_used(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) + static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; @@ -951,6 +1015,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, ret = -EFAULT; break; } + vhost_vq_meta_reset(dev); if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size, msg->iova + msg->size - 1, msg->uaddr, msg->perm)) { @@ -960,6 +1025,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, vhost_iotlb_notify_vq(dev, msg); break; case VHOST_IOTLB_INVALIDATE: + vhost_vq_meta_reset(dev); vhost_del_umem_range(dev->iotlb, msg->iova, msg->iova + msg->size - 1); break; @@ -1103,12 +1169,26 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, sizeof *used + num * sizeof *used->ring + s); } +static void vhost_vq_meta_update(struct vhost_virtqueue *vq, + const struct vhost_umem_node *node, + int type) +{ + int access = (type == VHOST_ADDR_USED) ? 
+ VHOST_ACCESS_WO : VHOST_ACCESS_RO; + + if (likely(node->perm & access)) + vq->meta_iotlb[type] = node; +} + static int iotlb_access_ok(struct vhost_virtqueue *vq, - int access, u64 addr, u64 len) + int access, u64 addr, u64 len, int type) { const struct vhost_umem_node *node; struct vhost_umem *umem = vq->iotlb; - u64 s = 0, size; + u64 s = 0, size, orig_addr = addr; + + if (vhost_vq_meta_fetch(vq, addr, len, type)) + return true; while (len > s) { node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, @@ -1125,6 +1205,10 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq, } size = node->size - addr + node->start; + + if (orig_addr == addr && size >= len) + vhost_vq_meta_update(vq, node, type); + s += size; addr += size; } @@ -1141,13 +1225,15 @@ int vq_iotlb_prefetch(struct vhost_virtqueue *vq) return 1; return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, - num * sizeof *vq->desc) && + num * sizeof(*vq->desc), VHOST_ADDR_DESC) && iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, sizeof *vq->avail + - num * sizeof *vq->avail->ring + s) && + num * sizeof(*vq->avail->ring) + s, + VHOST_ADDR_AVAIL) && iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, sizeof *vq->used + - num * sizeof *vq->used->ring + s); + num * sizeof(*vq->used->ring) + s, + VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_iotlb_prefetch); @@ -1728,7 +1814,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) r = -EFAULT; goto err; } - r = vhost_get_user(vq, last_used_idx, &vq->used->idx); + r = vhost_get_used(vq, last_used_idx, &vq->used->idx); if (r) { vq_err(vq, "Can't access used idx at %p\n", &vq->used->idx); @@ -1930,29 +2016,36 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vq->last_avail_idx; - if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) { - vq_err(vq, "Failed to access avail idx at %p\n", - &vq->avail->idx); - return -EFAULT; - } - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { - vq_err(vq, "Guest moved used index from %u to %u", - last_avail_idx, vq->avail_idx); - return -EFAULT; - } + if (vq->avail_idx == vq->last_avail_idx) { + if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) { + vq_err(vq, "Failed to access avail idx at %p\n", + &vq->avail->idx); + return -EFAULT; + } + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - /* If there's nothing new since last we looked, return invalid. */ - if (vq->avail_idx == last_avail_idx) - return vq->num; + if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { + vq_err(vq, "Guest moved used index from %u to %u", + last_avail_idx, vq->avail_idx); + return -EFAULT; + } + + /* If there's nothing new since last we looked, return + * invalid. + */ + if (vq->avail_idx == last_avail_idx) + return vq->num; - /* Only get avail ring entries after they have been exposed by guest. */ - smp_rmb(); + /* Only get avail ring entries after they have been + * exposed by guest. + */ + smp_rmb(); + } /* Grab the next descriptor number they're advertising, and increment * the index we've seen. 
*/ - if (unlikely(vhost_get_user(vq, ring_head, + if (unlikely(vhost_get_avail(vq, ring_head, &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, @@ -2168,7 +2261,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); - if (vhost_get_user(vq, flags, &vq->avail->flags)) { + if (vhost_get_avail(vq, flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } @@ -2195,7 +2288,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * interrupts. */ smp_mb(); - if (vhost_get_user(vq, event, vhost_used_event(vq))) { + if (vhost_get_avail(vq, event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } @@ -2242,7 +2335,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (vq->avail_idx != vq->last_avail_idx) return false; - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (unlikely(r)) return false; vq->avail_idx = vhost16_to_cpu(vq, avail_idx); @@ -2278,7 +2371,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index a9cbbb148f46..f55671d53f28 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -76,6 +76,13 @@ struct vhost_umem { int numem; }; +enum vhost_uaddr_type { + VHOST_ADDR_DESC = 0, + VHOST_ADDR_AVAIL = 1, + VHOST_ADDR_USED = 2, + VHOST_NUM_ADDRS = 3, +}; + /* The virtqueue structure describes a queue attached to a device. 
*/ struct vhost_virtqueue { struct vhost_dev *dev; @@ -86,6 +93,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; + const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct file *call; struct file *error; diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index a44f5627b82a..12ded23f1aaf 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -412,11 +412,9 @@ static void fbcon_add_cursor_timer(struct fb_info *info) if (!info->queue.func) INIT_WORK(&info->queue, fb_flashcursor); - init_timer(&ops->cursor_timer); - ops->cursor_timer.function = cursor_timer_handler; - ops->cursor_timer.expires = jiffies + ops->cur_blink_jiffies; - ops->cursor_timer.data = (unsigned long ) info; - add_timer(&ops->cursor_timer); + setup_timer(&ops->cursor_timer, cursor_timer_handler, + (unsigned long) info); + mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies); ops->flags |= FBCON_FLAGS_CURSOR_TIMER; } } @@ -1165,6 +1163,8 @@ static void fbcon_free_font(struct display *p, bool freefont) p->userfont = 0; } +static void set_vc_hi_font(struct vc_data *vc, bool set); + static void fbcon_deinit(struct vc_data *vc) { struct display *p = &fb_display[vc->vc_num]; @@ -1200,6 +1200,9 @@ finished: if (free_font) vc->vc_font.data = NULL; + if (vc->vc_hi_font_mask) + set_vc_hi_font(vc, false); + if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -2436,32 +2439,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) return 0; } -static int fbcon_do_set_font(struct vc_data *vc, int w, int h, - const u8 * data, int userfont) +/* set/clear vc_hi_font_mask and update vc attrs accordingly */ +static void set_vc_hi_font(struct vc_data *vc, bool set) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct display *p = &fb_display[vc->vc_num]; - int resize; - int cnt; - char *old_data = NULL; - - if (con_is_visible(vc) && softback_lines) - fbcon_set_origin(vc); - - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); - if (p->userfont) - old_data = vc->vc_font.data; - if (userfont) - cnt = FNTCHARCNT(data); - else - cnt = 256; - vc->vc_font.data = (void *)(p->fontdata = data); - if ((p->userfont = userfont)) - REFCOUNT(data)++; - vc->vc_font.width = w; - vc->vc_font.height = h; - if (vc->vc_hi_font_mask && cnt == 256) { + if (!set) { vc->vc_hi_font_mask = 0; if (vc->vc_can_do_color) { vc->vc_complement_mask >>= 1; @@ -2484,7 +2465,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, ((c & 0xfe00) >> 1) | (c & 0xff); vc->vc_attr >>= 1; } - } else if (!vc->vc_hi_font_mask && cnt == 512) { + } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) { vc->vc_complement_mask <<= 1; @@ -2516,8 +2497,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, } else vc->vc_video_erase_char = c & ~0x100; } - } +} + +static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + const u8 * data, int userfont) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct display *p = &fb_display[vc->vc_num]; + int resize; + int cnt; + char *old_data = NULL; + + if (con_is_visible(vc) && softback_lines) + fbcon_set_origin(vc); + + resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); + if (p->userfont) + old_data = vc->vc_font.data; + if (userfont) + cnt = FNTCHARCNT(data); + else + cnt = 
256; + vc->vc_font.data = (void *)(p->fontdata = data); + if ((p->userfont = userfont)) + REFCOUNT(data)++; + vc->vc_font.width = w; + vc->vc_font.height = h; + if (vc->vc_hi_font_mask && cnt == 256) + set_vc_hi_font(vc, false); + else if (!vc->vc_hi_font_mask && cnt == 512) + set_vc_hi_font(vc, true); if (resize) { int cols, rows; diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c index 476ff3f4d466..cd2db1113e67 100644 --- a/drivers/video/fbdev/amba-clcd-nomadik.c +++ b/drivers/video/fbdev/amba-clcd-nomadik.c @@ -213,15 +213,8 @@ static void tpg110_init(struct device *dev, struct device_node *np, board->disable = tpg110_disable; } -int nomadik_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint) +int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel) { - struct device_node *panel; - - panel = of_graph_get_remote_port_parent(endpoint); - if (!panel) - return -ENODEV; - if (of_device_is_compatible(panel, "tpo,tpg110")) tpg110_init(&fb->dev->dev, panel, fb->board); else diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h index 50aa9bda69fd..a24032c8156e 100644 --- a/drivers/video/fbdev/amba-clcd-nomadik.h +++ b/drivers/video/fbdev/amba-clcd-nomadik.h @@ -6,8 +6,7 @@ #ifdef CONFIG_ARCH_NOMADIK int nomadik_clcd_init_board(struct amba_device *adev, struct clcd_board *board); -int nomadik_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint); +int nomadik_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel); #else static inline int nomadik_clcd_init_board(struct amba_device *adev, struct clcd_board *board) @@ -15,7 +14,7 @@ static inline int nomadik_clcd_init_board(struct amba_device *adev, return 0; } static inline int nomadik_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint) + struct device_node *panel) { return 0; } diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index e5d9bfc1703a..d42047dc4e4e 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c @@ -452,11 +452,9 @@ static const struct versatile_panel versatile_panels[] = { }, }; -static void versatile_panel_probe(struct device *dev, - struct device_node *endpoint) +static void versatile_panel_probe(struct device *dev, struct device_node *panel) { struct versatile_panel const *vpanel = NULL; - struct device_node *panel = NULL; u32 val; int ret; int i; @@ -488,11 +486,6 @@ static void versatile_panel_probe(struct device *dev, return; } - panel = of_graph_get_remote_port_parent(endpoint); - if (!panel) { - dev_err(dev, "could not locate panel in DT\n"); - return; - } if (!of_device_is_compatible(panel, vpanel->compatible)) dev_err(dev, "panel in DT is not compatible with the " "auto-detected panel, continuing anyway\n"); @@ -514,8 +507,7 @@ static void versatile_panel_probe(struct device *dev, } } -int versatile_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint) +int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel) { const struct of_device_id *clcd_id; enum versatile_clcd versatile_clcd_type; @@ -551,7 +543,7 @@ int versatile_clcd_init_panel(struct clcd_fb *fb, fb->board->enable = versatile_clcd_enable; fb->board->disable = versatile_clcd_disable; fb->board->decode = versatile_clcd_decode; - versatile_panel_probe(dev, endpoint); + versatile_panel_probe(dev, panel); dev_info(dev, "set up callbacks for Versatile\n"); break; case 
REALVIEW_CLCD_EB: diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h index 1b14359c2cf6..4692c3092823 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.h +++ b/drivers/video/fbdev/amba-clcd-versatile.h @@ -6,11 +6,10 @@ #include <linux/platform_data/video-clcd-versatile.h> #if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF) -int versatile_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint); +int versatile_clcd_init_panel(struct clcd_fb *fb, struct device_node *panel); #else static inline int versatile_clcd_init_panel(struct clcd_fb *fb, - struct device_node *endpoint) + struct device_node *panel) { return 0; } diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index ec2671d98abc..0fab92c62828 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -10,27 +10,22 @@ * * ARM PrimeCell PL110 Color LCD Controller */ -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/slab.h> +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/backlight.h> +#include <linux/clk.h> #include <linux/delay.h> -#include <linux/mm.h> +#include <linux/dma-mapping.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/list.h> -#include <linux/amba/bus.h> -#include <linux/amba/clcd.h> -#include <linux/bitops.h> -#include <linux/clk.h> -#include <linux/hardirq.h> -#include <linux/of.h> +#include <linux/mm.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_graph.h> -#include <linux/backlight.h> +#include <linux/slab.h> +#include <linux/string.h> #include <video/display_timing.h> #include <video/of_display_timing.h> #include <video/videomode.h> @@ -629,16 +624,11 @@ static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode) mode->refresh); } -static int clcdfb_of_get_backlight(struct device_node *endpoint, +static int clcdfb_of_get_backlight(struct device_node *panel, struct clcd_panel *clcd_panel) { - struct device_node *panel; struct device_node *backlight; - panel = of_graph_get_remote_port_parent(endpoint); - if (!panel) - return -ENODEV; - /* Look up the optional backlight phandle */ backlight = of_parse_phandle(panel, "backlight", 0); if (backlight) { @@ -651,19 +641,14 @@ static int clcdfb_of_get_backlight(struct device_node *endpoint, return 0; } -static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, - struct clcd_panel *clcd_panel) +static int clcdfb_of_get_mode(struct device *dev, struct device_node *panel, + struct clcd_panel *clcd_panel) { int err; - struct device_node *panel; struct fb_videomode *mode; char *name; int len; - panel = of_graph_get_remote_port_parent(endpoint); - if (!panel) - return -ENODEV; - /* Only directly connected DPI panels supported for now */ if (of_device_is_compatible(panel, "panel-dpi")) err = clcdfb_of_get_dpi_panel_mode(panel, clcd_panel); @@ -769,7 +754,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) static int clcdfb_of_init_display(struct clcd_fb *fb) { - struct device_node *endpoint; + struct device_node *endpoint, *panel; int err; unsigned int bpp; u32 max_bandwidth; @@ -786,17 +771,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb) if (!endpoint) return -ENODEV; + panel = of_graph_get_remote_port_parent(endpoint); + if (!panel) + return -ENODEV; + if 
(fb->vendor->init_panel) { - err = fb->vendor->init_panel(fb, endpoint); + err = fb->vendor->init_panel(fb, panel); if (err) return err; } - err = clcdfb_of_get_backlight(endpoint, fb->panel); + err = clcdfb_of_get_backlight(panel, fb->panel); if (err) return err; - err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, fb->panel); + err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel); if (err) return err; diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index 1d702e13aaff..cc11c6061298 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -1484,13 +1484,11 @@ static int ami_decode_var(struct fb_var_screeninfo *var, struct amifb_par *par, par->xoffset = var->xoffset; par->yoffset = var->yoffset; if (par->vmode & FB_VMODE_YWRAP) { - if (par->xoffset || par->yoffset < 0 || - par->yoffset >= par->vyres) + if (par->yoffset >= par->vyres) par->xoffset = par->yoffset = 0; } else { - if (par->xoffset < 0 || - par->xoffset > upx(16 << maxfmode, par->vxres - par->xres) || - par->yoffset < 0 || par->yoffset > par->vyres - par->yres) + if (par->xoffset > upx(16 << maxfmode, par->vxres - par->xres) || + par->yoffset > par->vyres - par->yres) par->xoffset = par->yoffset = 0; } } else diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c index 278b421ab3fe..dd823f5fe4c9 100644 --- a/drivers/video/fbdev/aty/radeon_monitor.c +++ b/drivers/video/fbdev/aty/radeon_monitor.c @@ -646,7 +646,7 @@ void radeon_probe_screens(struct radeonfb_info *rinfo, /* - * This functions applyes any arch/model/machine specific fixups + * This function applies any arch/model/machine specific fixups * to the panel info. It may eventually alter EDID block as * well or whatever is specific to a given model and not probed * properly by the default code diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c index 9580374667ba..0d06038324e0 100644 --- a/drivers/video/fbdev/auo_k190x.c +++ b/drivers/video/fbdev/auo_k190x.c @@ -9,6 +9,7 @@ */ #include <linux/module.h> +#include <linux/sched/mm.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/platform_device.h> diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 038ac6934fe9..9da90bd242f4 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -26,6 +26,7 @@ #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/module.h> +#include <linux/sched/signal.h> /* * Cursor position address diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index fe00a07c122e..ca3d6b366471 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -439,12 +439,12 @@ static struct mfb_info mfb_template[] = { static void __attribute__ ((unused)) fsl_diu_dump(struct diu __iomem *hw) { mb(); - pr_debug("DIU: desc=%08x,%08x,%08x, gamma=%08x pallete=%08x " + pr_debug("DIU: desc=%08x,%08x,%08x, gamma=%08x palette=%08x " "cursor=%08x curs_pos=%08x diu_mode=%08x bgnd=%08x " "disp_size=%08x hsyn_para=%08x vsyn_para=%08x syn_pol=%08x " "thresholds=%08x int_mask=%08x plut=%08x\n", hw->desc[0], hw->desc[1], hw->desc[2], hw->gamma, - hw->pallete, hw->cursor, hw->curs_pos, hw->diu_mode, + hw->palette, hw->cursor, hw->curs_pos, hw->diu_mode, hw->bgnd, hw->disp_size, hw->hsyn_para, hw->vsyn_para, hw->syn_pol, hw->thresholds, hw->int_mask, hw->plut); rmb(); @@ -703,12 +703,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var, if 
(var->yres_virtual < var->yres) var->yres_virtual = var->yres; - if (var->xoffset < 0) - var->xoffset = 0; - - if (var->yoffset < 0) - var->yoffset = 0; - if (var->xoffset + info->var.xres > info->var.xres_virtual) var->xoffset = info->var.xres_virtual - info->var.xres; @@ -1254,8 +1248,7 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var, (info->var.yoffset == var->yoffset)) return 0; /* No change, do nothing */ - if (var->xoffset < 0 || var->yoffset < 0 - || var->xoffset + info->var.xres > info->var.xres_virtual + if (var->xoffset + info->var.xres > info->var.xres_virtual || var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index fe0c4eeff2e4..1b0faadb3080 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -985,7 +985,11 @@ static int imxfb_probe(struct platform_device *pdev) */ imxfb_check_var(&info->var, info); - ret = fb_alloc_cmap(&info->cmap, 1 << info->var.bits_per_pixel, 0); + /* + * For modes > 8bpp, the color map is bypassed. + * Therefore, 256 entries are enough. + */ + ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret < 0) goto failed_cmap; diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c index a01147fdf270..b380a393cbc3 100644 --- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c +++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c @@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo) #ifdef CONFIG_FB_MATROX_MYSTIQUE struct matrox_switch matrox_mystique = { - MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore, + .preinit = MGA1064_preinit, + .reset = MGA1064_reset, + .init = MGA1064_init, + .restore = MGA1064_restore, }; EXPORT_SYMBOL(matrox_mystique); #endif #ifdef CONFIG_FB_MATROX_G struct matrox_switch matrox_G100 = { - MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore, + .preinit = MGAG100_preinit, + .reset = MGAG100_reset, + .init = MGAG100_init, + .restore = MGAG100_restore, }; EXPORT_SYMBOL(matrox_G100); #endif diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c index 68fa037d8cbc..9ff9be85759e 100644 --- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c +++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c @@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo) } struct matrox_switch matrox_millennium = { - Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore + .preinit = Ti3026_preinit, + .reset = Ti3026_reset, + .init = Ti3026_init, + .restore = Ti3026_restore }; EXPORT_SYMBOL(matrox_millennium); #endif diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c index 5cf52d3c8e75..cab7333208ea 100644 --- a/drivers/video/fbdev/maxinefb.c +++ b/drivers/video/fbdev/maxinefb.c @@ -51,7 +51,7 @@ static struct fb_var_screeninfo maxinefb_defined = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo maxinefb_fix = { +static struct fb_fix_screeninfo maxinefb_fix __initdata = { .id = "Maxine", .smem_len = (1024*768), .type = FB_TYPE_PACKED_PIXELS, diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c index e3bc00a75296..2528d3e609a4 100644 --- a/drivers/video/fbdev/mbx/mbxdebugfs.c +++ b/drivers/video/fbdev/mbx/mbxdebugfs.c @@ -15,12 +15,6 @@ struct mbxfb_debugfs_data { struct dentry *misc; }; -static int open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = 
inode->i_private; - return 0; -} - static ssize_t write_file_dummy(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -174,42 +168,42 @@ static ssize_t misc_read_file(struct file *file, char __user *userbuf, static const struct file_operations sysconf_fops = { .read = sysconf_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; static const struct file_operations clock_fops = { .read = clock_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; static const struct file_operations display_fops = { .read = display_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; static const struct file_operations gsctl_fops = { .read = gsctl_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; static const struct file_operations sdram_fops = { .read = sdram_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; static const struct file_operations misc_fops = { .read = misc_read_file, .write = write_file_dummy, - .open = open_file_generic, + .open = simple_open, .llseek = default_llseek, }; diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c index abb6bbf226d5..9085e9525341 100644 --- a/drivers/video/fbdev/metronomefb.c +++ b/drivers/video/fbdev/metronomefb.c @@ -187,7 +187,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t, epd_frame_table[par->dt].wfm_size = user_wfm_size; if (size != epd_frame_table[par->dt].wfm_size) { - dev_err(dev, "Error: unexpected size %Zd != %d\n", size, + dev_err(dev, "Error: unexpected size %zd != %d\n", size, epd_frame_table[par->dt].wfm_size); return -EINVAL; } diff --git a/drivers/video/fbdev/nvidia/nv_accel.c b/drivers/video/fbdev/nvidia/nv_accel.c index ad6472a894ea..7341fed63e35 100644 --- a/drivers/video/fbdev/nvidia/nv_accel.c +++ b/drivers/video/fbdev/nvidia/nv_accel.c @@ -48,6 +48,8 @@ */ #include <linux/fb.h> +#include <linux/nmi.h> + #include "nv_type.h" #include "nv_proto.h" #include "nv_dma.h" diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 906c6e75c260..9be884b0c778 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -668,14 +668,14 @@ static int __init offb_init(void) offb_init_nodriver(of_chosen, 1); } - for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { + for_each_node_by_type(dp, "display") { if (of_get_property(dp, "linux,opened", NULL) && of_get_property(dp, "linux,boot-display", NULL)) { boot_disp = dp; offb_init_nodriver(dp, 0); } } - for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { + for_each_node_by_type(dp, "display") { if (of_get_property(dp, "linux,opened", NULL) && dp != boot_disp) offb_init_nodriver(dp, 0); diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c index f912a207b394..a4ee947006c7 100644 --- a/drivers/video/fbdev/omap/lcd_ams_delta.c +++ b/drivers/video/fbdev/omap/lcd_ams_delta.c @@ -136,11 +136,6 @@ static void ams_delta_panel_disable(struct lcd_panel *panel) gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 0); } -static unsigned long ams_delta_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - static struct lcd_panel ams_delta_panel = { .name = "ams-delta", .config = 0, @@ -163,7 +158,6 @@ static struct lcd_panel 
ams_delta_panel = { .cleanup = ams_delta_panel_cleanup, .enable = ams_delta_panel_enable, .disable = ams_delta_panel_disable, - .get_caps = ams_delta_panel_get_caps, }; @@ -195,27 +189,8 @@ static int ams_delta_panel_probe(struct platform_device *pdev) return 0; } -static int ams_delta_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int ams_delta_panel_suspend(struct platform_device *pdev, - pm_message_t mesg) -{ - return 0; -} - -static int ams_delta_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver ams_delta_panel_driver = { .probe = ams_delta_panel_probe, - .remove = ams_delta_panel_remove, - .suspend = ams_delta_panel_suspend, - .resume = ams_delta_panel_resume, .driver = { .name = "lcd_ams_delta", }, diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c index 21512b027ff7..9d2da146813e 100644 --- a/drivers/video/fbdev/omap/lcd_h3.c +++ b/drivers/video/fbdev/omap/lcd_h3.c @@ -28,15 +28,6 @@ #define MODULE_NAME "omapfb-lcd_h3" -static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) -{ - return 0; -} - -static void h3_panel_cleanup(struct lcd_panel *panel) -{ -} - static int h3_panel_enable(struct lcd_panel *panel) { int r = 0; @@ -63,12 +54,7 @@ static void h3_panel_disable(struct lcd_panel *panel) pr_err(MODULE_NAME ": Unable to turn off LCD panel\n"); } -static unsigned long h3_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel h3_panel = { +static struct lcd_panel h3_panel = { .name = "h3", .config = OMAP_LCDC_PANEL_TFT, @@ -85,11 +71,8 @@ struct lcd_panel h3_panel = { .vbp = 0, .pcd = 0, - .init = h3_panel_init, - .cleanup = h3_panel_cleanup, .enable = h3_panel_enable, .disable = h3_panel_disable, - .get_caps = h3_panel_get_caps, }; static int h3_panel_probe(struct platform_device *pdev) @@ -98,26 +81,8 @@ static int h3_panel_probe(struct platform_device *pdev) return 0; } -static int h3_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - return 0; -} - -static int h3_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver h3_panel_driver = { .probe = h3_panel_probe, - .remove = h3_panel_remove, - .suspend = h3_panel_suspend, - .resume = h3_panel_resume, .driver = { .name = "lcd_h3", }, diff --git a/drivers/video/fbdev/omap/lcd_htcherald.c b/drivers/video/fbdev/omap/lcd_htcherald.c index 8b4dfa058258..9d692f5b8025 100644 --- a/drivers/video/fbdev/omap/lcd_htcherald.c +++ b/drivers/video/fbdev/omap/lcd_htcherald.c @@ -31,32 +31,8 @@ #include "omapfb.h" -static int htcherald_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return 0; -} - -static void htcherald_panel_cleanup(struct lcd_panel *panel) -{ -} - -static int htcherald_panel_enable(struct lcd_panel *panel) -{ - return 0; -} - -static void htcherald_panel_disable(struct lcd_panel *panel) -{ -} - -static unsigned long htcherald_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - /* Found on WIZ200 (miknix) and some HERA110 models (darkstar62) */ -struct lcd_panel htcherald_panel_1 = { +static struct lcd_panel htcherald_panel_1 = { .name = "lcd_herald", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_HSYNC | @@ -74,12 +50,6 @@ struct lcd_panel htcherald_panel_1 = { .vsw = 3, .vfp = 2, .vbp = 2, - - .init = htcherald_panel_init, - .cleanup = htcherald_panel_cleanup, - .enable = htcherald_panel_enable, - .disable = 
htcherald_panel_disable, - .get_caps = htcherald_panel_get_caps, }; static int htcherald_panel_probe(struct platform_device *pdev) @@ -88,27 +58,8 @@ static int htcherald_panel_probe(struct platform_device *pdev) return 0; } -static int htcherald_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int htcherald_panel_suspend(struct platform_device *pdev, - pm_message_t mesg) -{ - return 0; -} - -static int htcherald_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver htcherald_panel_driver = { .probe = htcherald_panel_probe, - .remove = htcherald_panel_remove, - .suspend = htcherald_panel_suspend, - .resume = htcherald_panel_resume, .driver = { .name = "lcd_htcherald", }, diff --git a/drivers/video/fbdev/omap/lcd_inn1510.c b/drivers/video/fbdev/omap/lcd_inn1510.c index 49907fab36ac..b284050f5471 100644 --- a/drivers/video/fbdev/omap/lcd_inn1510.c +++ b/drivers/video/fbdev/omap/lcd_inn1510.c @@ -27,16 +27,6 @@ #include "omapfb.h" -static int innovator1510_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return 0; -} - -static void innovator1510_panel_cleanup(struct lcd_panel *panel) -{ -} - static int innovator1510_panel_enable(struct lcd_panel *panel) { __raw_writeb(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL); @@ -48,12 +38,7 @@ static void innovator1510_panel_disable(struct lcd_panel *panel) __raw_writeb(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL); } -static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel innovator1510_panel = { +static struct lcd_panel innovator1510_panel = { .name = "inn1510", .config = OMAP_LCDC_PANEL_TFT, @@ -70,11 +55,8 @@ struct lcd_panel innovator1510_panel = { .vbp = 0, .pcd = 12, - .init = innovator1510_panel_init, - .cleanup = innovator1510_panel_cleanup, .enable = innovator1510_panel_enable, .disable = innovator1510_panel_disable, - .get_caps = innovator1510_panel_get_caps, }; static int innovator1510_panel_probe(struct platform_device *pdev) @@ -83,27 +65,8 @@ static int innovator1510_panel_probe(struct platform_device *pdev) return 0; } -static int innovator1510_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int innovator1510_panel_suspend(struct platform_device *pdev, - pm_message_t mesg) -{ - return 0; -} - -static int innovator1510_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver innovator1510_panel_driver = { .probe = innovator1510_panel_probe, - .remove = innovator1510_panel_remove, - .suspend = innovator1510_panel_suspend, - .resume = innovator1510_panel_resume, .driver = { .name = "lcd_inn1510", }, diff --git a/drivers/video/fbdev/omap/lcd_inn1610.c b/drivers/video/fbdev/omap/lcd_inn1610.c index 8b42894eeb77..1841710e796f 100644 --- a/drivers/video/fbdev/omap/lcd_inn1610.c +++ b/drivers/video/fbdev/omap/lcd_inn1610.c @@ -69,12 +69,7 @@ static void innovator1610_panel_disable(struct lcd_panel *panel) gpio_set_value(15, 0); } -static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel innovator1610_panel = { +static struct lcd_panel innovator1610_panel = { .name = "inn1610", .config = OMAP_LCDC_PANEL_TFT, @@ -95,7 +90,6 @@ struct lcd_panel innovator1610_panel = { .cleanup = innovator1610_panel_cleanup, .enable = innovator1610_panel_enable, .disable = innovator1610_panel_disable, - .get_caps = innovator1610_panel_get_caps, }; static int innovator1610_panel_probe(struct platform_device *pdev) @@ -104,27 +98,8 
@@ static int innovator1610_panel_probe(struct platform_device *pdev) return 0; } -static int innovator1610_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int innovator1610_panel_suspend(struct platform_device *pdev, - pm_message_t mesg) -{ - return 0; -} - -static int innovator1610_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver innovator1610_panel_driver = { .probe = innovator1610_panel_probe, - .remove = innovator1610_panel_remove, - .suspend = innovator1610_panel_suspend, - .resume = innovator1610_panel_resume, .driver = { .name = "lcd_inn1610", }, diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c index b56886c7055e..b0be5771fe90 100644 --- a/drivers/video/fbdev/omap/lcd_osk.c +++ b/drivers/video/fbdev/omap/lcd_osk.c @@ -29,16 +29,6 @@ #include "omapfb.h" -static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) -{ - /* gpio2 was allocated in board init */ - return 0; -} - -static void osk_panel_cleanup(struct lcd_panel *panel) -{ -} - static int osk_panel_enable(struct lcd_panel *panel) { /* configure PWL pin */ @@ -68,12 +58,7 @@ static void osk_panel_disable(struct lcd_panel *panel) gpio_set_value(2, 0); } -static unsigned long osk_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel osk_panel = { +static struct lcd_panel osk_panel = { .name = "osk", .config = OMAP_LCDC_PANEL_TFT, @@ -90,11 +75,8 @@ struct lcd_panel osk_panel = { .vbp = 0, .pcd = 12, - .init = osk_panel_init, - .cleanup = osk_panel_cleanup, .enable = osk_panel_enable, .disable = osk_panel_disable, - .get_caps = osk_panel_get_caps, }; static int osk_panel_probe(struct platform_device *pdev) @@ -103,26 +85,8 @@ static int osk_panel_probe(struct platform_device *pdev) return 0; } -static int osk_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - return 0; -} - -static int osk_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver osk_panel_driver = { .probe = osk_panel_probe, - .remove = osk_panel_remove, - .suspend = osk_panel_suspend, - .resume = osk_panel_resume, .driver = { .name = "lcd_osk", }, diff --git a/drivers/video/fbdev/omap/lcd_palmte.c b/drivers/video/fbdev/omap/lcd_palmte.c index 2713fed286f7..cef96386cf80 100644 --- a/drivers/video/fbdev/omap/lcd_palmte.c +++ b/drivers/video/fbdev/omap/lcd_palmte.c @@ -25,31 +25,7 @@ #include "omapfb.h" -static int palmte_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return 0; -} - -static void palmte_panel_cleanup(struct lcd_panel *panel) -{ -} - -static int palmte_panel_enable(struct lcd_panel *panel) -{ - return 0; -} - -static void palmte_panel_disable(struct lcd_panel *panel) -{ -} - -static unsigned long palmte_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel palmte_panel = { +static struct lcd_panel palmte_panel = { .name = "palmte", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE | @@ -67,12 +43,6 @@ struct lcd_panel palmte_panel = { .vfp = 8, .vbp = 7, .pcd = 0, - - .init = palmte_panel_init, - .cleanup = palmte_panel_cleanup, - .enable = palmte_panel_enable, - .disable = palmte_panel_disable, - .get_caps = palmte_panel_get_caps, }; static int palmte_panel_probe(struct platform_device *pdev) @@ -81,26 +51,8 @@ static int palmte_panel_probe(struct platform_device *pdev) 
return 0; } -static int palmte_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - return 0; -} - -static int palmte_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver palmte_panel_driver = { .probe = palmte_panel_probe, - .remove = palmte_panel_remove, - .suspend = palmte_panel_suspend, - .resume = palmte_panel_resume, .driver = { .name = "lcd_palmte", }, diff --git a/drivers/video/fbdev/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c index 1a936d5c7b6f..627f13dae5ad 100644 --- a/drivers/video/fbdev/omap/lcd_palmtt.c +++ b/drivers/video/fbdev/omap/lcd_palmtt.c @@ -32,31 +32,12 @@ GPIO13 - screen blanking #include "omapfb.h" -static int palmtt_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return 0; -} - -static void palmtt_panel_cleanup(struct lcd_panel *panel) -{ -} - -static int palmtt_panel_enable(struct lcd_panel *panel) -{ - return 0; -} - -static void palmtt_panel_disable(struct lcd_panel *panel) -{ -} - static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel) { return OMAPFB_CAPS_SET_BACKLIGHT; } -struct lcd_panel palmtt_panel = { +static struct lcd_panel palmtt_panel = { .name = "palmtt", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE | @@ -74,10 +55,6 @@ struct lcd_panel palmtt_panel = { .vbp = 7, .pcd = 0, - .init = palmtt_panel_init, - .cleanup = palmtt_panel_cleanup, - .enable = palmtt_panel_enable, - .disable = palmtt_panel_disable, .get_caps = palmtt_panel_get_caps, }; @@ -87,26 +64,8 @@ static int palmtt_panel_probe(struct platform_device *pdev) return 0; } -static int palmtt_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - return 0; -} - -static int palmtt_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver palmtt_panel_driver = { .probe = palmtt_panel_probe, - .remove = palmtt_panel_remove, - .suspend = palmtt_panel_suspend, - .resume = palmtt_panel_resume, .driver = { .name = "lcd_palmtt", }, diff --git a/drivers/video/fbdev/omap/lcd_palmz71.c b/drivers/video/fbdev/omap/lcd_palmz71.c index a20db4f7ea99..c46d4db1f839 100644 --- a/drivers/video/fbdev/omap/lcd_palmz71.c +++ b/drivers/video/fbdev/omap/lcd_palmz71.c @@ -26,32 +26,12 @@ #include "omapfb.h" -static int palmz71_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return 0; -} - -static void palmz71_panel_cleanup(struct lcd_panel *panel) -{ - -} - -static int palmz71_panel_enable(struct lcd_panel *panel) -{ - return 0; -} - -static void palmz71_panel_disable(struct lcd_panel *panel) -{ -} - static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel) { return OMAPFB_CAPS_SET_BACKLIGHT; } -struct lcd_panel palmz71_panel = { +static struct lcd_panel palmz71_panel = { .name = "palmz71", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE | @@ -69,10 +49,6 @@ struct lcd_panel palmz71_panel = { .vbp = 7, .pcd = 0, - .init = palmz71_panel_init, - .cleanup = palmz71_panel_cleanup, - .enable = palmz71_panel_enable, - .disable = palmz71_panel_disable, .get_caps = palmz71_panel_get_caps, }; @@ -82,27 +58,8 @@ static int palmz71_panel_probe(struct platform_device *pdev) return 0; } -static int palmz71_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static 
int palmz71_panel_suspend(struct platform_device *pdev, - pm_message_t mesg) -{ - return 0; -} - -static int palmz71_panel_resume(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver palmz71_panel_driver = { .probe = palmz71_panel_probe, - .remove = palmz71_panel_remove, - .suspend = palmz71_panel_suspend, - .resume = palmz71_panel_resume, .driver = { .name = "lcd_palmz71", }, diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 6429f33167f5..1abba07b84b3 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -337,7 +337,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi) if (fbdev->state == OMAPFB_SUSPENDED) { if (fbdev->ctrl->resume) fbdev->ctrl->resume(); - fbdev->panel->enable(fbdev->panel); + if (fbdev->panel->enable) + fbdev->panel->enable(fbdev->panel); fbdev->state = OMAPFB_ACTIVE; if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) @@ -346,7 +347,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi) break; case FB_BLANK_POWERDOWN: if (fbdev->state == OMAPFB_ACTIVE) { - fbdev->panel->disable(fbdev->panel); + if (fbdev->panel->disable) + fbdev->panel->disable(fbdev->panel); if (fbdev->ctrl->suspend) fbdev->ctrl->suspend(); fbdev->state = OMAPFB_SUSPENDED; @@ -1030,7 +1032,8 @@ static void omapfb_get_caps(struct omapfb_device *fbdev, int plane, { memset(caps, 0, sizeof(*caps)); fbdev->ctrl->get_caps(plane, caps); - caps->ctrl |= fbdev->panel->get_caps(fbdev->panel); + if (fbdev->panel->get_caps) + caps->ctrl |= fbdev->panel->get_caps(fbdev->panel); } /* For lcd testing */ @@ -1549,7 +1552,8 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state) case 7: omapfb_unregister_sysfs(fbdev); case 6: - fbdev->panel->disable(fbdev->panel); + if (fbdev->panel->disable) + fbdev->panel->disable(fbdev->panel); case 5: omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED); case 4: @@ -1557,7 +1561,8 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state) case 3: ctrl_cleanup(fbdev); case 2: - fbdev->panel->cleanup(fbdev->panel); + if (fbdev->panel->cleanup) + fbdev->panel->cleanup(fbdev->panel); case 1: dev_set_drvdata(fbdev->dev, NULL); kfree(fbdev); @@ -1680,9 +1685,11 @@ static int omapfb_do_probe(struct platform_device *pdev, goto cleanup; } - r = fbdev->panel->init(fbdev->panel, fbdev); - if (r) - goto cleanup; + if (fbdev->panel->init) { + r = fbdev->panel->init(fbdev->panel, fbdev); + if (r) + goto cleanup; + } pr_info("omapfb: configured for panel %s\n", fbdev->panel->name); @@ -1725,9 +1732,11 @@ static int omapfb_do_probe(struct platform_device *pdev, OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE); init_state++; - r = fbdev->panel->enable(fbdev->panel); - if (r) - goto cleanup; + if (fbdev->panel->enable) { + r = fbdev->panel->enable(fbdev->panel); + if (r) + goto cleanup; + } init_state++; r = omapfb_register_sysfs(fbdev); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 8b810696a42b..fd2b372d0264 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -19,7 +19,7 @@ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/of_device.h> diff --git a/drivers/video/fbdev/pmag-ba-fb.c 
b/drivers/video/fbdev/pmag-ba-fb.c index 5872bc4af3ce..df02fb4b7fd1 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = { /* * Turn the hardware cursor off. */ -static void __init pmagbafb_erase_cursor(struct fb_info *info) +static void pmagbafb_erase_cursor(struct fb_info *info) { struct pmagbafb_par *par = info->par; diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c index 0822b6f8dddc..a7a179a0bb33 100644 --- a/drivers/video/fbdev/pmagb-b-fb.c +++ b/drivers/video/fbdev/pmagb-b-fb.c @@ -133,7 +133,7 @@ static struct fb_ops pmagbbfb_ops = { /* * Turn the hardware cursor off. */ -static void __init pmagbbfb_erase_cursor(struct fb_info *info) +static void pmagbbfb_erase_cursor(struct fb_info *info) { struct pmagbbfb_par *par = info->par; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 82c0a8caa9b8..885ee3a563aa 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle) } static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = { - lcdc_sys_write_index, - lcdc_sys_write_data, - lcdc_sys_read_data, + .write_index = lcdc_sys_write_index, + .write_data = lcdc_sys_write_data, + .read_data = lcdc_sys_read_data, }; static int sh_mobile_lcdc_sginit(struct fb_info *info, @@ -2782,8 +2782,10 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev) priv->forced_fourcc = pdata->ch[0].fourcc; priv->base = ioremap_nocache(res->start, resource_size(res)); - if (!priv->base) + if (!priv->base) { + error = -ENOMEM; goto err1; + } error = sh_mobile_lcdc_setup_clocks(priv, pdata->clock_source); if (error) { diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 61f799a515dc..a3c44ecf4523 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -180,10 +180,12 @@ static int simplefb_parse_pd(struct platform_device *pdev, struct simplefb_par { u32 palette[PSEUDO_PALETTE_SIZE]; #if defined CONFIG_OF && defined CONFIG_COMMON_CLK + bool clks_enabled; unsigned int clk_count; struct clk **clks; #endif #if defined CONFIG_OF && defined CONFIG_REGULATOR + bool regulators_enabled; u32 regulator_count; struct regulator **regulators; #endif @@ -208,12 +210,12 @@ struct simplefb_par { * the fb probe will not help us much either. So just complain and carry on, * and hope that the user actually gets a working fb at the end of things. 
*/ -static int simplefb_clocks_init(struct simplefb_par *par, - struct platform_device *pdev) +static int simplefb_clocks_get(struct simplefb_par *par, + struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct clk *clock; - int i, ret; + int i; if (dev_get_platdata(&pdev->dev) || !np) return 0; @@ -244,6 +246,14 @@ static int simplefb_clocks_init(struct simplefb_par *par, par->clks[i] = clock; } + return 0; +} + +static void simplefb_clocks_enable(struct simplefb_par *par, + struct platform_device *pdev) +{ + int i, ret; + for (i = 0; i < par->clk_count; i++) { if (par->clks[i]) { ret = clk_prepare_enable(par->clks[i]); @@ -256,8 +266,7 @@ static int simplefb_clocks_init(struct simplefb_par *par, } } } - - return 0; + par->clks_enabled = true; } static void simplefb_clocks_destroy(struct simplefb_par *par) @@ -269,7 +278,8 @@ static void simplefb_clocks_destroy(struct simplefb_par *par) for (i = 0; i < par->clk_count; i++) { if (par->clks[i]) { - clk_disable_unprepare(par->clks[i]); + if (par->clks_enabled) + clk_disable_unprepare(par->clks[i]); clk_put(par->clks[i]); } } @@ -277,8 +287,10 @@ static void simplefb_clocks_destroy(struct simplefb_par *par) kfree(par->clks); } #else -static int simplefb_clocks_init(struct simplefb_par *par, +static int simplefb_clocks_get(struct simplefb_par *par, struct platform_device *pdev) { return 0; } +static void simplefb_clocks_enable(struct simplefb_par *par, + struct platform_device *pdev) { } static void simplefb_clocks_destroy(struct simplefb_par *par) { } #endif @@ -305,14 +317,14 @@ static void simplefb_clocks_destroy(struct simplefb_par *par) { } * the fb probe will not help us much either. So just complain and carry on, * and hope that the user actually gets a working fb at the end of things. 
*/ -static int simplefb_regulators_init(struct simplefb_par *par, - struct platform_device *pdev) +static int simplefb_regulators_get(struct simplefb_par *par, + struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct property *prop; struct regulator *regulator; const char *p; - int count = 0, i = 0, ret; + int count = 0, i = 0; if (dev_get_platdata(&pdev->dev) || !np) return 0; @@ -354,6 +366,14 @@ static int simplefb_regulators_init(struct simplefb_par *par, } par->regulator_count = i; + return 0; +} + +static void simplefb_regulators_enable(struct simplefb_par *par, + struct platform_device *pdev) +{ + int i, ret; + /* Enable all the regulators */ for (i = 0; i < par->regulator_count; i++) { ret = regulator_enable(par->regulators[i]); @@ -365,15 +385,14 @@ static int simplefb_regulators_init(struct simplefb_par *par, par->regulators[i] = NULL; } } - - return 0; + par->regulators_enabled = true; } static void simplefb_regulators_destroy(struct simplefb_par *par) { int i; - if (!par->regulators) + if (!par->regulators || !par->regulators_enabled) return; for (i = 0; i < par->regulator_count; i++) @@ -381,8 +400,10 @@ static void simplefb_regulators_destroy(struct simplefb_par *par) regulator_disable(par->regulators[i]); } #else -static int simplefb_regulators_init(struct simplefb_par *par, +static int simplefb_regulators_get(struct simplefb_par *par, struct platform_device *pdev) { return 0; } +static void simplefb_regulators_enable(struct simplefb_par *par, + struct platform_device *pdev) { } static void simplefb_regulators_destroy(struct simplefb_par *par) { } #endif @@ -453,14 +474,17 @@ static int simplefb_probe(struct platform_device *pdev) } info->pseudo_palette = par->palette; - ret = simplefb_clocks_init(par, pdev); + ret = simplefb_clocks_get(par, pdev); if (ret < 0) goto error_unmap; - ret = simplefb_regulators_init(par, pdev); + ret = simplefb_regulators_get(par, pdev); if (ret < 0) goto error_clocks; + simplefb_clocks_enable(par, pdev); + simplefb_regulators_enable(par, pdev); + dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n", info->fix.smem_start, info->fix.smem_len, info->screen_base); diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 2925d5ce8d3e..bd017b57c47f 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -9,6 +9,7 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/fb.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> @@ -16,6 +17,7 @@ #include <linux/of_gpio.h> #include <linux/pwm.h> #include <linux/uaccess.h> +#include <linux/regulator/consumer.h> #define SSD1307FB_DATA 0x40 #define SSD1307FB_COMMAND 0x80 @@ -73,7 +75,8 @@ struct ssd1307fb_par { u32 prechargep2; struct pwm_device *pwm; u32 pwm_period; - int reset; + struct gpio_desc *reset; + struct regulator *vbat_reg; u32 seg_remap; u32 vcomh; u32 width; @@ -439,6 +442,9 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) if (ret < 0) return ret; + /* Clear the screen */ + ssd1307fb_update_display(par); + /* Turn on the display */ ret = ssd1307fb_write_cmd(par->client, SSD1307FB_DISPLAY_ON); if (ret < 0) @@ -561,10 +567,20 @@ static int ssd1307fb_probe(struct i2c_client *client, par->device_info = of_device_get_match_data(&client->dev); - par->reset = of_get_named_gpio(client->dev.of_node, - "reset-gpios", 0); - if (!gpio_is_valid(par->reset)) { - ret = -EINVAL; + par->reset = 
devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(par->reset)) { + dev_err(&client->dev, "failed to get reset gpio: %ld\n", + PTR_ERR(par->reset)); + ret = PTR_ERR(par->reset); + goto fb_alloc_error; + } + + par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); + if (IS_ERR(par->vbat_reg)) { + dev_err(&client->dev, "failed to get VBAT regulator: %ld\n", + PTR_ERR(par->vbat_reg)); + ret = PTR_ERR(par->vbat_reg); goto fb_alloc_error; } @@ -642,27 +658,25 @@ static int ssd1307fb_probe(struct i2c_client *client, fb_deferred_io_init(info); - ret = devm_gpio_request_one(&client->dev, par->reset, - GPIOF_OUT_INIT_HIGH, - "oled-reset"); + i2c_set_clientdata(client, info); + + if (par->reset) { + /* Reset the screen */ + gpiod_set_value(par->reset, 0); + udelay(4); + gpiod_set_value(par->reset, 1); + udelay(4); + } + + ret = regulator_enable(par->vbat_reg); if (ret) { - dev_err(&client->dev, - "failed to request gpio %d: %d\n", - par->reset, ret); + dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); goto reset_oled_error; } - i2c_set_clientdata(client, info); - - /* Reset the screen */ - gpio_set_value(par->reset, 0); - udelay(4); - gpio_set_value(par->reset, 1); - udelay(4); - ret = ssd1307fb_init(par); if (ret) - goto reset_oled_error; + goto regulator_enable_error; ret = register_framebuffer(info); if (ret) { @@ -695,6 +709,8 @@ panel_init_error: pwm_disable(par->pwm); pwm_put(par->pwm); }; +regulator_enable_error: + regulator_disable(par->vbat_reg); reset_oled_error: fb_deferred_io_cleanup(info); fb_alloc_error: diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index accfef71e984..6ded5c198998 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1294,6 +1294,10 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) strcpy(fix->id, "stifb"); info->fbops = &stifb_ops; info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); + if (!info->screen_base) { + printk(KERN_ERR "stifb: failed to map memory\n"); + goto out_err0; + } info->screen_size = fix->smem_len; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; info->pseudo_palette = &fb->pseudo_palette; diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c index e925619da39b..253ffe9baab2 100644 --- a/drivers/video/fbdev/wm8505fb.c +++ b/drivers/video/fbdev/wm8505fb.c @@ -182,7 +182,7 @@ static ssize_t contrast_store(struct device *dev, return count; } -static DEVICE_ATTR(contrast, 0644, contrast_show, contrast_store); +static DEVICE_ATTR_RW(contrast); static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) { diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 9d2738e9217f..4e1191508228 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -31,6 +31,7 @@ #include <linux/wait.h> #include <linux/mm.h> #include <linux/mount.h> +#include <linux/magic.h> /* * Balloon device works in 4K page units. So each page is pointed to by @@ -413,7 +414,8 @@ static int init_vqs(struct virtio_balloon *vb) * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 
3 : 2; - err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); + err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names, + NULL); if (err) return err; diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 350a2a5a49db..79f1293cda93 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -173,7 +173,8 @@ static int virtinput_init_vqs(struct virtio_input *vi) static const char * const names[] = { "events", "status" }; int err; - err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); + err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names, + NULL); if (err) return err; vi->evt = vqs[0]; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index c71fde5fe835..78343b8f9034 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -70,7 +70,7 @@ #include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_config.h> -#include <linux/virtio_mmio.h> +#include <uapi/linux/virtio_mmio.h> #include <linux/virtio_ring.h> @@ -446,7 +446,8 @@ error_available: static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 186cbab327b8..df548a6fb844 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -33,10 +33,8 @@ void vp_synchronize_vectors(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; - if (vp_dev->intx_enabled) - synchronize_irq(vp_dev->pci_dev->irq); - - for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); + for (i = 1; i < vp_dev->msix_vectors; i++) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } @@ -62,16 +60,13 @@ static irqreturn_t vp_config_changed(int irq, void *opaque) static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; - struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; - unsigned long flags; + struct virtqueue *vq; - spin_lock_irqsave(&vp_dev->lock, flags); - list_for_each_entry(info, &vp_dev->virtqueues, node) { - if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { + if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } - spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } @@ -102,237 +97,185 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, - bool per_vq_vectors) +static void vp_remove_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - unsigned i, v; - int err = -ENOMEM; - - vp_dev->msix_vectors = nvectors; - - vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, - GFP_KERNEL); - if (!vp_dev->msix_names) - goto error; - vp_dev->msix_affinity_masks - = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, - GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto error; - for (i = 0; i < nvectors; ++i) - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - 
GFP_KERNEL)) - goto error; - - err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors, - PCI_IRQ_MSIX); - if (err < 0) - goto error; - vp_dev->msix_enabled = 1; - - /* Set the vector used for configuration */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-config", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_config_changed, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - - v = vp_dev->config_vector(vp_dev, v); - /* Verify we had enough resources to assign the vector */ - if (v == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto error; - } - - if (!per_vq_vectors) { - /* Shared vector for all VQs */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-virtqueues", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_vring_interrupt, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - } - return 0; -error: - return err; -} - -static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, - u16 msix_vec) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); - struct virtqueue *vq; - unsigned long flags; - - /* fill out our structure that represents an active queue */ - if (!info) - return ERR_PTR(-ENOMEM); + struct virtqueue *vq, *n; - vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); - if (IS_ERR(vq)) - goto out_info; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + if (vp_dev->msix_vector_map) { + int v = vp_dev->msix_vector_map[vq->index]; - info->vq = vq; - if (callback) { - spin_lock_irqsave(&vp_dev->lock, flags); - list_add(&info->node, &vp_dev->virtqueues); - spin_unlock_irqrestore(&vp_dev->lock, flags); - } else { - INIT_LIST_HEAD(&info->node); + if (v != VIRTIO_MSI_NO_VECTOR) + free_irq(pci_irq_vector(vp_dev->pci_dev, v), + vq); + } + vp_dev->del_vq(vq); } - - vp_dev->vqs[index] = info; - return vq; - -out_info: - kfree(info); - return vq; -} - -static void vp_del_vq(struct virtqueue *vq) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - unsigned long flags; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); - - vp_dev->del_vq(info); - kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; int i; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->per_vq_vectors) { - int v = vp_dev->vqs[vq->index]->msix_vector; - - if (v != VIRTIO_MSI_NO_VECTOR) - free_irq(pci_irq_vector(vp_dev->pci_dev, v), - vq); - } - vp_del_vq(vq); - } - vp_dev->per_vq_vectors = false; - - if (vp_dev->intx_enabled) { - free_irq(vp_dev->pci_dev->irq, vp_dev); - vp_dev->intx_enabled = 0; - } + if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) + return; - for (i = 0; i < vp_dev->msix_used_vectors; ++i) - free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); + vp_remove_vqs(vdev); - for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) + if (vp_dev->pci_dev->msix_enabled) { + for (i = 0; i < vp_dev->msix_vectors; i++) free_cpumask_var(vp_dev->msix_affinity_masks[i]); - if 
(vp_dev->msix_enabled) { /* Disable the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); - pci_free_irq_vectors(vp_dev->pci_dev); - vp_dev->msix_enabled = 0; + kfree(vp_dev->msix_affinity_masks); + kfree(vp_dev->msix_names); + kfree(vp_dev->msix_vector_map); } - vp_dev->msix_vectors = 0; - vp_dev->msix_used_vectors = 0; - kfree(vp_dev->msix_names); - vp_dev->msix_names = NULL; - kfree(vp_dev->msix_affinity_masks); - vp_dev->msix_affinity_masks = NULL; - kfree(vp_dev->vqs); - vp_dev->vqs = NULL; + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); + pci_free_irq_vectors(vp_dev->pci_dev); } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - bool per_vq_vectors) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + int i, err = -ENOMEM, allocated_vectors, nvectors; + unsigned flags = PCI_IRQ_MSIX; + bool shared = false; u16 msix_vec; - int i, err, nvectors, allocated_vectors; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; + if (desc) { + flags |= PCI_IRQ_AFFINITY; + desc->pre_vectors++; /* virtio config vector */ + } - if (per_vq_vectors) { - /* Best option: one for change interrupt, one per vq. */ - nvectors = 1; - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++nvectors; - } else { - /* Second best: one for change, shared for all vqs. */ - nvectors = 2; + nvectors = 1; + for (i = 0; i < nvqs; i++) + if (callbacks[i]) + nvectors++; + + /* Try one vector per queue first. */ + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, + nvectors, flags, desc); + if (err < 0) { + /* Fallback to one vector for config, one shared for queues. 
*/ + shared = true; + err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, + PCI_IRQ_MSIX); + if (err < 0) + return err; + } + if (err < 0) + return err; + + vp_dev->msix_vectors = nvectors; + vp_dev->msix_names = kmalloc_array(nvectors, + sizeof(*vp_dev->msix_names), GFP_KERNEL); + if (!vp_dev->msix_names) + goto out_free_irq_vectors; + + vp_dev->msix_affinity_masks = kcalloc(nvectors, + sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto out_free_msix_names; + + for (i = 0; i < nvectors; ++i) { + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto out_free_msix_affinity_masks; } - err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + /* Set the vector used for configuration */ + snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), + "%s-config", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed, + 0, vp_dev->msix_names[0], vp_dev); if (err) - goto error_find; + goto out_free_msix_affinity_masks; + + /* Verify we had enough resources to assign the vector */ + if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_free_config_irq; + } + + vp_dev->msix_vector_map = kmalloc_array(nvqs, + sizeof(*vp_dev->msix_vector_map), GFP_KERNEL); + if (!vp_dev->msix_vector_map) + goto out_disable_config_irq; - vp_dev->per_vq_vectors = per_vq_vectors; - allocated_vectors = vp_dev->msix_used_vectors; + allocated_vectors = 1; /* vector 0 is the config interrupt */ for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - if (!callbacks[i]) - msix_vec = VIRTIO_MSI_NO_VECTOR; - else if (vp_dev->per_vq_vectors) - msix_vec = allocated_vectors++; + if (callbacks[i]) + msix_vec = allocated_vectors; else - msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + msix_vec = VIRTIO_MSI_NO_VECTOR; + + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], + msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto error_find; + goto out_remove_vqs; } - if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; continue; + } - /* allocate per-vq irq if available and necessary */ - snprintf(vp_dev->msix_names[msix_vec], - sizeof *vp_dev->msix_names, - "%s-%s", + snprintf(vp_dev->msix_names[i + 1], + sizeof(*vp_dev->msix_names), "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), - vring_interrupt, 0, - vp_dev->msix_names[msix_vec], - vqs[i]); - if (err) - goto error_find; + vring_interrupt, IRQF_SHARED, + vp_dev->msix_names[i + 1], vqs[i]); + if (err) { + /* don't free this irq on error */ + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; + goto out_remove_vqs; + } + vp_dev->msix_vector_map[i] = msix_vec; + + /* + * Use a different vector for each queue if they are available, + * else share the same vector for all VQs. 
+ */ + if (!shared) + allocated_vectors++; } + return 0; -error_find: - vp_del_vqs(vdev); +out_remove_vqs: + vp_remove_vqs(vdev); + kfree(vp_dev->msix_vector_map); +out_disable_config_irq: + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); +out_free_config_irq: + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); +out_free_msix_affinity_masks: + for (i = 0; i < nvectors; i++) { + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + } + kfree(vp_dev->msix_affinity_masks); +out_free_msix_names: + kfree(vp_dev->msix_names); +out_free_irq_vectors: + pci_free_irq_vectors(vp_dev->pci_dev); return err; } @@ -343,53 +286,42 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) - goto out_del_vqs; + return err; - vp_dev->intx_enabled = 1; - vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_del_vqs; + goto out_remove_vqs; } } return 0; -out_del_vqs: - vp_del_vqs(vdev); + +out_remove_vqs: + vp_remove_vqs(vdev); + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); return err; } /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { int err; - /* Try MSI-X with one vector per queue. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); if (!err) return 0; - /* Fallback: MSI-X with one vector for config, one shared for queues. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false); - if (!err) - return 0; - /* Finally fall back to regular interrupts. 
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); } @@ -409,16 +341,15 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - struct cpumask *mask; - unsigned int irq; if (!vq->callback) return -EINVAL; - if (vp_dev->msix_enabled) { - mask = vp_dev->msix_affinity_masks[info->msix_vector]; - irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); + if (vp_dev->pci_dev->msix_enabled) { + int vec = vp_dev->msix_vector_map[vq->index]; + struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; + unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); + if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { @@ -430,6 +361,17 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + unsigned int *map = vp_dev->msix_vector_map; + + if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) + return NULL; + + return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); +} + #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { @@ -498,8 +440,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index b2f666250ae0..ac8c9d788964 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -31,17 +31,6 @@ #include <linux/highmem.h> #include <linux/spinlock.h> -struct virtio_pci_vq_info { - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the list node for the virtqueues list */ - struct list_head node; - - /* MSI-X vector (or none) */ - unsigned msix_vector; -}; - /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; @@ -75,47 +64,25 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - /* array of all queues for house-keeping */ - struct virtio_pci_vq_info **vqs; - - /* MSI-X support */ - int msix_enabled; - int intx_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; - /* Number of available vectors */ - unsigned msix_vectors; - /* Vectors allocated, excluding per-vq vectors if any */ - unsigned msix_used_vectors; - - /* Whether we have vector per vq */ - bool per_vq_vectors; + /* Total Number of MSI-X vectors (including per-VQ ones). 
*/ + int msix_vectors; + /* Map of per-VQ MSI-X vectors, may be NULL */ + unsigned *msix_vector_map; struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec); - void (*del_vq)(struct virtio_pci_vq_info *info); + void (*del_vq)(struct virtqueue *vq); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); }; -/* Constants for MSI-X */ -/* Use first vector for configuration changes, second and the rest for - * virtqueues Thus, we need at least 2 vectors for MSI. */ -enum { - VP_MSIX_CONFIG_VECTOR = 0, - VP_MSIX_VQ_VECTOR = 1, -}; - /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { @@ -130,9 +97,8 @@ bool vp_notify(struct virtqueue *vq); void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: @@ -142,6 +108,8 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index); + #if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) int virtio_pci_legacy_probe(struct virtio_pci_device *); void virtio_pci_legacy_remove(struct virtio_pci_device *); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6d9e5173d5fa..f7362c5fe18a 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -112,7 +112,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -130,8 +129,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, @@ -162,14 +159,13 @@ out_deactivate: return ERR_PTR(err); } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ @@ -194,6 +190,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /* the PCI probing function */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 4bf7ab375894..7bc3004b840e 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -293,7 +293,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue 
*setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -323,8 +322,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* get offset of notification word for this vq */ off = vp_ioread16(&cfg->queue_notify_off); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, @@ -387,13 +384,12 @@ err_map_notify: } static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); if (rc) return rc; @@ -409,14 +405,13 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, &vp_dev->common->queue_msix_vector); /* Flush the write out to device */ @@ -442,6 +437,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; static const struct virtio_config_ops virtio_pci_config_ops = { @@ -457,6 +453,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /** diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index df1c9bb90eb5..2096f460498f 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -14,7 +14,7 @@ #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/sched.h> /* schedule_timeout() */ +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/export.h> diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 4ce1b66d5092..2cae7b29bb5f 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/export.h> #include <linux/moduleparam.h> diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index acb00b53a520..52a70ee6014f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -71,9 +71,17 @@ config SOFT_WATCHDOG To compile this driver as a module, choose M here: the module will be called softdog. +config SOFT_WATCHDOG_PRETIMEOUT + bool "Software watchdog pretimeout governor support" + depends on SOFT_WATCHDOG && WATCHDOG_PRETIMEOUT_GOV + help + Enable this if you want to use pretimeout governors with the software + watchdog. Be aware that governors might affect the watchdog because it + is purely software, e.g. the panic governor will stall it! 
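For illustration only, not part of this patch: given the dependencies shown in the new SOFT_WATCHDOG_PRETIMEOUT entry above, a .config fragment that exercises the option together with the existing pretimeout governor framework (the panic governor symbol comes from that framework, not from this hunk) might look like:

    CONFIG_WATCHDOG=y
    CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
    CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
    CONFIG_SOFT_WATCHDOG=y
    CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y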
+ config DA9052_WATCHDOG tristate "Dialog DA9052 Watchdog" - depends on PMIC_DA9052 + depends on PMIC_DA9052 || COMPILE_TEST select WATCHDOG_CORE help Support for the watchdog in the DA9052 PMIC. Watchdog trigger @@ -85,7 +93,7 @@ config DA9052_WATCHDOG config DA9055_WATCHDOG tristate "Dialog Semiconductor DA9055 Watchdog" - depends on MFD_DA9055 + depends on MFD_DA9055 || COMPILE_TEST select WATCHDOG_CORE help If you say yes here you get support for watchdog on the Dialog @@ -96,7 +104,7 @@ config DA9055_WATCHDOG config DA9063_WATCHDOG tristate "Dialog DA9063 Watchdog" - depends on MFD_DA9063 + depends on MFD_DA9063 || COMPILE_TEST select WATCHDOG_CORE help Support for the watchdog in the DA9063 PMIC. @@ -105,7 +113,7 @@ config DA9063_WATCHDOG config DA9062_WATCHDOG tristate "Dialog DA9062/61 Watchdog" - depends on MFD_DA9062 + depends on MFD_DA9062 || COMPILE_TEST select WATCHDOG_CORE help Support for the watchdog in the DA9062 and DA9061 PMICs. @@ -133,7 +141,8 @@ config GPIO_WATCHDOG_ARCH_INITCALL config MENF21BMC_WATCHDOG tristate "MEN 14F021P00 BMC Watchdog" - depends on MFD_MENF21BMC + depends on MFD_MENF21BMC || COMPILE_TEST + depends on I2C select WATCHDOG_CORE help Say Y here to include support for the MEN 14F021P00 BMC Watchdog. @@ -209,7 +218,7 @@ config ZIIRAVE_WATCHDOG config ARM_SP805_WATCHDOG tristate "ARM SP805 Watchdog" - depends on (ARM || ARM64) && ARM_AMBA + depends on (ARM || ARM64 || COMPILE_TEST) && ARM_AMBA select WATCHDOG_CORE help ARM Primecell SP805 Watchdog timer. This will reboot your system when @@ -237,7 +246,7 @@ config ARM_SBSA_WATCHDOG config ASM9260_WATCHDOG tristate "Alphascale ASM9260 watchdog" - depends on MACH_ASM9260 + depends on MACH_ASM9260 || COMPILE_TEST depends on OF select WATCHDOG_CORE select RESET_CONTROLLER @@ -247,14 +256,14 @@ config ASM9260_WATCHDOG config AT91RM9200_WATCHDOG tristate "AT91RM9200 watchdog" - depends on SOC_AT91RM9200 && MFD_SYSCON + depends on (SOC_AT91RM9200 && MFD_SYSCON) || COMPILE_TEST help Watchdog timer embedded into AT91RM9200 chips. This will reboot your system when the timeout is reached. config AT91SAM9X_WATCHDOG tristate "AT91SAM9X / AT91CAP9 watchdog" - depends on ARCH_AT91 + depends on ARCH_AT91 || COMPILE_TEST select WATCHDOG_CORE help Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will @@ -262,7 +271,7 @@ config AT91SAM9X_WATCHDOG config SAMA5D4_WATCHDOG tristate "Atmel SAMA5D4 Watchdog Timer" - depends on ARCH_AT91 + depends on ARCH_AT91 || COMPILE_TEST select WATCHDOG_CORE help Atmel SAMA5D4 watchdog timer is embedded into SAMA5D4 chips. @@ -293,7 +302,7 @@ config 21285_WATCHDOG config 977_WATCHDOG tristate "NetWinder WB83C977 watchdog" - depends on FOOTBRIDGE && ARCH_NETWINDER + depends on (FOOTBRIDGE && ARCH_NETWINDER) || (ARM && COMPILE_TEST) help Say Y here to include support for the WB977 watchdog included in NetWinder machines. Alternatively say M to compile the driver as @@ -301,6 +310,17 @@ config 977_WATCHDOG Not sure? It's safe to say N. +config GEMINI_WATCHDOG + tristate "Gemini watchdog" + depends on ARCH_GEMINI + select WATCHDOG_CORE + help + Say Y here if to include support for the watchdog timer + embedded in the Cortina Systems Gemini family of devices. + + To compile this driver as a module, choose M here: the + module will be called gemini_wdt. 
+ config IXP4XX_WATCHDOG tristate "IXP4xx Watchdog" depends on ARCH_IXP4XX @@ -333,9 +353,9 @@ config HAVE_S3C2410_WATCHDOG config S3C2410_WATCHDOG tristate "S3C2410 Watchdog" - depends on HAVE_S3C2410_WATCHDOG + depends on HAVE_S3C2410_WATCHDOG || COMPILE_TEST select WATCHDOG_CORE - select MFD_SYSCON if ARCH_EXYNOS5 + select MFD_SYSCON if ARCH_EXYNOS help Watchdog timer block in the Samsung SoCs. This will reboot the system when the timer expires with the watchdog enabled. @@ -372,7 +392,7 @@ config DW_WATCHDOG config EP93XX_WATCHDOG tristate "EP93xx Watchdog" - depends on ARCH_EP93XX + depends on ARCH_EP93XX || COMPILE_TEST select WATCHDOG_CORE help Say Y here if to include support for the watchdog timer @@ -383,7 +403,7 @@ config EP93XX_WATCHDOG config OMAP_WATCHDOG tristate "OMAP Watchdog" - depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS + depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || COMPILE_TEST select WATCHDOG_CORE help Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y' @@ -419,7 +439,7 @@ config IOP_WATCHDOG config DAVINCI_WATCHDOG tristate "DaVinci watchdog" - depends on ARCH_DAVINCI || ARCH_KEYSTONE + depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST select WATCHDOG_CORE help Say Y here if to include support for the watchdog timer @@ -432,7 +452,7 @@ config DAVINCI_WATCHDOG config ORION_WATCHDOG tristate "Orion watchdog" - depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU + depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST depends on ARM select WATCHDOG_CORE help @@ -443,7 +463,7 @@ config ORION_WATCHDOG config RN5T618_WATCHDOG tristate "Ricoh RN5T618 watchdog" - depends on MFD_RN5T618 + depends on MFD_RN5T618 || COMPILE_TEST select WATCHDOG_CORE help If you say yes here you get support for watchdog on the Ricoh @@ -454,7 +474,7 @@ config RN5T618_WATCHDOG config SUNXI_WATCHDOG tristate "Allwinner SoCs watchdog support" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -464,7 +484,7 @@ config SUNXI_WATCHDOG config COH901327_WATCHDOG bool "ST-Ericsson COH 901 327 watchdog" - depends on ARCH_U300 + depends on ARCH_U300 || (ARM && COMPILE_TEST) default y if MACH_U300 select WATCHDOG_CORE help @@ -483,7 +503,7 @@ config TWL4030_WATCHDOG config STMP3XXX_RTC_WATCHDOG tristate "Freescale STMP3XXX & i.MX23/28 watchdog" - depends on RTC_DRV_STMP + depends on RTC_DRV_STMP || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer inside @@ -493,7 +513,7 @@ config STMP3XXX_RTC_WATCHDOG config NUC900_WATCHDOG tristate "Nuvoton NUC900 watchdog" - depends on ARCH_W90X900 + depends on ARCH_W90X900 || COMPILE_TEST help Say Y here if to include support for the watchdog timer for the Nuvoton NUC900 series SoCs. @@ -513,7 +533,7 @@ config TS4800_WATCHDOG config TS72XX_WATCHDOG tristate "TS-72XX SBC Watchdog" - depends on MACH_TS72XX + depends on MACH_TS72XX || COMPILE_TEST help Technologic Systems TS-7200, TS-7250 and TS-7260 boards have watchdog timer implemented in a external CPLD chip. Say Y here @@ -531,7 +551,7 @@ config MAX63XX_WATCHDOG config MAX77620_WATCHDOG tristate "Maxim Max77620 Watchdog Timer" - depends on MFD_MAX77620 + depends on MFD_MAX77620 || COMPILE_TEST help This is the driver for the Max77620 watchdog timer. 
Say 'Y' here to enable the watchdog timer support for @@ -540,7 +560,7 @@ config MAX77620_WATCHDOG config IMX2_WDT tristate "IMX2+ Watchdog" - depends on ARCH_MXC || ARCH_LAYERSCAPE + depends on ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST select REGMAP_MMIO select WATCHDOG_CORE help @@ -578,7 +598,7 @@ config RETU_WATCHDOG config MOXART_WDT tristate "MOXART watchdog" - depends on ARCH_MOXART + depends on ARCH_MOXART || COMPILE_TEST help Say Y here to include Watchdog timer support for the watchdog existing on the MOXA ART SoC series platforms. @@ -588,7 +608,7 @@ config MOXART_WDT config SIRFSOC_WATCHDOG tristate "SiRFSOC watchdog" - depends on ARCH_SIRF + depends on ARCH_SIRF || COMPILE_TEST select WATCHDOG_CORE default y help @@ -597,7 +617,7 @@ config SIRFSOC_WATCHDOG config ST_LPC_WATCHDOG tristate "STMicroelectronics LPC Watchdog" - depends on ARCH_STI + depends on ARCH_STI || COMPILE_TEST depends on OF select WATCHDOG_CORE help @@ -621,7 +641,7 @@ config TEGRA_WATCHDOG config QCOM_WDT tristate "QCOM watchdog" depends on HAS_IOMEM - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include Watchdog timer support for the watchdog found @@ -633,7 +653,7 @@ config QCOM_WDT config MESON_GXBB_WATCHDOG tristate "Amlogic Meson GXBB SoCs watchdog support" - depends on ARCH_MESON + depends on ARCH_MESON || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -643,7 +663,7 @@ config MESON_GXBB_WATCHDOG config MESON_WATCHDOG tristate "Amlogic Meson SoCs watchdog support" - depends on ARCH_MESON + depends on ARCH_MESON || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -653,7 +673,7 @@ config MESON_WATCHDOG config MEDIATEK_WATCHDOG tristate "Mediatek SoCs watchdog support" - depends on ARCH_MEDIATEK + depends on ARCH_MEDIATEK || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -663,7 +683,7 @@ config MEDIATEK_WATCHDOG config DIGICOLOR_WATCHDOG tristate "Conexant Digicolor SoCs watchdog support" - depends on ARCH_DIGICOLOR + depends on ARCH_DIGICOLOR || COMPILE_TEST select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -685,7 +705,7 @@ config LPC18XX_WATCHDOG config ATLAS7_WATCHDOG tristate "CSRatlas7 watchdog" - depends on ARCH_ATLAS7 + depends on ARCH_ATLAS7 || COMPILE_TEST help Say Y here to include Watchdog timer support for the watchdog existing on the CSRatlas7 series platforms. @@ -714,11 +734,21 @@ config ASPEED_WATCHDOG To compile this driver as a module, choose M here: the module will be called aspeed_wdt. +config ZX2967_WATCHDOG + tristate "ZTE zx2967 SoCs watchdog support" + depends on ARCH_ZX + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in ZTE zx2967 SoCs. + To compile this driver as a module, choose M here: the + module will be called zx2967_wdt. + # AVR32 Architecture config AT32AP700X_WDT tristate "AT32AP700x watchdog" - depends on CPU_AT32AP700X + depends on CPU_AT32AP700X || COMPILE_TEST help Watchdog timer embedded into AT32AP700x devices. This will reboot your system when the timeout is reached. @@ -835,7 +865,7 @@ config GEODE_WDT config SC520_WDT tristate "AMD Elan SC520 processor Watchdog" - depends on MELAN + depends on MELAN || COMPILE_TEST help This is the driver for the hardware watchdog built in to the AMD "Elan" SC520 microcomputer commonly used in embedded systems. 
@@ -1108,7 +1138,8 @@ config NV_TCO config RDC321X_WDT tristate "RDC R-321x SoC watchdog" - depends on X86_RDC321X + depends on X86_RDC321X || COMPILE_TEST + depends on PCI help This is the driver for the built in hardware watchdog in the RDC R-321x SoC. @@ -1326,6 +1357,16 @@ config NI903X_WDT To compile this driver as a module, choose M here: the module will be called ni903x_wdt. +config NIC7018_WDT + tristate "NIC7018 Watchdog" + depends on X86 && ACPI + select WATCHDOG_CORE + ---help--- + Support for National Instruments NIC7018 Watchdog. + + To compile this driver as a module, choose M here: the module will be + called nic7018_wdt. + # M32R Architecture # M68K Architecture @@ -1343,14 +1384,14 @@ config M54xx_WATCHDOG config ATH79_WDT tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog" - depends on ATH79 + depends on ATH79 || (ARM && COMPILE_TEST) help Hardware driver for the built-in watchdog timer on the Atheros AR71XX/AR724X/AR913X SoCs. config BCM47XX_WDT tristate "Broadcom BCM47xx Watchdog Timer" - depends on BCM47XX || ARCH_BCM_5301X + depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST select WATCHDOG_CORE help Hardware driver for the Broadcom BCM47xx Watchdog Timer. @@ -1367,7 +1408,7 @@ config RC32434_WDT config INDYDOG tristate "Indy/I2 Hardware Watchdog" - depends on SGI_HAS_INDYDOG + depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST) help Hardware driver for the Indy's/I2's watchdog. This is a watchdog timer that will reboot the machine after a 60 second @@ -1383,7 +1424,7 @@ config JZ4740_WDT config WDT_MTX1 tristate "MTX-1 Hardware Watchdog" - depends on MIPS_MTX1 + depends on MIPS_MTX1 || (MIPS && COMPILE_TEST) help Hardware driver for the MTX-1 boards. This is a watchdog timer that will reboot the machine after a 100 seconds timer expired. @@ -1391,6 +1432,7 @@ config WDT_MTX1 config PNX833X_WDT tristate "PNX833x Hardware Watchdog" depends on SOC_PNX8335 + depends on BROKEN help Hardware driver for the PNX833x's watchdog. This is a watchdog timer that will reboot the machine after a programmable @@ -1399,7 +1441,7 @@ config PNX833X_WDT config SIBYTE_WDOG tristate "Sibyte SoC hardware watchdog" - depends on CPU_SB1 + depends on CPU_SB1 || (MIPS && COMPILE_TEST) help Watchdog driver for the built in watchdog hardware in Sibyte SoC processors. There are apparently two watchdog timers @@ -1412,13 +1454,13 @@ config SIBYTE_WDOG config AR7_WDT tristate "TI AR7 Watchdog Timer" - depends on AR7 + depends on AR7 || (MIPS && COMPILE_TEST) help Hardware driver for the TI AR7 Watchdog Timer. config TXX9_WDT tristate "Toshiba TXx9 Watchdog Timer" - depends on CPU_TX39XX || CPU_TX49XX + depends on CPU_TX39XX || CPU_TX49XX || (MIPS && COMPILE_TEST) select WATCHDOG_CORE help Hardware driver for the built-in watchdog timer on TXx9 MIPS SoCs. 
@@ -1454,7 +1496,7 @@ config BCM63XX_WDT config BCM2835_WDT tristate "Broadcom BCM2835 hardware watchdog" - depends on ARCH_BCM2835 + depends on ARCH_BCM2835 || (OF && COMPILE_TEST) select WATCHDOG_CORE help Watchdog driver for the built in watchdog hardware in Broadcom @@ -1465,7 +1507,7 @@ config BCM2835_WDT config BCM_KONA_WDT tristate "BCM Kona Watchdog" - depends on ARCH_BCM_MOBILE + depends on ARCH_BCM_MOBILE || COMPILE_TEST select WATCHDOG_CORE help Support for the watchdog timer on the following Broadcom BCM281xx @@ -1477,7 +1519,7 @@ config BCM_KONA_WDT config BCM_KONA_WDT_DEBUG bool "DEBUGFS support for BCM Kona Watchdog" - depends on BCM_KONA_WDT + depends on BCM_KONA_WDT || COMPILE_TEST help If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides access to the driver's internal data structures as well as watchdog @@ -1538,7 +1580,7 @@ config MT7621_WDT config PIC32_WDT tristate "Microchip PIC32 hardware watchdog" select WATCHDOG_CORE - depends on MACH_PIC32 + depends on MACH_PIC32 || (MIPS && COMPILE_TEST) help Watchdog driver for the built in watchdog hardware in a PIC32. @@ -1551,7 +1593,7 @@ config PIC32_WDT config PIC32_DMT tristate "Microchip PIC32 Deadman Timer" select WATCHDOG_CORE - depends on MACH_PIC32 + depends on MACH_PIC32 || (MIPS && COMPILE_TEST) help Watchdog driver for PIC32 instruction fetch counting timer. This specific timer is typically be used in misson critical and safety critical @@ -1573,7 +1615,7 @@ config GEF_WDT config MPC5200_WDT bool "MPC52xx Watchdog Timer" - depends on PPC_MPC52xx + depends on PPC_MPC52xx || COMPILE_TEST help Use General Purpose Timer (GPT) 0 on the MPC5200 as Watchdog. @@ -1592,11 +1634,11 @@ config 8xxx_WDT config MV64X60_WDT tristate "MV64X60 (Marvell Discovery) Watchdog Timer" - depends on MV64X60 + depends on MV64X60 || COMPILE_TEST config PIKA_WDT tristate "PIKA FPGA Watchdog" - depends on WARP + depends on WARP || (PPC64 && COMPILE_TEST) default y help This enables the watchdog in the PIKA FPGA. Currently used on @@ -1646,7 +1688,7 @@ config MEN_A21_WDT config WATCHDOG_RTAS tristate "RTAS watchdog" - depends on PPC_RTAS + depends on PPC_RTAS || (PPC64 && COMPILE_TEST) help This driver adds watchdog support for the RTAS watchdog. 
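For illustration only, not part of this patch: the recurring "|| COMPILE_TEST" additions in the Kconfig hunks above exist so these platform-specific drivers can be build-tested on unrelated architectures; one common way to exercise that (the ARM cross toolchain prefix here is only an example) is:

    make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- allmodconfig
    make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- drivers/watchdog/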
@@ -1674,7 +1716,7 @@ config DIAG288_WATCHDOG config SH_WDT tristate "SuperH Watchdog" - depends on SUPERH && (CPU_SH3 || CPU_SH4) + depends on SUPERH && (CPU_SH3 || CPU_SH4 || COMPILE_TEST) select WATCHDOG_CORE help This driver adds watchdog support for the integrated watchdog in the @@ -1741,7 +1783,7 @@ config XEN_WDT config UML_WATCHDOG tristate "UML watchdog" - depends on UML + depends on UML || COMPILE_TEST # # ISA-based Watchdog Cards diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 0c3d35e3c334..a2126e2a99ae 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o obj-$(CONFIG_21285_WATCHDOG) += wdt285.o obj-$(CONFIG_977_WATCHDOG) += wdt977.o +obj-$(CONFIG_GEMINI_WATCHDOG) += gemini_wdt.o obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o @@ -82,6 +83,7 @@ obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o +obj-$(CONFIG_ZX2967_WATCHDOG) += zx2967_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -139,6 +141,7 @@ obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o +obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o # M32R Architecture diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c index d0b59ba0f661..53da001f0838 100644 --- a/drivers/watchdog/asm9260_wdt.c +++ b/drivers/watchdog/asm9260_wdt.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/reset.h> #include <linux/watchdog.h> @@ -59,7 +58,6 @@ struct asm9260_wdt_priv { struct clk *clk; struct clk *clk_ahb; struct reset_control *rst; - struct notifier_block restart_handler; void __iomem *iobase; int irq; @@ -172,15 +170,14 @@ static irqreturn_t asm9260_wdt_irq(int irq, void *devid) return IRQ_HANDLED; } -static int asm9260_restart_handler(struct notifier_block *this, - unsigned long mode, void *cmd) +static int asm9260_restart(struct watchdog_device *wdd, unsigned long action, + void *data) { - struct asm9260_wdt_priv *priv = - container_of(this, struct asm9260_wdt_priv, restart_handler); + struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd); asm9260_wdt_sys_reset(priv); - return NOTIFY_DONE; + return 0; } static const struct watchdog_info asm9260_wdt_ident = { @@ -189,13 +186,14 @@ static const struct watchdog_info asm9260_wdt_ident = { .identity = "Alphascale asm9260 Watchdog", }; -static struct watchdog_ops asm9260_wdt_ops = { +static const struct watchdog_ops asm9260_wdt_ops = { .owner = THIS_MODULE, .start = asm9260_wdt_enable, .stop = asm9260_wdt_disable, .get_timeleft = asm9260_wdt_gettimeleft, .ping = asm9260_wdt_feed, .set_timeout = asm9260_wdt_settimeout, + .restart = asm9260_restart, }; static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv) @@ -335,18 +333,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "failed to request IRQ\n"); } + watchdog_set_restart_priority(wdd, 128); + ret = watchdog_register_device(wdd); if (ret) goto clk_off; platform_set_drvdata(pdev, priv); - priv->restart_handler.notifier_call = asm9260_restart_handler; 
- priv->restart_handler.priority = 128; - ret = register_restart_handler(&priv->restart_handler); - if (ret) - dev_warn(&pdev->dev, "cannot register restart handler\n"); - dev_info(&pdev->dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n", wdd->timeout, mode_name[priv->mode]); return 0; @@ -370,8 +364,6 @@ static int asm9260_wdt_remove(struct platform_device *pdev) asm9260_wdt_disable(&priv->wdd); - unregister_restart_handler(&priv->restart_handler); - watchdog_unregister_device(&priv->wdd); clk_disable_unprepare(priv->clk); diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index f5ad8023c2e6..1c652582de40 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -136,15 +136,6 @@ static const struct watchdog_info aspeed_wdt_info = { .identity = KBUILD_MODNAME, }; -static int aspeed_wdt_remove(struct platform_device *pdev) -{ - struct aspeed_wdt *wdt = platform_get_drvdata(pdev); - - watchdog_unregister_device(&wdt->wdd); - - return 0; -} - static int aspeed_wdt_probe(struct platform_device *pdev) { struct aspeed_wdt *wdt; @@ -187,20 +178,17 @@ static int aspeed_wdt_probe(struct platform_device *pdev) set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); } - ret = watchdog_register_device(&wdt->wdd); + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); return ret; } - platform_set_drvdata(pdev, wdt); - return 0; } static struct platform_driver aspeed_watchdog_driver = { .probe = aspeed_wdt_probe, - .remove = aspeed_wdt_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(aspeed_wdt_of_table), diff --git a/drivers/watchdog/atlas7_wdt.c b/drivers/watchdog/atlas7_wdt.c index ed80734befae..4abdcabd8219 100644 --- a/drivers/watchdog/atlas7_wdt.c +++ b/drivers/watchdog/atlas7_wdt.c @@ -105,7 +105,7 @@ static const struct watchdog_info atlas7_wdt_ident = { .identity = "atlas7 Watchdog", }; -static struct watchdog_ops atlas7_wdt_ops = { +static const struct watchdog_ops atlas7_wdt_ops = { .owner = THIS_MODULE, .start = atlas7_wdt_enable, .stop = atlas7_wdt_disable, diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index c32c45bd8b09..b339e0e67b4c 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -14,7 +14,6 @@ */ #include <linux/delay.h> -#include <linux/reboot.h> #include <linux/types.h> #include <linux/module.h> #include <linux/io.h> @@ -37,9 +36,9 @@ #define PM_RSTC_RESET 0x00000102 /* - * The Raspberry Pi firmware uses the RSTS register to know which partiton - * to boot from. The partiton value is spread into bits 0, 2, 4, 6, 8, 10. - * Partiton 63 is a special partition used by the firmware to indicate halt. + * The Raspberry Pi firmware uses the RSTS register to know which partition + * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. + * Partition 63 is a special partition used by the firmware to indicate halt. 
*/ #define PM_RSTS_RASPBERRYPI_HALT 0x555 @@ -49,7 +48,6 @@ struct bcm2835_wdt { void __iomem *base; spinlock_t lock; - struct notifier_block restart_handler; }; static unsigned int heartbeat; @@ -99,11 +97,37 @@ static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog) return WDOG_TICKS_TO_SECS(ret & PM_WDOG_TIME_SET); } +static void __bcm2835_restart(struct bcm2835_wdt *wdt) +{ + u32 val; + + /* use a timeout of 10 ticks (~150us) */ + writel_relaxed(10 | PM_PASSWORD, wdt->base + PM_WDOG); + val = readl_relaxed(wdt->base + PM_RSTC); + val &= PM_RSTC_WRCFG_CLR; + val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET; + writel_relaxed(val, wdt->base + PM_RSTC); + + /* No sleeping, possibly atomic. */ + mdelay(1); +} + +static int bcm2835_restart(struct watchdog_device *wdog, + unsigned long action, void *data) +{ + struct bcm2835_wdt *wdt = watchdog_get_drvdata(wdog); + + __bcm2835_restart(wdt); + + return 0; +} + static const struct watchdog_ops bcm2835_wdt_ops = { .owner = THIS_MODULE, .start = bcm2835_wdt_start, .stop = bcm2835_wdt_stop, .get_timeleft = bcm2835_wdt_get_timeleft, + .restart = bcm2835_restart, }; static const struct watchdog_info bcm2835_wdt_info = { @@ -120,26 +144,6 @@ static struct watchdog_device bcm2835_wdt_wdd = { .timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET), }; -static int -bcm2835_restart(struct notifier_block *this, unsigned long mode, void *cmd) -{ - struct bcm2835_wdt *wdt = container_of(this, struct bcm2835_wdt, - restart_handler); - u32 val; - - /* use a timeout of 10 ticks (~150us) */ - writel_relaxed(10 | PM_PASSWORD, wdt->base + PM_WDOG); - val = readl_relaxed(wdt->base + PM_RSTC); - val &= PM_RSTC_WRCFG_CLR; - val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET; - writel_relaxed(val, wdt->base + PM_RSTC); - - /* No sleeping, possibly atomic. 
*/ - mdelay(1); - - return 0; -} - /* * We can't really power off, but if we do the normal reset scheme, and * indicate to bootcode.bin not to reboot, then most of the chip will be @@ -163,13 +167,13 @@ static void bcm2835_power_off(void) writel_relaxed(val, wdt->base + PM_RSTS); /* Continue with normal reset mechanism */ - bcm2835_restart(&wdt->restart_handler, REBOOT_HARD, NULL); + __bcm2835_restart(wdt); } static int bcm2835_wdt_probe(struct platform_device *pdev) { + struct resource *res; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct bcm2835_wdt *wdt; int err; @@ -180,16 +184,15 @@ static int bcm2835_wdt_probe(struct platform_device *pdev) spin_lock_init(&wdt->lock); - wdt->base = of_iomap(np, 0); - if (!wdt->base) { - dev_err(dev, "Failed to remap watchdog regs"); - return -ENODEV; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt); watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev); watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout); - bcm2835_wdt_wdd.parent = &pdev->dev; + bcm2835_wdt_wdd.parent = dev; if (bcm2835_wdt_is_running(wdt)) { /* * The currently active timeout value (set by the @@ -201,16 +204,16 @@ static int bcm2835_wdt_probe(struct platform_device *pdev) */ set_bit(WDOG_HW_RUNNING, &bcm2835_wdt_wdd.status); } - err = watchdog_register_device(&bcm2835_wdt_wdd); + + watchdog_set_restart_priority(&bcm2835_wdt_wdd, 128); + + watchdog_stop_on_reboot(&bcm2835_wdt_wdd); + err = devm_watchdog_register_device(dev, &bcm2835_wdt_wdd); if (err) { dev_err(dev, "Failed to register watchdog device"); - iounmap(wdt->base); return err; } - wdt->restart_handler.notifier_call = bcm2835_restart; - wdt->restart_handler.priority = 128; - register_restart_handler(&wdt->restart_handler); if (pm_power_off == NULL) pm_power_off = bcm2835_power_off; @@ -220,22 +223,12 @@ static int bcm2835_wdt_probe(struct platform_device *pdev) static int bcm2835_wdt_remove(struct platform_device *pdev) { - struct bcm2835_wdt *wdt = platform_get_drvdata(pdev); - - unregister_restart_handler(&wdt->restart_handler); if (pm_power_off == bcm2835_power_off) pm_power_off = NULL; - watchdog_unregister_device(&bcm2835_wdt_wdd); - iounmap(wdt->base); return 0; } -static void bcm2835_wdt_shutdown(struct platform_device *pdev) -{ - bcm2835_wdt_stop(&bcm2835_wdt_wdd); -} - static const struct of_device_id bcm2835_wdt_of_match[] = { { .compatible = "brcm,bcm2835-pm-wdt", }, {}, @@ -245,7 +238,6 @@ MODULE_DEVICE_TABLE(of, bcm2835_wdt_of_match); static struct platform_driver bcm2835_wdt_driver = { .probe = bcm2835_wdt_probe, .remove = bcm2835_wdt_remove, - .shutdown = bcm2835_wdt_shutdown, .driver = { .name = "bcm2835-wdt", .of_match_table = bcm2835_wdt_of_match, diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c index a1900b9ab6c4..35725e21b18a 100644 --- a/drivers/watchdog/bcm47xx_wdt.c +++ b/drivers/watchdog/bcm47xx_wdt.c @@ -226,9 +226,6 @@ static int bcm47xx_wdt_remove(struct platform_device *pdev) { struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev); - if (!wdt) - return -ENXIO; - watchdog_unregister_device(&wdt->wdd); return 0; diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c index 4814c00b32f6..c1b8e534fb55 100644 --- a/drivers/watchdog/bcm7038_wdt.c +++ b/drivers/watchdog/bcm7038_wdt.c @@ -101,7 +101,7 @@ static unsigned int bcm7038_wdt_get_timeleft(struct watchdog_device 
*wdog) return time_left / wdt->rate; } -static struct watchdog_info bcm7038_wdt_info = { +static const struct watchdog_info bcm7038_wdt_info = { .identity = "Broadcom BCM7038 Watchdog Timer", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index e0c98423f2c9..6fce17d5b9f1 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c @@ -266,7 +266,7 @@ static int bcm_kona_wdt_stop(struct watchdog_device *wdog) SECWDOG_SRSTEN_MASK, 0); } -static struct watchdog_ops bcm_kona_wdt_ops = { +static const struct watchdog_ops bcm_kona_wdt_ops = { .owner = THIS_MODULE, .start = bcm_kona_wdt_start, .stop = bcm_kona_wdt_stop, @@ -274,7 +274,7 @@ static struct watchdog_ops bcm_kona_wdt_ops = { .get_timeleft = bcm_kona_wdt_get_timeleft, }; -static struct watchdog_info bcm_kona_wdt_info = { +static const struct watchdog_info bcm_kona_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Broadcom Kona Watchdog Timer", diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 04da4b66c75e..3ad1e44bef44 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -192,12 +192,12 @@ static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev, return 0; } -static struct watchdog_info booke_wdt_info = { +static struct watchdog_info booke_wdt_info __ro_after_init = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "PowerPC Book-E Watchdog", }; -static struct watchdog_ops booke_wdt_ops = { +static const struct watchdog_ops booke_wdt_ops = { .owner = THIS_MODULE, .start = booke_wdt_start, .stop = booke_wdt_stop, diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 98acef72334d..8d61e8bfe60b 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -262,7 +262,7 @@ static irqreturn_t cdns_wdt_irq_handler(int irq, void *dev_id) * Info structure used to indicate the features supported by the device * to the upper layers. This is defined in watchdog.h header file. */ -static struct watchdog_info cdns_wdt_info = { +static const struct watchdog_info cdns_wdt_info = { .identity = "cdns_wdt watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c index a099b77fc0b9..38dd60f0cfcc 100644 --- a/drivers/watchdog/coh901327_wdt.c +++ b/drivers/watchdog/coh901327_wdt.c @@ -68,17 +68,10 @@ /* Default timeout in seconds = 1 minute */ static unsigned int margin = 60; -static resource_size_t phybase; -static resource_size_t physize; static int irq; static void __iomem *virtbase; static struct device *parent; -/* - * The watchdog block is of course always clocked, the - * clk_enable()/clk_disable() calls are mainly for performing reference - * counting higher up in the clock hierarchy. - */ static struct clk *clk; /* @@ -90,7 +83,6 @@ static void coh901327_enable(u16 timeout) unsigned long freq; unsigned long delay_ns; - clk_enable(clk); /* Restart timer if it is disabled */ val = readw(virtbase + U300_WDOG_D2R); if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED) @@ -118,7 +110,6 @@ static void coh901327_enable(u16 timeout) */ (void) readw(virtbase + U300_WDOG_CR); val = readw(virtbase + U300_WDOG_D2R); - clk_disable(clk); if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED) dev_err(parent, "%s(): watchdog not enabled! 
D2R value %04x\n", @@ -129,7 +120,6 @@ static void coh901327_disable(void) { u16 val; - clk_enable(clk); /* Disable the watchdog interrupt if it is active */ writew(0x0000U, virtbase + U300_WDOG_IMR); /* If the watchdog is currently enabled, attempt to disable it */ @@ -144,7 +134,6 @@ static void coh901327_disable(void) virtbase + U300_WDOG_D2R); } val = readw(virtbase + U300_WDOG_D2R); - clk_disable(clk); if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) dev_err(parent, "%s(): watchdog not disabled! D2R value %04x\n", @@ -165,11 +154,9 @@ static int coh901327_stop(struct watchdog_device *wdt_dev) static int coh901327_ping(struct watchdog_device *wdd) { - clk_enable(clk); /* Feed the watchdog */ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); - clk_disable(clk); return 0; } @@ -177,13 +164,11 @@ static int coh901327_settimeout(struct watchdog_device *wdt_dev, unsigned int time) { wdt_dev->timeout = time; - clk_enable(clk); /* Set new timeout value */ writew(time * 100, virtbase + U300_WDOG_TR); /* Feed the dog */ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); - clk_disable(clk); return 0; } @@ -191,13 +176,11 @@ static unsigned int coh901327_gettimeleft(struct watchdog_device *wdt_dev) { u16 val; - clk_enable(clk); /* Read repeatedly until the value is stable! */ val = readw(virtbase + U300_WDOG_CR); while (val & U300_WDOG_CR_VALID_IND) val = readw(virtbase + U300_WDOG_CR); val &= U300_WDOG_CR_COUNT_VALUE_MASK; - clk_disable(clk); if (val != 0) val /= 100; @@ -221,13 +204,11 @@ static irqreturn_t coh901327_interrupt(int irq, void *data) * to prevent a watchdog reset by feeding the watchdog at this * point. */ - clk_enable(clk); val = readw(virtbase + U300_WDOG_IER); if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND) writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, virtbase + U300_WDOG_IER); writew(0x0000U, virtbase + U300_WDOG_IMR); - clk_disable(clk); dev_crit(parent, "watchdog is barking!\n"); return IRQ_HANDLED; } @@ -263,81 +244,63 @@ static int __exit coh901327_remove(struct platform_device *pdev) watchdog_unregister_device(&coh901327_wdt); coh901327_disable(); free_irq(irq, pdev); - clk_unprepare(clk); + clk_disable_unprepare(clk); clk_put(clk); - iounmap(virtbase); - release_mem_region(phybase, physize); return 0; } static int __init coh901327_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; int ret; u16 val; struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENOENT; - - parent = &pdev->dev; - physize = resource_size(res); - phybase = res->start; + parent = dev; - if (request_mem_region(phybase, physize, DRV_NAME) == NULL) { - ret = -EBUSY; - goto out; - } - - virtbase = ioremap(phybase, physize); - if (!virtbase) { - ret = -ENOMEM; - goto out_no_remap; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + virtbase = devm_ioremap_resource(dev, res); + if (IS_ERR(virtbase)) + return PTR_ERR(virtbase); - clk = clk_get(&pdev->dev, NULL); + clk = clk_get(dev, NULL); if (IS_ERR(clk)) { ret = PTR_ERR(clk); - dev_err(&pdev->dev, "could not get clock\n"); - goto out_no_clk; + dev_err(dev, "could not get clock\n"); + return ret; } ret = clk_prepare_enable(clk); if (ret) { - dev_err(&pdev->dev, "could not prepare and enable clock\n"); + dev_err(dev, "could not prepare and enable clock\n"); goto out_no_clk_enable; } val = readw(virtbase + U300_WDOG_SR); switch (val) { case U300_WDOG_SR_STATUS_TIMED_OUT: - dev_info(&pdev->dev, - "watchdog timed out since last chip reset!\n"); + 
dev_info(dev, "watchdog timed out since last chip reset!\n"); coh901327_wdt.bootstatus |= WDIOF_CARDRESET; /* Status will be cleared below */ break; case U300_WDOG_SR_STATUS_NORMAL: - dev_info(&pdev->dev, - "in normal status, no timeouts have occurred.\n"); + dev_info(dev, "in normal status, no timeouts have occurred.\n"); break; default: - dev_info(&pdev->dev, - "contains an illegal status code (%08x)\n", val); + dev_info(dev, "contains an illegal status code (%08x)\n", val); break; } val = readw(virtbase + U300_WDOG_D2R); switch (val) { case U300_WDOG_D2R_DISABLE_STATUS_DISABLED: - dev_info(&pdev->dev, "currently disabled.\n"); + dev_info(dev, "currently disabled.\n"); break; case U300_WDOG_D2R_DISABLE_STATUS_ENABLED: - dev_info(&pdev->dev, - "currently enabled! (disabling it now)\n"); + dev_info(dev, "currently enabled! (disabling it now)\n"); coh901327_disable(); break; default: - dev_err(&pdev->dev, - "contains an illegal enable/disable code (%08x)\n", + dev_err(dev, "contains an illegal enable/disable code (%08x)\n", val); break; } @@ -352,20 +315,16 @@ static int __init coh901327_probe(struct platform_device *pdev) goto out_no_irq; } - clk_disable(clk); - - ret = watchdog_init_timeout(&coh901327_wdt, margin, &pdev->dev); + ret = watchdog_init_timeout(&coh901327_wdt, margin, dev); if (ret < 0) coh901327_wdt.timeout = 60; - coh901327_wdt.parent = &pdev->dev; + coh901327_wdt.parent = dev; ret = watchdog_register_device(&coh901327_wdt); - if (ret == 0) - dev_info(&pdev->dev, - "initialized. timer margin=%d sec\n", margin); - else + if (ret) goto out_no_wdog; + dev_info(dev, "initialized. timer margin=%d sec\n", margin); return 0; out_no_wdog: @@ -374,11 +333,6 @@ out_no_irq: clk_disable_unprepare(clk); out_no_clk_enable: clk_put(clk); -out_no_clk: - iounmap(virtbase); -out_no_remap: - release_mem_region(phybase, SZ_4K); -out: return ret; } diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index 2fc19a32a320..d6d5006efa71 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c @@ -128,19 +128,17 @@ static int da9052_wdt_ping(struct watchdog_device *wdt_dev) ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG, DA9052_CONTROLD_WATCHDOG, 1 << 7); if (ret < 0) - goto err_strobe; + return ret; /* * FIXME: Reset the watchdog core, in general PMIC * is supposed to do this */ - ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG, - DA9052_CONTROLD_WATCHDOG, 0 << 7); -err_strobe: - return ret; + return da9052_reg_update(da9052, DA9052_CONTROL_D_REG, + DA9052_CONTROLD_WATCHDOG, 0 << 7); } -static struct watchdog_info da9052_wdt_info = { +static const struct watchdog_info da9052_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "DA9052 Watchdog", }; @@ -163,10 +161,8 @@ static int da9052_wdt_probe(struct platform_device *pdev) driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data), GFP_KERNEL); - if (!driver_data) { - ret = -ENOMEM; - goto err; - } + if (!driver_data) + return -ENOMEM; driver_data->da9052 = da9052; da9052_wdt = &driver_data->wdt; @@ -182,33 +178,21 @@ static int da9052_wdt_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n", ret); - goto err; + return ret; } - ret = watchdog_register_device(&driver_data->wdt); + ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt); if (ret != 0) { dev_err(da9052->dev, "watchdog_register_device() failed: %d\n", ret); - goto err; + return ret; } - platform_set_drvdata(pdev, driver_data); -err: 
return ret; } -static int da9052_wdt_remove(struct platform_device *pdev) -{ - struct da9052_wdt_data *driver_data = platform_get_drvdata(pdev); - - watchdog_unregister_device(&driver_data->wdt); - - return 0; -} - static struct platform_driver da9052_wdt_driver = { .probe = da9052_wdt_probe, - .remove = da9052_wdt_remove, .driver = { .name = "da9052-watchdog", }, diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c index 8377c43f3f20..50bdd1022186 100644 --- a/drivers/watchdog/da9055_wdt.c +++ b/drivers/watchdog/da9055_wdt.c @@ -108,7 +108,7 @@ static int da9055_wdt_stop(struct watchdog_device *wdt_dev) return da9055_wdt_set_timeout(wdt_dev, 0); } -static struct watchdog_info da9055_wdt_info = { +static const struct watchdog_info da9055_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "DA9055 Watchdog", }; @@ -147,32 +147,19 @@ static int da9055_wdt_probe(struct platform_device *pdev) ret = da9055_wdt_stop(da9055_wdt); if (ret < 0) { dev_err(&pdev->dev, "Failed to stop watchdog, %d\n", ret); - goto err; + return ret; } - platform_set_drvdata(pdev, driver_data); - - ret = watchdog_register_device(&driver_data->wdt); + ret = devm_watchdog_register_device(&pdev->dev, &driver_data->wdt); if (ret != 0) dev_err(da9055->dev, "watchdog_register_device() failed: %d\n", ret); -err: return ret; } -static int da9055_wdt_remove(struct platform_device *pdev) -{ - struct da9055_wdt_data *driver_data = platform_get_drvdata(pdev); - - watchdog_unregister_device(&driver_data->wdt); - - return 0; -} - static struct platform_driver da9055_wdt_driver = { .probe = da9055_wdt_probe, - .remove = da9055_wdt_remove, .driver = { .name = "da9055-watchdog", }, diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index a02cee6820a1..9083d3d922b0 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -220,9 +220,8 @@ static int da9062_wdt_probe(struct platform_device *pdev) wdt->wdtdev.parent = &pdev->dev; watchdog_set_drvdata(&wdt->wdtdev, wdt); - dev_set_drvdata(&pdev->dev, wdt); - ret = watchdog_register_device(&wdt->wdtdev); + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev); if (ret < 0) { dev_err(wdt->hw->dev, "watchdog registration failed (%d)\n", ret); @@ -231,24 +230,11 @@ static int da9062_wdt_probe(struct platform_device *pdev) da9062_set_window_start(wdt); - ret = da9062_wdt_ping(&wdt->wdtdev); - if (ret < 0) - watchdog_unregister_device(&wdt->wdtdev); - - return ret; -} - -static int da9062_wdt_remove(struct platform_device *pdev) -{ - struct da9062_watchdog *wdt = dev_get_drvdata(&pdev->dev); - - watchdog_unregister_device(&wdt->wdtdev); - return 0; + return da9062_wdt_ping(&wdt->wdtdev); } static struct platform_driver da9062_wdt_driver = { .probe = da9062_wdt_probe, - .remove = da9062_wdt_remove, .driver = { .name = "da9062-watchdog", .of_match_table = da9062_compatible_id_table, diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index 5d6b4e5f7989..4691c5509129 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -151,7 +151,6 @@ static const struct watchdog_ops da9063_watchdog_ops = { static int da9063_wdt_probe(struct platform_device *pdev) { - int ret; struct da9063 *da9063; struct da9063_watchdog *wdt; @@ -181,27 +180,12 @@ static int da9063_wdt_probe(struct platform_device *pdev) watchdog_set_restart_priority(&wdt->wdtdev, 128); watchdog_set_drvdata(&wdt->wdtdev, wdt); - dev_set_drvdata(&pdev->dev, wdt); - - ret = 
watchdog_register_device(&wdt->wdtdev); - if (ret) - return ret; - - return 0; -} - -static int da9063_wdt_remove(struct platform_device *pdev) -{ - struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev); - - watchdog_unregister_device(&wdt->wdtdev); - return 0; + return devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev); } static struct platform_driver da9063_wdt_driver = { .probe = da9063_wdt_probe, - .remove = da9063_wdt_remove, .driver = { .name = DA9063_DRVNAME_WATCHDOG, }, diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 861d3d3133f8..6f591084bb7a 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -205,7 +205,7 @@ static int wdt_set_timeout(struct watchdog_device * dev, unsigned int new_to) return wdt_ping(dev); } -static struct watchdog_ops wdt_ops = { +static const struct watchdog_ops wdt_ops = { .owner = THIS_MODULE, .start = wdt_start, .stop = wdt_stop, diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c index 77df772406b0..5e4ef93caa02 100644 --- a/drivers/watchdog/digicolor_wdt.c +++ b/drivers/watchdog/digicolor_wdt.c @@ -96,7 +96,7 @@ static unsigned int dc_wdt_get_timeleft(struct watchdog_device *wdog) return count / clk_get_rate(wdt->clk); } -static struct watchdog_ops dc_wdt_ops = { +static const struct watchdog_ops dc_wdt_ops = { .owner = THIS_MODULE, .start = dc_wdt_start, .stop = dc_wdt_stop, @@ -105,7 +105,7 @@ static struct watchdog_ops dc_wdt_ops = { .restart = dc_wdt_restart, }; -static struct watchdog_info dc_wdt_info = { +static const struct watchdog_info dc_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Conexant Digicolor Watchdog", @@ -119,62 +119,40 @@ static struct watchdog_device dc_wdt_wdd = { static int dc_wdt_probe(struct platform_device *pdev) { + struct resource *res; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct dc_wdt *wdt; int ret; wdt = devm_kzalloc(dev, sizeof(struct dc_wdt), GFP_KERNEL); if (!wdt) return -ENOMEM; - platform_set_drvdata(pdev, wdt); - wdt->base = of_iomap(np, 0); - if (!wdt->base) { - dev_err(dev, "Failed to remap watchdog regs"); - return -ENODEV; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); - wdt->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(wdt->clk)) { - ret = PTR_ERR(wdt->clk); - goto err_iounmap; - } + wdt->clk = devm_clk_get(dev, NULL); + if (IS_ERR(wdt->clk)) + return PTR_ERR(wdt->clk); dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk); dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout; - dc_wdt_wdd.parent = &pdev->dev; + dc_wdt_wdd.parent = dev; spin_lock_init(&wdt->lock); watchdog_set_drvdata(&dc_wdt_wdd, wdt); watchdog_set_restart_priority(&dc_wdt_wdd, 128); watchdog_init_timeout(&dc_wdt_wdd, timeout, dev); - ret = watchdog_register_device(&dc_wdt_wdd); + watchdog_stop_on_reboot(&dc_wdt_wdd); + ret = devm_watchdog_register_device(dev, &dc_wdt_wdd); if (ret) { dev_err(dev, "Failed to register watchdog device"); - goto err_iounmap; + return ret; } return 0; - -err_iounmap: - iounmap(wdt->base); - return ret; -} - -static int dc_wdt_remove(struct platform_device *pdev) -{ - struct dc_wdt *wdt = platform_get_drvdata(pdev); - - watchdog_unregister_device(&dc_wdt_wdd); - iounmap(wdt->base); - - return 0; -} - -static void dc_wdt_shutdown(struct platform_device *pdev) -{ - dc_wdt_stop(&dc_wdt_wdd); } static const struct 
of_device_id dc_wdt_of_match[] = { @@ -185,8 +163,6 @@ MODULE_DEVICE_TABLE(of, dc_wdt_of_match); static struct platform_driver dc_wdt_driver = { .probe = dc_wdt_probe, - .remove = dc_wdt_remove, - .shutdown = dc_wdt_shutdown, .driver = { .name = "digicolor-wdt", .of_match_table = dc_wdt_of_match, diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 3c6a3de13a1b..914da3a4d334 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -26,11 +26,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/notifier.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/watchdog.h> #define WDOG_CONTROL_REG_OFFSET 0x00 @@ -55,7 +53,6 @@ struct dw_wdt { void __iomem *regs; struct clk *clk; unsigned long rate; - struct notifier_block restart_handler; struct watchdog_device wdd; }; @@ -136,14 +133,12 @@ static int dw_wdt_start(struct watchdog_device *wdd) return 0; } -static int dw_wdt_restart_handle(struct notifier_block *this, - unsigned long mode, void *cmd) +static int dw_wdt_restart(struct watchdog_device *wdd, + unsigned long action, void *data) { - struct dw_wdt *dw_wdt; + struct dw_wdt *dw_wdt = to_dw_wdt(wdd); u32 val; - dw_wdt = container_of(this, struct dw_wdt, restart_handler); - writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); if (val & WDOG_CONTROL_REG_WDT_EN_MASK) @@ -156,7 +151,7 @@ static int dw_wdt_restart_handle(struct notifier_block *this, /* wait for reset to assert... */ mdelay(500); - return NOTIFY_DONE; + return 0; } static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd) @@ -179,6 +174,7 @@ static const struct watchdog_ops dw_wdt_ops = { .ping = dw_wdt_ping, .set_timeout = dw_wdt_set_timeout, .get_timeleft = dw_wdt_get_timeleft, + .restart = dw_wdt_restart, }; #ifdef CONFIG_PM_SLEEP @@ -265,16 +261,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dw_wdt); + watchdog_set_restart_priority(wdd, 128); + ret = watchdog_register_device(wdd); if (ret) goto out_disable_clk; - dw_wdt->restart_handler.notifier_call = dw_wdt_restart_handle; - dw_wdt->restart_handler.priority = 128; - ret = register_restart_handler(&dw_wdt->restart_handler); - if (ret) - pr_warn("cannot register restart handler\n"); - return 0; out_disable_clk: @@ -286,7 +278,6 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) { struct dw_wdt *dw_wdt = platform_get_drvdata(pdev); - unregister_restart_handler(&dw_wdt->restart_handler); watchdog_unregister_device(&dw_wdt->wdd); clk_disable_unprepare(dw_wdt->clk); diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c index 4b849b8e37c2..2170b275ea01 100644 --- a/drivers/watchdog/ebc-c384_wdt.c +++ b/drivers/watchdog/ebc-c384_wdt.c @@ -121,18 +121,7 @@ static int ebc_c384_wdt_probe(struct device *dev, unsigned int id) dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n", timeout, WATCHDOG_TIMEOUT); - dev_set_drvdata(dev, wdd); - - return watchdog_register_device(wdd); -} - -static int ebc_c384_wdt_remove(struct device *dev, unsigned int id) -{ - struct watchdog_device *wdd = dev_get_drvdata(dev); - - watchdog_unregister_device(wdd); - - return 0; + return devm_watchdog_register_device(dev, wdd); } static struct isa_driver ebc_c384_wdt_driver = { @@ -140,7 +129,6 @@ static struct isa_driver ebc_c384_wdt_driver = { .driver = { .name = MODULE_NAME }, - 
.remove = ebc_c384_wdt_remove }; static int __init ebc_c384_wdt_init(void) diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index 0a4d7cc05d54..f9b14e6efd9a 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -19,21 +19,13 @@ * for us to rely on the user space daemon alone. So we ping the * wdt each ~200msec and eventually stop doing it if the user space * daemon dies. - * - * TODO: - * - * - Test last reset from watchdog status - * - Add a few missing ioctls */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/watchdog.h> -#include <linux/timer.h> #include <linux/io.h> -#define WDT_VERSION "0.4" - /* default timeout (secs) */ #define WDT_TIMEOUT 30 @@ -41,117 +33,101 @@ static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); -static unsigned int timeout = WDT_TIMEOUT; +static unsigned int timeout; module_param(timeout, uint, 0); -MODULE_PARM_DESC(timeout, - "Watchdog timeout in seconds. (1<=timeout<=3600, default=" - __MODULE_STRING(WDT_TIMEOUT) ")"); - -static void __iomem *mmio_base; -static struct timer_list timer; -static unsigned long next_heartbeat; +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds."); #define EP93XX_WATCHDOG 0x00 #define EP93XX_WDSTATUS 0x04 -/* reset the wdt every ~200ms - the heartbeat of the device is 0.250 seconds*/ -#define WDT_INTERVAL (HZ/5) - -static void ep93xx_wdt_timer_ping(unsigned long data) -{ - if (time_before(jiffies, next_heartbeat)) - writel(0x5555, mmio_base + EP93XX_WATCHDOG); - - /* Re-set the timer interval */ - mod_timer(&timer, jiffies + WDT_INTERVAL); -} +struct ep93xx_wdt_priv { + void __iomem *mmio; + struct watchdog_device wdd; +}; static int ep93xx_wdt_start(struct watchdog_device *wdd) { - next_heartbeat = jiffies + (timeout * HZ); + struct ep93xx_wdt_priv *priv = watchdog_get_drvdata(wdd); - writel(0xaaaa, mmio_base + EP93XX_WATCHDOG); - mod_timer(&timer, jiffies + WDT_INTERVAL); + writel(0xaaaa, priv->mmio + EP93XX_WATCHDOG); return 0; } static int ep93xx_wdt_stop(struct watchdog_device *wdd) { - del_timer_sync(&timer); - writel(0xaa55, mmio_base + EP93XX_WATCHDOG); + struct ep93xx_wdt_priv *priv = watchdog_get_drvdata(wdd); + + writel(0xaa55, priv->mmio + EP93XX_WATCHDOG); return 0; } -static int ep93xx_wdt_keepalive(struct watchdog_device *wdd) +static int ep93xx_wdt_ping(struct watchdog_device *wdd) { - /* user land ping */ - next_heartbeat = jiffies + (timeout * HZ); + struct ep93xx_wdt_priv *priv = watchdog_get_drvdata(wdd); + + writel(0x5555, priv->mmio + EP93XX_WATCHDOG); return 0; } static const struct watchdog_info ep93xx_wdt_ident = { .options = WDIOF_CARDRESET | + WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "EP93xx Watchdog", }; -static struct watchdog_ops ep93xx_wdt_ops = { +static const struct watchdog_ops ep93xx_wdt_ops = { .owner = THIS_MODULE, .start = ep93xx_wdt_start, .stop = ep93xx_wdt_stop, - .ping = ep93xx_wdt_keepalive, -}; - -static struct watchdog_device ep93xx_wdt_wdd = { - .info = &ep93xx_wdt_ident, - .ops = &ep93xx_wdt_ops, + .ping = ep93xx_wdt_ping, }; static int ep93xx_wdt_probe(struct platform_device *pdev) { + struct ep93xx_wdt_priv *priv; + struct watchdog_device *wdd; struct resource *res; unsigned long val; - int err; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmio_base = 
devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mmio_base)) - return PTR_ERR(mmio_base); + priv->mmio = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->mmio)) + return PTR_ERR(priv->mmio); - if (timeout < 1 || timeout > 3600) { - timeout = WDT_TIMEOUT; - dev_warn(&pdev->dev, - "timeout value must be 1<=x<=3600, using %d\n", - timeout); - } + val = readl(priv->mmio + EP93XX_WATCHDOG); - val = readl(mmio_base + EP93XX_WATCHDOG); - ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0; - ep93xx_wdt_wdd.timeout = timeout; - ep93xx_wdt_wdd.parent = &pdev->dev; + wdd = &priv->wdd; + wdd->bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0; + wdd->info = &ep93xx_wdt_ident; + wdd->ops = &ep93xx_wdt_ops; + wdd->min_timeout = 1; + wdd->max_hw_heartbeat_ms = 200; + wdd->parent = &pdev->dev; - watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout); + watchdog_set_nowayout(wdd, nowayout); - setup_timer(&timer, ep93xx_wdt_timer_ping, 1); + wdd->timeout = WDT_TIMEOUT; + watchdog_init_timeout(wdd, timeout, &pdev->dev); - err = watchdog_register_device(&ep93xx_wdt_wdd); - if (err) - return err; + watchdog_set_drvdata(wdd, priv); - dev_info(&pdev->dev, - "EP93XX watchdog, driver version " WDT_VERSION "%s\n", - (val & 0x08) ? " (nCS1 disable detected)" : ""); + ret = devm_watchdog_register_device(&pdev->dev, wdd); + if (ret) + return ret; - return 0; -} + dev_info(&pdev->dev, "EP93XX watchdog driver %s\n", + (val & 0x08) ? " (nCS1 disable detected)" : ""); -static int ep93xx_wdt_remove(struct platform_device *pdev) -{ - watchdog_unregister_device(&ep93xx_wdt_wdd); return 0; } @@ -160,7 +136,6 @@ static struct platform_driver ep93xx_wdt_driver = { .name = "ep93xx-wdt", }, .probe = ep93xx_wdt_probe, - .remove = ep93xx_wdt_remove, }; module_platform_driver(ep93xx_wdt_driver); @@ -170,4 +145,3 @@ MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_DESCRIPTION("EP93xx Watchdog"); MODULE_LICENSE("GPL"); -MODULE_VERSION(WDT_VERSION); diff --git a/drivers/watchdog/gemini_wdt.c b/drivers/watchdog/gemini_wdt.c new file mode 100644 index 000000000000..8155aa619e4c --- /dev/null +++ b/drivers/watchdog/gemini_wdt.c @@ -0,0 +1,229 @@ +/* + * Watchdog driver for Cortina Systems Gemini SoC + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * + * Inspired by the out-of-tree drivers from OpenWRT: + * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/bitops.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/watchdog.h> + +#define GEMINI_WDCOUNTER 0x0 +#define GEMINI_WDLOAD 0x4 +#define GEMINI_WDRESTART 0x8 +#define GEMINI_WDCR 0xC + +#define WDRESTART_MAGIC 0x5AB9 + +#define WDCR_CLOCK_5MHZ BIT(4) +#define WDCR_SYS_RST BIT(1) +#define WDCR_ENABLE BIT(0) + +#define WDT_CLOCK 5000000 /* 5 MHz */ + +struct gemini_wdt { + struct watchdog_device wdd; + struct device *dev; + void __iomem *base; +}; + +static inline +struct gemini_wdt *to_gemini_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct gemini_wdt, wdd); +} + +static int gemini_wdt_start(struct watchdog_device *wdd) +{ + struct gemini_wdt *gwdt = to_gemini_wdt(wdd); + + writel(wdd->timeout * WDT_CLOCK, gwdt->base + GEMINI_WDLOAD); + writel(WDRESTART_MAGIC, gwdt->base + GEMINI_WDRESTART); + /* set clock before enabling */ + writel(WDCR_CLOCK_5MHZ | WDCR_SYS_RST, + gwdt->base + GEMINI_WDCR); + writel(WDCR_CLOCK_5MHZ | WDCR_SYS_RST | WDCR_ENABLE, + gwdt->base + GEMINI_WDCR); + + return 0; +} + +static int gemini_wdt_stop(struct watchdog_device *wdd) +{ + struct gemini_wdt *gwdt = to_gemini_wdt(wdd); + + writel(0, gwdt->base + GEMINI_WDCR); + + return 0; +} + +static int gemini_wdt_ping(struct watchdog_device *wdd) +{ + struct gemini_wdt *gwdt = to_gemini_wdt(wdd); + + writel(WDRESTART_MAGIC, gwdt->base + GEMINI_WDRESTART); + + return 0; +} + +static int gemini_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + wdd->timeout = timeout; + if (watchdog_active(wdd)) + gemini_wdt_start(wdd); + + return 0; +} + +static irqreturn_t gemini_wdt_interrupt(int irq, void *data) +{ + struct gemini_wdt *gwdt = data; + + watchdog_notify_pretimeout(&gwdt->wdd); + + return IRQ_HANDLED; +} + +static const struct watchdog_ops gemini_wdt_ops = { + .start = gemini_wdt_start, + .stop = gemini_wdt_stop, + .ping = gemini_wdt_ping, + .set_timeout = gemini_wdt_set_timeout, + .owner = THIS_MODULE, +}; + +static const struct watchdog_info gemini_wdt_info = { + .options = WDIOF_KEEPALIVEPING + | WDIOF_MAGICCLOSE + | WDIOF_SETTIMEOUT, + .identity = KBUILD_MODNAME, +}; + + +static int gemini_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct gemini_wdt *gwdt; + unsigned int reg; + int irq; + int ret; + + gwdt = devm_kzalloc(dev, sizeof(*gwdt), GFP_KERNEL); + if (!gwdt) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gwdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(gwdt->base)) + return PTR_ERR(gwdt->base); + + irq = platform_get_irq(pdev, 0); + if (!irq) + return -EINVAL; + + gwdt->dev = dev; + gwdt->wdd.info = &gemini_wdt_info; + gwdt->wdd.ops = &gemini_wdt_ops; + gwdt->wdd.min_timeout = 1; + gwdt->wdd.max_timeout = 0xFFFFFFFF / WDT_CLOCK; + gwdt->wdd.parent = dev; + + /* + * If 'timeout-sec' unspecified in devicetree, assume a 13 second + * default. + */ + gwdt->wdd.timeout = 13U; + watchdog_init_timeout(&gwdt->wdd, 0, dev); + + reg = readw(gwdt->base + GEMINI_WDCR); + if (reg & WDCR_ENABLE) { + /* Watchdog was enabled by the bootloader, disable it. 
*/ + reg &= ~WDCR_ENABLE; + writel(reg, gwdt->base + GEMINI_WDCR); + } + + ret = devm_request_irq(dev, irq, gemini_wdt_interrupt, 0, + "watchdog bark", gwdt); + if (ret) + return ret; + + ret = devm_watchdog_register_device(dev, &gwdt->wdd); + if (ret) { + dev_err(&pdev->dev, "failed to register watchdog\n"); + return ret; + } + + /* Set up platform driver data */ + platform_set_drvdata(pdev, gwdt); + dev_info(dev, "Gemini watchdog driver enabled\n"); + + return 0; +} + +static int __maybe_unused gemini_wdt_suspend(struct device *dev) +{ + struct gemini_wdt *gwdt = dev_get_drvdata(dev); + unsigned int reg; + + reg = readw(gwdt->base + GEMINI_WDCR); + reg &= ~WDCR_ENABLE; + writel(reg, gwdt->base + GEMINI_WDCR); + + return 0; +} + +static int __maybe_unused gemini_wdt_resume(struct device *dev) +{ + struct gemini_wdt *gwdt = dev_get_drvdata(dev); + unsigned int reg; + + if (watchdog_active(&gwdt->wdd)) { + reg = readw(gwdt->base + GEMINI_WDCR); + reg |= WDCR_ENABLE; + writel(reg, gwdt->base + GEMINI_WDCR); + } + + return 0; +} + +static const struct dev_pm_ops gemini_wdt_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(gemini_wdt_suspend, + gemini_wdt_resume) +}; + +#ifdef CONFIG_OF +static const struct of_device_id gemini_wdt_match[] = { + { .compatible = "cortina,gemini-watchdog" }, + {}, +}; +MODULE_DEVICE_TABLE(of, gemini_wdt_match); +#endif + +static struct platform_driver gemini_wdt_driver = { + .probe = gemini_wdt_probe, + .driver = { + .name = "gemini-wdt", + .of_match_table = of_match_ptr(gemini_wdt_match), + .pm = &gemini_wdt_dev_pm_ops, + }, +}; +module_platform_driver(gemini_wdt_driver); +MODULE_AUTHOR("Linus Walleij"); +MODULE_DESCRIPTION("Watchdog driver for Gemini"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 06fcb6c8c917..3d0abc0d59b4 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -72,22 +72,24 @@ /* Address definitions for the TCO */ /* TCO base address */ -#define TCOBASE (iTCO_wdt_private.tco_res->start) +#define TCOBASE(p) ((p)->tco_res->start) /* SMI Control and Enable Register */ -#define SMI_EN (iTCO_wdt_private.smi_res->start) - -#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */ -#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */ -#define TCO_DAT_IN (TCOBASE + 0x02) /* TCO Data In Register */ -#define TCO_DAT_OUT (TCOBASE + 0x03) /* TCO Data Out Register */ -#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */ -#define TCO2_STS (TCOBASE + 0x06) /* TCO2 Status Register */ -#define TCO1_CNT (TCOBASE + 0x08) /* TCO1 Control Register */ -#define TCO2_CNT (TCOBASE + 0x0a) /* TCO2 Control Register */ -#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */ +#define SMI_EN(p) ((p)->smi_res->start) + +#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. 
Value */ +#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/ +#define TCO_DAT_IN(p) (TCOBASE(p) + 0x02) /* TCO Data In Register */ +#define TCO_DAT_OUT(p) (TCOBASE(p) + 0x03) /* TCO Data Out Register */ +#define TCO1_STS(p) (TCOBASE(p) + 0x04) /* TCO1 Status Register */ +#define TCO2_STS(p) (TCOBASE(p) + 0x06) /* TCO2 Status Register */ +#define TCO1_CNT(p) (TCOBASE(p) + 0x08) /* TCO1 Control Register */ +#define TCO2_CNT(p) (TCOBASE(p) + 0x0a) /* TCO2 Control Register */ +#define TCOv2_TMR(p) (TCOBASE(p) + 0x12) /* TCOv2 Timer Initial Value*/ /* internal variables */ -static struct { /* this is private data for the iTCO_wdt device */ +struct iTCO_wdt_private { + struct watchdog_device wddev; + /* TCO version/generation */ unsigned int iTCO_version; struct resource *tco_res; @@ -100,12 +102,11 @@ static struct { /* this is private data for the iTCO_wdt device */ unsigned long __iomem *gcs_pmc; /* the lock for io operations */ spinlock_t io_lock; - struct platform_device *dev; /* the PCI-device */ - struct pci_dev *pdev; + struct pci_dev *pci_dev; /* whether or not the watchdog has been suspended */ bool suspended; -} iTCO_wdt_private; +}; /* module parameters */ #define WATCHDOG_TIMEOUT 30 /* 30 sec default heartbeat */ @@ -135,21 +136,23 @@ MODULE_PARM_DESC(turn_SMI_watchdog_clear_off, * every 0.6 seconds. v3's internal timer is stored as seconds (some * datasheets incorrectly state 0.6 seconds). */ -static inline unsigned int seconds_to_ticks(int secs) +static inline unsigned int seconds_to_ticks(struct iTCO_wdt_private *p, + int secs) { - return iTCO_wdt_private.iTCO_version == 3 ? secs : (secs * 10) / 6; + return p->iTCO_version == 3 ? secs : (secs * 10) / 6; } -static inline unsigned int ticks_to_seconds(int ticks) +static inline unsigned int ticks_to_seconds(struct iTCO_wdt_private *p, + int ticks) { - return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10; + return p->iTCO_version == 3 ? 
ticks : (ticks * 6) / 10; } -static inline u32 no_reboot_bit(void) +static inline u32 no_reboot_bit(struct iTCO_wdt_private *p) { u32 enable_bit; - switch (iTCO_wdt_private.iTCO_version) { + switch (p->iTCO_version) { case 5: case 3: enable_bit = 0x00000010; @@ -167,40 +170,40 @@ static inline u32 no_reboot_bit(void) return enable_bit; } -static void iTCO_wdt_set_NO_REBOOT_bit(void) +static void iTCO_wdt_set_NO_REBOOT_bit(struct iTCO_wdt_private *p) { u32 val32; /* Set the NO_REBOOT bit: this disables reboots */ - if (iTCO_wdt_private.iTCO_version >= 2) { - val32 = readl(iTCO_wdt_private.gcs_pmc); - val32 |= no_reboot_bit(); - writel(val32, iTCO_wdt_private.gcs_pmc); - } else if (iTCO_wdt_private.iTCO_version == 1) { - pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32); - val32 |= no_reboot_bit(); - pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32); + if (p->iTCO_version >= 2) { + val32 = readl(p->gcs_pmc); + val32 |= no_reboot_bit(p); + writel(val32, p->gcs_pmc); + } else if (p->iTCO_version == 1) { + pci_read_config_dword(p->pci_dev, 0xd4, &val32); + val32 |= no_reboot_bit(p); + pci_write_config_dword(p->pci_dev, 0xd4, val32); } } -static int iTCO_wdt_unset_NO_REBOOT_bit(void) +static int iTCO_wdt_unset_NO_REBOOT_bit(struct iTCO_wdt_private *p) { - u32 enable_bit = no_reboot_bit(); + u32 enable_bit = no_reboot_bit(p); u32 val32 = 0; /* Unset the NO_REBOOT bit: this enables reboots */ - if (iTCO_wdt_private.iTCO_version >= 2) { - val32 = readl(iTCO_wdt_private.gcs_pmc); + if (p->iTCO_version >= 2) { + val32 = readl(p->gcs_pmc); val32 &= ~enable_bit; - writel(val32, iTCO_wdt_private.gcs_pmc); + writel(val32, p->gcs_pmc); - val32 = readl(iTCO_wdt_private.gcs_pmc); - } else if (iTCO_wdt_private.iTCO_version == 1) { - pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32); + val32 = readl(p->gcs_pmc); + } else if (p->iTCO_version == 1) { + pci_read_config_dword(p->pci_dev, 0xd4, &val32); val32 &= ~enable_bit; - pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32); + pci_write_config_dword(p->pci_dev, 0xd4, val32); - pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32); + pci_read_config_dword(p->pci_dev, 0xd4, &val32); } if (val32 & enable_bit) @@ -211,32 +214,33 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void) static int iTCO_wdt_start(struct watchdog_device *wd_dev) { + struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&iTCO_wdt_private.io_lock); + spin_lock(&p->io_lock); - iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, wd_dev->timeout); + iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout); /* disable chipset's NO_REBOOT bit */ - if (iTCO_wdt_unset_NO_REBOOT_bit()) { - spin_unlock(&iTCO_wdt_private.io_lock); + if (iTCO_wdt_unset_NO_REBOOT_bit(p)) { + spin_unlock(&p->io_lock); pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n"); return -EIO; } /* Force the timer to its reload value by writing to the TCO_RLD register */ - if (iTCO_wdt_private.iTCO_version >= 2) - outw(0x01, TCO_RLD); - else if (iTCO_wdt_private.iTCO_version == 1) - outb(0x01, TCO_RLD); + if (p->iTCO_version >= 2) + outw(0x01, TCO_RLD(p)); + else if (p->iTCO_version == 1) + outb(0x01, TCO_RLD(p)); /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled to count */ - val = inw(TCO1_CNT); + val = inw(TCO1_CNT(p)); val &= 0xf7ff; - outw(val, TCO1_CNT); - val = inw(TCO1_CNT); - spin_unlock(&iTCO_wdt_private.io_lock); + outw(val, TCO1_CNT(p)); + val = inw(TCO1_CNT(p)); + spin_unlock(&p->io_lock); if (val & 0x0800) return -1; @@ 
-245,22 +249,23 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev) static int iTCO_wdt_stop(struct watchdog_device *wd_dev) { + struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&iTCO_wdt_private.io_lock); + spin_lock(&p->io_lock); - iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res); + iTCO_vendor_pre_stop(p->smi_res); /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */ - val = inw(TCO1_CNT); + val = inw(TCO1_CNT(p)); val |= 0x0800; - outw(val, TCO1_CNT); - val = inw(TCO1_CNT); + outw(val, TCO1_CNT(p)); + val = inw(TCO1_CNT(p)); /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ - iTCO_wdt_set_NO_REBOOT_bit(); + iTCO_wdt_set_NO_REBOOT_bit(p); - spin_unlock(&iTCO_wdt_private.io_lock); + spin_unlock(&p->io_lock); if ((val & 0x0800) == 0) return -1; @@ -269,67 +274,70 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev) static int iTCO_wdt_ping(struct watchdog_device *wd_dev) { - spin_lock(&iTCO_wdt_private.io_lock); + struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); - iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, wd_dev->timeout); + spin_lock(&p->io_lock); + + iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); /* Reload the timer by writing to the TCO Timer Counter register */ - if (iTCO_wdt_private.iTCO_version >= 2) { - outw(0x01, TCO_RLD); - } else if (iTCO_wdt_private.iTCO_version == 1) { + if (p->iTCO_version >= 2) { + outw(0x01, TCO_RLD(p)); + } else if (p->iTCO_version == 1) { /* Reset the timeout status bit so that the timer * needs to count down twice again before rebooting */ - outw(0x0008, TCO1_STS); /* write 1 to clear bit */ + outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ - outb(0x01, TCO_RLD); + outb(0x01, TCO_RLD(p)); } - spin_unlock(&iTCO_wdt_private.io_lock); + spin_unlock(&p->io_lock); return 0; } static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) { + struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val16; unsigned char val8; unsigned int tmrval; - tmrval = seconds_to_ticks(t); + tmrval = seconds_to_ticks(p, t); /* For TCO v1 the timer counts down twice before rebooting */ - if (iTCO_wdt_private.iTCO_version == 1) + if (p->iTCO_version == 1) tmrval /= 2; /* from the specs: */ /* "Values of 0h-3h are ignored and should not be attempted" */ if (tmrval < 0x04) return -EINVAL; - if (((iTCO_wdt_private.iTCO_version >= 2) && (tmrval > 0x3ff)) || - ((iTCO_wdt_private.iTCO_version == 1) && (tmrval > 0x03f))) + if ((p->iTCO_version >= 2 && tmrval > 0x3ff) || + (p->iTCO_version == 1 && tmrval > 0x03f)) return -EINVAL; iTCO_vendor_pre_set_heartbeat(tmrval); /* Write new heartbeat to watchdog */ - if (iTCO_wdt_private.iTCO_version >= 2) { - spin_lock(&iTCO_wdt_private.io_lock); - val16 = inw(TCOv2_TMR); + if (p->iTCO_version >= 2) { + spin_lock(&p->io_lock); + val16 = inw(TCOv2_TMR(p)); val16 &= 0xfc00; val16 |= tmrval; - outw(val16, TCOv2_TMR); - val16 = inw(TCOv2_TMR); - spin_unlock(&iTCO_wdt_private.io_lock); + outw(val16, TCOv2_TMR(p)); + val16 = inw(TCOv2_TMR(p)); + spin_unlock(&p->io_lock); if ((val16 & 0x3ff) != tmrval) return -EINVAL; - } else if (iTCO_wdt_private.iTCO_version == 1) { - spin_lock(&iTCO_wdt_private.io_lock); - val8 = inb(TCOv1_TMR); + } else if (p->iTCO_version == 1) { + spin_lock(&p->io_lock); + val8 = inb(TCOv1_TMR(p)); val8 &= 0xc0; val8 |= (tmrval & 0xff); - outb(val8, TCOv1_TMR); - val8 = inb(TCOv1_TMR); - spin_unlock(&iTCO_wdt_private.io_lock); + outb(val8, TCOv1_TMR(p)); + val8 = 
inb(TCOv1_TMR(p)); + spin_unlock(&p->io_lock); if ((val8 & 0x3f) != tmrval) return -EINVAL; @@ -341,27 +349,28 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev) { + struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val16; unsigned char val8; unsigned int time_left = 0; /* read the TCO Timer */ - if (iTCO_wdt_private.iTCO_version >= 2) { - spin_lock(&iTCO_wdt_private.io_lock); - val16 = inw(TCO_RLD); + if (p->iTCO_version >= 2) { + spin_lock(&p->io_lock); + val16 = inw(TCO_RLD(p)); val16 &= 0x3ff; - spin_unlock(&iTCO_wdt_private.io_lock); + spin_unlock(&p->io_lock); - time_left = ticks_to_seconds(val16); - } else if (iTCO_wdt_private.iTCO_version == 1) { - spin_lock(&iTCO_wdt_private.io_lock); - val8 = inb(TCO_RLD); + time_left = ticks_to_seconds(p, val16); + } else if (p->iTCO_version == 1) { + spin_lock(&p->io_lock); + val8 = inb(TCO_RLD(p)); val8 &= 0x3f; - if (!(inw(TCO1_STS) & 0x0008)) - val8 += (inb(TCOv1_TMR) & 0x3f); - spin_unlock(&iTCO_wdt_private.io_lock); + if (!(inw(TCO1_STS(p)) & 0x0008)) + val8 += (inb(TCOv1_TMR(p)) & 0x3f); + spin_unlock(&p->io_lock); - time_left = ticks_to_seconds(val8); + time_left = ticks_to_seconds(p, val8); } return time_left; } @@ -387,209 +396,152 @@ static const struct watchdog_ops iTCO_wdt_ops = { .get_timeleft = iTCO_wdt_get_timeleft, }; -static struct watchdog_device iTCO_wdt_watchdog_dev = { - .info = &ident, - .ops = &iTCO_wdt_ops, -}; - /* * Init & exit routines */ -static void iTCO_wdt_cleanup(void) -{ - /* Stop the timer before we leave */ - if (!nowayout) - iTCO_wdt_stop(&iTCO_wdt_watchdog_dev); - - /* Deregister */ - watchdog_unregister_device(&iTCO_wdt_watchdog_dev); - - /* release resources */ - release_region(iTCO_wdt_private.tco_res->start, - resource_size(iTCO_wdt_private.tco_res)); - release_region(iTCO_wdt_private.smi_res->start, - resource_size(iTCO_wdt_private.smi_res)); - if (iTCO_wdt_private.iTCO_version >= 2) { - iounmap(iTCO_wdt_private.gcs_pmc); - release_mem_region(iTCO_wdt_private.gcs_pmc_res->start, - resource_size(iTCO_wdt_private.gcs_pmc_res)); - } - - iTCO_wdt_private.tco_res = NULL; - iTCO_wdt_private.smi_res = NULL; - iTCO_wdt_private.gcs_pmc_res = NULL; - iTCO_wdt_private.gcs_pmc = NULL; -} - -static int iTCO_wdt_probe(struct platform_device *dev) +static int iTCO_wdt_probe(struct platform_device *pdev) { - int ret = -ENODEV; + struct device *dev = &pdev->dev; + struct itco_wdt_platform_data *pdata = dev_get_platdata(dev); + struct iTCO_wdt_private *p; unsigned long val32; - struct itco_wdt_platform_data *pdata = dev_get_platdata(&dev->dev); + int ret; if (!pdata) - goto out; + return -ENODEV; + + p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; - spin_lock_init(&iTCO_wdt_private.io_lock); + spin_lock_init(&p->io_lock); - iTCO_wdt_private.tco_res = - platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO); - if (!iTCO_wdt_private.tco_res) - goto out; + p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO); + if (!p->tco_res) + return -ENODEV; - iTCO_wdt_private.smi_res = - platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI); - if (!iTCO_wdt_private.smi_res) - goto out; + p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI); + if (!p->smi_res) + return -ENODEV; - iTCO_wdt_private.iTCO_version = pdata->version; - iTCO_wdt_private.dev = dev; - iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent); + p->iTCO_version = 
pdata->version; + p->pci_dev = to_pci_dev(dev->parent); /* * Get the Memory-Mapped GCS or PMC register, we need it for the * NO_REBOOT flag (TCO v2 and v3). */ - if (iTCO_wdt_private.iTCO_version >= 2) { - iTCO_wdt_private.gcs_pmc_res = platform_get_resource(dev, - IORESOURCE_MEM, - ICH_RES_MEM_GCS_PMC); - - if (!iTCO_wdt_private.gcs_pmc_res) - goto out; - - if (!request_mem_region(iTCO_wdt_private.gcs_pmc_res->start, - resource_size(iTCO_wdt_private.gcs_pmc_res), dev->name)) { - ret = -EBUSY; - goto out; - } - iTCO_wdt_private.gcs_pmc = ioremap(iTCO_wdt_private.gcs_pmc_res->start, - resource_size(iTCO_wdt_private.gcs_pmc_res)); - if (!iTCO_wdt_private.gcs_pmc) { - ret = -EIO; - goto unreg_gcs_pmc; - } + if (p->iTCO_version >= 2) { + p->gcs_pmc_res = platform_get_resource(pdev, + IORESOURCE_MEM, + ICH_RES_MEM_GCS_PMC); + p->gcs_pmc = devm_ioremap_resource(dev, p->gcs_pmc_res); + if (IS_ERR(p->gcs_pmc)) + return PTR_ERR(p->gcs_pmc); } /* Check chipset's NO_REBOOT bit */ - if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) { + if (iTCO_wdt_unset_NO_REBOOT_bit(p) && + iTCO_vendor_check_noreboot_on()) { pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n"); - ret = -ENODEV; /* Cannot reset NO_REBOOT bit */ - goto unmap_gcs_pmc; + return -ENODEV; /* Cannot reset NO_REBOOT bit */ } /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ - iTCO_wdt_set_NO_REBOOT_bit(); + iTCO_wdt_set_NO_REBOOT_bit(p); /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ - if (!request_region(iTCO_wdt_private.smi_res->start, - resource_size(iTCO_wdt_private.smi_res), dev->name)) { + if (!devm_request_region(dev, p->smi_res->start, + resource_size(p->smi_res), + pdev->name)) { pr_err("I/O address 0x%04llx already in use, device disabled\n", - (u64)SMI_EN); - ret = -EBUSY; - goto unmap_gcs_pmc; + (u64)SMI_EN(p)); + return -EBUSY; } - if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) { + if (turn_SMI_watchdog_clear_off >= p->iTCO_version) { /* * Bit 13: TCO_EN -> 0 * Disables TCO logic generating an SMI# */ - val32 = inl(SMI_EN); + val32 = inl(SMI_EN(p)); val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ - outl(val32, SMI_EN); + outl(val32, SMI_EN(p)); } - if (!request_region(iTCO_wdt_private.tco_res->start, - resource_size(iTCO_wdt_private.tco_res), dev->name)) { + if (!devm_request_region(dev, p->tco_res->start, + resource_size(p->tco_res), + pdev->name)) { pr_err("I/O address 0x%04llx already in use, device disabled\n", - (u64)TCOBASE); - ret = -EBUSY; - goto unreg_smi; + (u64)TCOBASE(p)); + return -EBUSY; } pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n", - pdata->name, pdata->version, (u64)TCOBASE); + pdata->name, pdata->version, (u64)TCOBASE(p)); /* Clear out the (probably old) status */ - switch (iTCO_wdt_private.iTCO_version) { + switch (p->iTCO_version) { case 5: case 4: - outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */ - outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */ + outw(0x0008, TCO1_STS(p)); /* Clear the Time Out Status bit */ + outw(0x0002, TCO2_STS(p)); /* Clear SECOND_TO_STS bit */ break; case 3: - outl(0x20008, TCO1_STS); + outl(0x20008, TCO1_STS(p)); break; case 2: case 1: default: - outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */ - outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */ - outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */ + outw(0x0008, TCO1_STS(p)); /* Clear the Time Out Status bit */ + outw(0x0002, TCO2_STS(p)); /* Clear SECOND_TO_STS bit */ + 
outw(0x0004, TCO2_STS(p)); /* Clear BOOT_STS bit */ break; } - iTCO_wdt_watchdog_dev.bootstatus = 0; - iTCO_wdt_watchdog_dev.timeout = WATCHDOG_TIMEOUT; - watchdog_set_nowayout(&iTCO_wdt_watchdog_dev, nowayout); - iTCO_wdt_watchdog_dev.parent = &dev->dev; + p->wddev.info = &ident, + p->wddev.ops = &iTCO_wdt_ops, + p->wddev.bootstatus = 0; + p->wddev.timeout = WATCHDOG_TIMEOUT; + watchdog_set_nowayout(&p->wddev, nowayout); + p->wddev.parent = dev; + + watchdog_set_drvdata(&p->wddev, p); + platform_set_drvdata(pdev, p); /* Make sure the watchdog is not running */ - iTCO_wdt_stop(&iTCO_wdt_watchdog_dev); + iTCO_wdt_stop(&p->wddev); /* Check that the heartbeat value is within it's range; if not reset to the default */ - if (iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, heartbeat)) { - iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, WATCHDOG_TIMEOUT); + if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) { + iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); pr_info("timeout value out of range, using %d\n", WATCHDOG_TIMEOUT); } - ret = watchdog_register_device(&iTCO_wdt_watchdog_dev); + watchdog_stop_on_reboot(&p->wddev); + ret = devm_watchdog_register_device(dev, &p->wddev); if (ret != 0) { pr_err("cannot register watchdog device (err=%d)\n", ret); - goto unreg_tco; + return ret; } pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; - -unreg_tco: - release_region(iTCO_wdt_private.tco_res->start, - resource_size(iTCO_wdt_private.tco_res)); -unreg_smi: - release_region(iTCO_wdt_private.smi_res->start, - resource_size(iTCO_wdt_private.smi_res)); -unmap_gcs_pmc: - if (iTCO_wdt_private.iTCO_version >= 2) - iounmap(iTCO_wdt_private.gcs_pmc); -unreg_gcs_pmc: - if (iTCO_wdt_private.iTCO_version >= 2) - release_mem_region(iTCO_wdt_private.gcs_pmc_res->start, - resource_size(iTCO_wdt_private.gcs_pmc_res)); -out: - iTCO_wdt_private.tco_res = NULL; - iTCO_wdt_private.smi_res = NULL; - iTCO_wdt_private.gcs_pmc_res = NULL; - iTCO_wdt_private.gcs_pmc = NULL; - - return ret; } -static int iTCO_wdt_remove(struct platform_device *dev) +static int iTCO_wdt_remove(struct platform_device *pdev) { - if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res) - iTCO_wdt_cleanup(); + struct iTCO_wdt_private *p = platform_get_drvdata(pdev); - return 0; -} + /* Stop the timer before we leave */ + if (!nowayout) + iTCO_wdt_stop(&p->wddev); -static void iTCO_wdt_shutdown(struct platform_device *dev) -{ - iTCO_wdt_stop(NULL); + return 0; } #ifdef CONFIG_PM_SLEEP @@ -610,21 +562,24 @@ static inline bool need_suspend(void) { return true; } static int iTCO_wdt_suspend_noirq(struct device *dev) { + struct iTCO_wdt_private *p = dev_get_drvdata(dev); int ret = 0; - iTCO_wdt_private.suspended = false; - if (watchdog_active(&iTCO_wdt_watchdog_dev) && need_suspend()) { - ret = iTCO_wdt_stop(&iTCO_wdt_watchdog_dev); + p->suspended = false; + if (watchdog_active(&p->wddev) && need_suspend()) { + ret = iTCO_wdt_stop(&p->wddev); if (!ret) - iTCO_wdt_private.suspended = true; + p->suspended = true; } return ret; } static int iTCO_wdt_resume_noirq(struct device *dev) { - if (iTCO_wdt_private.suspended) - iTCO_wdt_start(&iTCO_wdt_watchdog_dev); + struct iTCO_wdt_private *p = dev_get_drvdata(dev); + + if (p->suspended) + iTCO_wdt_start(&p->wddev); return 0; } @@ -642,7 +597,6 @@ static const struct dev_pm_ops iTCO_wdt_pm = { static struct platform_driver iTCO_wdt_driver = { .probe = iTCO_wdt_probe, .remove = iTCO_wdt_remove, - .shutdown = iTCO_wdt_shutdown, .driver = { .name = DRV_NAME, .pm = ITCO_WDT_PM_OPS, @@ 
-651,15 +605,9 @@ static struct platform_driver iTCO_wdt_driver = { static int __init iTCO_wdt_init_module(void) { - int err; - pr_info("Intel TCO WatchDog Timer Driver v%s\n", DRV_VERSION); - err = platform_driver_register(&iTCO_wdt_driver); - if (err) - return err; - - return 0; + return platform_driver_register(&iTCO_wdt_driver); } static void __exit iTCO_wdt_cleanup_module(void) diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index 516fbef00856..6ed39dee995f 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c @@ -161,7 +161,7 @@ static int pdc_wdt_restart(struct watchdog_device *wdt_dev, return 0; } -static struct watchdog_info pdc_wdt_info = { +static const struct watchdog_info pdc_wdt_info = { .identity = "IMG PDC Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c index a4b729259b12..45e4d02221b5 100644 --- a/drivers/watchdog/intel-mid_wdt.c +++ b/drivers/watchdog/intel-mid_wdt.c @@ -137,7 +137,6 @@ static int mid_wdt_probe(struct platform_device *pdev) wdt_dev->parent = &pdev->dev; watchdog_set_drvdata(wdt_dev, &pdev->dev); - platform_set_drvdata(pdev, wdt_dev); ret = devm_request_irq(&pdev->dev, pdata->irq, mid_wdt_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog", @@ -151,7 +150,7 @@ static int mid_wdt_probe(struct platform_device *pdev) /* Make sure the watchdog is not running */ wdt_stop(wdt_dev); - ret = watchdog_register_device(wdt_dev); + ret = devm_watchdog_register_device(&pdev->dev, wdt_dev); if (ret) { dev_err(&pdev->dev, "error registering watchdog device\n"); return ret; @@ -162,16 +161,8 @@ static int mid_wdt_probe(struct platform_device *pdev) return 0; } -static int mid_wdt_remove(struct platform_device *pdev) -{ - struct watchdog_device *wd = platform_get_drvdata(pdev); - watchdog_unregister_device(wd); - return 0; -} - static struct platform_driver mid_wdt_driver = { .probe = mid_wdt_probe, - .remove = mid_wdt_remove, .driver = { .name = "intel_mid_wdt", }, diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 8e302d0e346c..2f3b049ea301 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data, unsigned int timeout) { struct kempld_device_data *pld = wdt_data->pld; - u32 prescaler = kempld_prescaler[PRESCALER_21]; + u32 prescaler; u64 stage_timeout64; u32 stage_timeout; u32 remainder; u8 stage_cfg; +#if GCC_VERSION < 40400 + /* work around a bug compiling do_div() */ + prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]); +#else + prescaler = kempld_prescaler[PRESCALER_21]; +#endif + if (!stage) return -EINVAL; @@ -422,7 +429,7 @@ static int kempld_wdt_probe_stages(struct watchdog_device *wdd) return 0; } -static struct watchdog_info kempld_wdt_info = { +static const struct watchdog_info kempld_wdt_info = { .identity = "KEMPLD Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 582f2fa1b8d9..e0823677d8c1 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -3,7 +3,7 @@ * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. 
* - * Copyright (C) 2010 John Crispin <blogic@openwrt.org> + * Copyright (C) 2010 John Crispin <john@phrozen.org> * Based on EP93xx wdt driver */ @@ -240,6 +240,6 @@ module_platform_driver(ltq_wdt_driver); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); -MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_AUTHOR("John Crispin <john@phrozen.org>"); MODULE_DESCRIPTION("Lantiq SoC Watchdog"); MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c index fd171e6caa16..3b8bb59adf02 100644 --- a/drivers/watchdog/lpc18xx_wdt.c +++ b/drivers/watchdog/lpc18xx_wdt.c @@ -181,7 +181,7 @@ static int lpc18xx_wdt_restart(struct watchdog_device *wdt_dev, return 0; } -static struct watchdog_info lpc18xx_wdt_info = { +static const struct watchdog_info lpc18xx_wdt_info = { .identity = "NXP LPC18xx Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index af6a7c489f08..045201a6fdb3 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -212,34 +212,15 @@ static int a21_wdt_probe(struct platform_device *pdev) drv->wdt = a21_wdt; dev_set_drvdata(&pdev->dev, drv); - ret = watchdog_register_device(&a21_wdt); + ret = devm_watchdog_register_device(&pdev->dev, &a21_wdt); if (ret) { dev_err(&pdev->dev, "Cannot register watchdog device\n"); - goto err_register_wd; + return ret; } dev_info(&pdev->dev, "MEN A21 watchdog timer driver enabled\n"); return 0; - -err_register_wd: - mutex_destroy(&drv->lock); - - return ret; -} - -static int a21_wdt_remove(struct platform_device *pdev) -{ - struct a21_wdt_drv *drv = dev_get_drvdata(&pdev->dev); - - dev_warn(&pdev->dev, - "Unregistering A21 watchdog driver, board may reboot\n"); - - watchdog_unregister_device(&drv->wdt); - - mutex_destroy(&drv->lock); - - return 0; } static void a21_wdt_shutdown(struct platform_device *pdev) @@ -257,7 +238,6 @@ MODULE_DEVICE_TABLE(of, a21_wdt_ids); static struct platform_driver a21_wdt_driver = { .probe = a21_wdt_probe, - .remove = a21_wdt_remove, .shutdown = a21_wdt_shutdown, .driver = { .name = "a21-watchdog", diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c index 56ea1caf71c3..491b9bf13d84 100644 --- a/drivers/watchdog/meson_wdt.c +++ b/drivers/watchdog/meson_wdt.c @@ -201,38 +201,19 @@ static int meson_wdt_probe(struct platform_device *pdev) meson_wdt_stop(&meson_wdt->wdt_dev); - err = watchdog_register_device(&meson_wdt->wdt_dev); + watchdog_stop_on_reboot(&meson_wdt->wdt_dev); + err = devm_watchdog_register_device(&pdev->dev, &meson_wdt->wdt_dev); if (err) return err; - platform_set_drvdata(pdev, meson_wdt); - dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)", meson_wdt->wdt_dev.timeout, nowayout); return 0; } -static int meson_wdt_remove(struct platform_device *pdev) -{ - struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev); - - watchdog_unregister_device(&meson_wdt->wdt_dev); - - return 0; -} - -static void meson_wdt_shutdown(struct platform_device *pdev) -{ - struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev); - - meson_wdt_stop(&meson_wdt->wdt_dev); -} - static struct platform_driver meson_wdt_driver = { .probe = meson_wdt_probe, - .remove = meson_wdt_remove, - .shutdown = meson_wdt_shutdown, .driver = { .name = DRV_NAME, .of_match_table = meson_wdt_dt_ids, diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 
d5735c12067d..48a06067075d 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c @@ -1,7 +1,7 @@ /* * Ralink MT7621/MT7628 built-in hardware watchdog timer * - * Copyright (C) 2014 John Crispin <blogic@openwrt.org> + * Copyright (C) 2014 John Crispin <john@phrozen.org> * * This driver was based on: drivers/watchdog/rt2880_wdt.c * @@ -110,7 +110,7 @@ static struct watchdog_info mt7621_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; -static struct watchdog_ops mt7621_wdt_ops = { +static const struct watchdog_ops mt7621_wdt_ops = { .owner = THIS_MODULE, .start = mt7621_wdt_start, .stop = mt7621_wdt_stop, @@ -181,5 +181,5 @@ static struct platform_driver mt7621_wdt_driver = { module_platform_driver(mt7621_wdt_driver); MODULE_DESCRIPTION("MediaTek MT762x hardware watchdog driver"); -MODULE_AUTHOR("John Crispin <blogic@openwrt.org"); +MODULE_AUTHOR("John Crispin <john@phrozen.org"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/nic7018_wdt.c b/drivers/watchdog/nic7018_wdt.c new file mode 100644 index 000000000000..dcd265685837 --- /dev/null +++ b/drivers/watchdog/nic7018_wdt.c @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2016 National Instruments Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define LOCK 0xA5 +#define UNLOCK 0x5A + +#define WDT_CTRL_RESET_EN BIT(7) +#define WDT_RELOAD_PORT_EN BIT(7) + +#define WDT_CTRL 1 +#define WDT_RELOAD_CTRL 2 +#define WDT_PRESET_PRESCALE 4 +#define WDT_REG_LOCK 5 +#define WDT_COUNT 6 +#define WDT_RELOAD_PORT 7 + +#define WDT_MIN_TIMEOUT 1 +#define WDT_MAX_TIMEOUT 464 +#define WDT_DEFAULT_TIMEOUT 80 + +#define WDT_MAX_COUNTER 15 + +static unsigned int timeout; +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, + "Watchdog timeout in seconds. (default=" + __MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started. 
(default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct nic7018_wdt { + u16 io_base; + u32 period; + struct watchdog_device wdd; +}; + +struct nic7018_config { + u32 period; + u8 divider; +}; + +static const struct nic7018_config nic7018_configs[] = { + { 2, 4 }, + { 32, 5 }, +}; + +static inline u32 nic7018_timeout(u32 period, u8 counter) +{ + return period * counter - period / 2; +} + +static const struct nic7018_config *nic7018_get_config(u32 timeout, + u8 *counter) +{ + const struct nic7018_config *config; + u8 count; + + if (timeout < 30 && timeout != 16) { + config = &nic7018_configs[0]; + count = timeout / 2 + 1; + } else { + config = &nic7018_configs[1]; + count = DIV_ROUND_UP(timeout + 16, 32); + + if (count > WDT_MAX_COUNTER) + count = WDT_MAX_COUNTER; + } + *counter = count; + return config; +} + +static int nic7018_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd); + const struct nic7018_config *config; + u8 counter; + + config = nic7018_get_config(timeout, &counter); + + outb(counter << 4 | config->divider, + wdt->io_base + WDT_PRESET_PRESCALE); + + wdd->timeout = nic7018_timeout(config->period, counter); + wdt->period = config->period; + + return 0; +} + +static int nic7018_start(struct watchdog_device *wdd) +{ + struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd); + u8 control; + + nic7018_set_timeout(wdd, wdd->timeout); + + control = inb(wdt->io_base + WDT_RELOAD_CTRL); + outb(control | WDT_RELOAD_PORT_EN, wdt->io_base + WDT_RELOAD_CTRL); + + outb(1, wdt->io_base + WDT_RELOAD_PORT); + + control = inb(wdt->io_base + WDT_CTRL); + outb(control | WDT_CTRL_RESET_EN, wdt->io_base + WDT_CTRL); + + return 0; +} + +static int nic7018_stop(struct watchdog_device *wdd) +{ + struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd); + + outb(0, wdt->io_base + WDT_CTRL); + outb(0, wdt->io_base + WDT_RELOAD_CTRL); + outb(0xF0, wdt->io_base + WDT_PRESET_PRESCALE); + + return 0; +} + +static int nic7018_ping(struct watchdog_device *wdd) +{ + struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd); + + outb(1, wdt->io_base + WDT_RELOAD_PORT); + + return 0; +} + +static unsigned int nic7018_get_timeleft(struct watchdog_device *wdd) +{ + struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd); + u8 count; + + count = inb(wdt->io_base + WDT_COUNT) & 0xF; + if (!count) + return 0; + + return nic7018_timeout(wdt->period, count); +} + +static const struct watchdog_info nic7018_wdd_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "NIC7018 Watchdog", +}; + +static const struct watchdog_ops nic7018_wdd_ops = { + .owner = THIS_MODULE, + .start = nic7018_start, + .stop = nic7018_stop, + .ping = nic7018_ping, + .set_timeout = nic7018_set_timeout, + .get_timeleft = nic7018_get_timeleft, +}; + +static int nic7018_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct watchdog_device *wdd; + struct nic7018_wdt *wdt; + struct resource *io_rc; + int ret; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + platform_set_drvdata(pdev, wdt); + + io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!io_rc) { + dev_err(dev, "missing IO resources\n"); + return -EINVAL; + } + + if (!devm_request_region(dev, io_rc->start, resource_size(io_rc), + KBUILD_MODNAME)) { + dev_err(dev, "failed to get IO region\n"); + return -EBUSY; + } + + wdt->io_base = io_rc->start; + wdd = &wdt->wdd; + wdd->info = &nic7018_wdd_info; + wdd->ops = 
&nic7018_wdd_ops; + wdd->min_timeout = WDT_MIN_TIMEOUT; + wdd->max_timeout = WDT_MAX_TIMEOUT; + wdd->timeout = WDT_DEFAULT_TIMEOUT; + wdd->parent = dev; + + watchdog_set_drvdata(wdd, wdt); + watchdog_set_nowayout(wdd, nowayout); + + ret = watchdog_init_timeout(wdd, timeout, dev); + if (ret) + dev_warn(dev, "unable to set timeout value, using default\n"); + + /* Unlock WDT register */ + outb(UNLOCK, wdt->io_base + WDT_REG_LOCK); + + ret = watchdog_register_device(wdd); + if (ret) { + outb(LOCK, wdt->io_base + WDT_REG_LOCK); + dev_err(dev, "failed to register watchdog\n"); + return ret; + } + + dev_dbg(dev, "io_base=0x%04X, timeout=%d, nowayout=%d\n", + wdt->io_base, timeout, nowayout); + return 0; +} + +static int nic7018_remove(struct platform_device *pdev) +{ + struct nic7018_wdt *wdt = platform_get_drvdata(pdev); + + watchdog_unregister_device(&wdt->wdd); + + /* Lock WDT register */ + outb(LOCK, wdt->io_base + WDT_REG_LOCK); + + return 0; +} + +static const struct acpi_device_id nic7018_device_ids[] = { + {"NIC7018", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, nic7018_device_ids); + +static struct platform_driver watchdog_driver = { + .probe = nic7018_probe, + .remove = nic7018_remove, + .driver = { + .name = KBUILD_MODNAME, + .acpi_match_table = ACPI_PTR(nic7018_device_ids), + }, +}; + +module_platform_driver(watchdog_driver); + +MODULE_DESCRIPTION("National Instruments NIC7018 Watchdog driver"); +MODULE_AUTHOR("Hui Chun Ong <hui.chun.ong@ni.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index c6b8f4a43bde..39be4dd8035e 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -395,7 +395,7 @@ static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev, rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET; - WARN(1, FW_BUG "falling back to harcoded RSTOUT reg %pa\n", &rstout); + WARN(1, FW_BUG "falling back to hardcoded RSTOUT reg %pa\n", &rstout); return devm_ioremap(&pdev->dev, rstout, 0x4); } diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c index 0cdfee266690..e35cf5e87907 100644 --- a/drivers/watchdog/pika_wdt.c +++ b/drivers/watchdog/pika_wdt.c @@ -54,7 +54,7 @@ static struct { struct timer_list timer; /* The timer that pings the watchdog */ } pikawdt_private; -static struct watchdog_info ident = { +static struct watchdog_info ident __ro_after_init = { .identity = DRV_NAME, .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c index 0805ee2acd7a..e60f55702ab7 100644 --- a/drivers/watchdog/rn5t618_wdt.c +++ b/drivers/watchdog/rn5t618_wdt.c @@ -130,7 +130,7 @@ static int rn5t618_wdt_ping(struct watchdog_device *wdt_dev) RN5T618_PWRIRQ_IR_WDOG, 0); } -static struct watchdog_info rn5t618_wdt_info = { +static const struct watchdog_info rn5t618_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = DRIVER_NAME, diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 14b4fd428fff..05524baf7dcc 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -2,7 +2,7 @@ * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer * * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * Copyright (C) 2013 John Crispin <john@phrozen.org> * * This driver was based on: drivers/watchdog/softdog.c * @@ -124,7 +124,7 @@ static struct watchdog_info rt288x_wdt_info = { 
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; -static struct watchdog_ops rt288x_wdt_ops = { +static const struct watchdog_ops rt288x_wdt_ops = { .owner = THIS_MODULE, .start = rt288x_wdt_start, .stop = rt288x_wdt_stop, diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 59e95762a6de..6ed97596ca80 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -23,8 +23,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> @@ -46,6 +44,7 @@ #define S3C2410_WTCON 0x00 #define S3C2410_WTDAT 0x04 #define S3C2410_WTCNT 0x08 +#define S3C2410_WTCLRINT 0x0c #define S3C2410_WTCNT_MAXCNT 0xffff @@ -64,14 +63,15 @@ #define S3C2410_WTCON_PRESCALE_MASK (0xff << 8) #define S3C2410_WTCON_PRESCALE_MAX 0xff -#define CONFIG_S3C2410_WATCHDOG_ATBOOT (0) -#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15) +#define S3C2410_WATCHDOG_ATBOOT (0) +#define S3C2410_WATCHDOG_DEFAULT_TIME (15) #define EXYNOS5_RST_STAT_REG_OFFSET 0x0404 #define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408 #define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c #define QUIRK_HAS_PMU_CONFIG (1 << 0) #define QUIRK_HAS_RST_STAT (1 << 1) +#define QUIRK_HAS_WTCLRINT_REG (1 << 2) /* These quirks require that we have a PMU register map */ #define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \ @@ -79,26 +79,23 @@ static bool nowayout = WATCHDOG_NOWAYOUT; static int tmr_margin; -static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT; +static int tmr_atboot = S3C2410_WATCHDOG_ATBOOT; static int soft_noboot; -static int debug; module_param(tmr_margin, int, 0); module_param(tmr_atboot, int, 0); module_param(nowayout, bool, 0); module_param(soft_noboot, int, 0); -module_param(debug, int, 0); MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. 
(default=" - __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")"); + __MODULE_STRING(S3C2410_WATCHDOG_DEFAULT_TIME) ")"); MODULE_PARM_DESC(tmr_atboot, "Watchdog is started at boot time if set to 1, default=" - __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT)); + __MODULE_STRING(S3C2410_WATCHDOG_ATBOOT)); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, " "0 to reboot (default 0)"); -MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)"); /** * struct s3c2410_wdt_variant - Per-variant config data @@ -143,13 +140,18 @@ static const struct s3c2410_wdt_variant drv_data_s3c2410 = { }; #ifdef CONFIG_OF +static const struct s3c2410_wdt_variant drv_data_s3c6410 = { + .quirks = QUIRK_HAS_WTCLRINT_REG, +}; + static const struct s3c2410_wdt_variant drv_data_exynos5250 = { .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET, .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET, .mask_bit = 20, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 20, - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, + .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ + | QUIRK_HAS_WTCLRINT_REG, }; static const struct s3c2410_wdt_variant drv_data_exynos5420 = { @@ -158,7 +160,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = { .mask_bit = 0, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 9, - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, + .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ + | QUIRK_HAS_WTCLRINT_REG, }; static const struct s3c2410_wdt_variant drv_data_exynos7 = { @@ -167,12 +170,15 @@ static const struct s3c2410_wdt_variant drv_data_exynos7 = { .mask_bit = 23, .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, .rst_stat_bit = 23, /* A57 WDTRESET */ - .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, + .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \ + | QUIRK_HAS_WTCLRINT_REG, }; static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt", .data = &drv_data_s3c2410 }, + { .compatible = "samsung,s3c6410-wdt", + .data = &drv_data_s3c6410 }, { .compatible = "samsung,exynos5250-wdt", .data = &drv_data_exynos5250 }, { .compatible = "samsung,exynos5420-wdt", @@ -193,14 +199,6 @@ static const struct platform_device_id s3c2410_wdt_ids[] = { }; MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids); -/* watchdog control routines */ - -#define DBG(fmt, ...) 
\ -do { \ - if (debug) \ - pr_info(fmt, ##__VA_ARGS__); \ -} while (0) - /* functions */ static inline unsigned int s3c2410wdt_max_timeout(struct clk *clock) @@ -296,8 +294,8 @@ static int s3c2410wdt_start(struct watchdog_device *wdd) wtcon |= S3C2410_WTCON_RSTEN; } - DBG("%s: count=0x%08x, wtcon=%08lx\n", - __func__, wdt->count, wtcon); + dev_dbg(wdt->dev, "Starting watchdog: count=0x%08x, wtcon=%08lx\n", + wdt->count, wtcon); writel(wdt->count, wdt->reg_base + S3C2410_WTDAT); writel(wdt->count, wdt->reg_base + S3C2410_WTCNT); @@ -326,8 +324,8 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou freq = DIV_ROUND_UP(freq, 128); count = timeout * freq; - DBG("%s: count=%d, timeout=%d, freq=%lu\n", - __func__, count, timeout, freq); + dev_dbg(wdt->dev, "Heartbeat: count=%d, timeout=%d, freq=%lu\n", + count, timeout, freq); /* if the count is bigger than the watchdog register, then work out what we need to do (and if) we can @@ -343,8 +341,8 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou } } - DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n", - __func__, timeout, divisor, count, DIV_ROUND_UP(count, divisor)); + dev_dbg(wdt->dev, "Heartbeat: timeout=%d, divisor=%d, count=%d (%08x)\n", + timeout, divisor, count, DIV_ROUND_UP(count, divisor)); count = DIV_ROUND_UP(count, divisor); wdt->count = count; @@ -394,7 +392,7 @@ static const struct watchdog_info s3c2410_wdt_ident = { .identity = "S3C2410 Watchdog", }; -static struct watchdog_ops s3c2410wdt_ops = { +static const struct watchdog_ops s3c2410wdt_ops = { .owner = THIS_MODULE, .start = s3c2410wdt_start, .stop = s3c2410wdt_stop, @@ -406,7 +404,7 @@ static struct watchdog_ops s3c2410wdt_ops = { static struct watchdog_device s3c2410_wdd = { .info = &s3c2410_wdt_ident, .ops = &s3c2410wdt_ops, - .timeout = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME, + .timeout = S3C2410_WATCHDOG_DEFAULT_TIME, }; /* interrupt handler code */ @@ -418,6 +416,10 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param) dev_info(wdt->dev, "watchdog timer expired (irq)\n"); s3c2410wdt_keepalive(&wdt->wdt_device); + + if (wdt->drv_data->quirks & QUIRK_HAS_WTCLRINT_REG) + writel(0x1, wdt->reg_base + S3C2410_WTCLRINT); + return IRQ_HANDLED; } @@ -505,9 +507,8 @@ static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt) return 0; } -/* s3c2410_get_wdt_driver_data */ static inline struct s3c2410_wdt_variant * -get_wdt_drv_data(struct platform_device *pdev) +s3c2410_get_wdt_drv_data(struct platform_device *pdev) { if (pdev->dev.of_node) { const struct of_device_id *match; @@ -529,8 +530,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev) int started = 0; int ret; - DBG("%s: probe=%p\n", __func__, pdev); - dev = &pdev->dev; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); @@ -541,7 +540,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev) spin_lock_init(&wdt->lock); wdt->wdt_device = s3c2410_wdd; - wdt->drv_data = get_wdt_drv_data(pdev); + wdt->drv_data = s3c2410_get_wdt_drv_data(pdev); if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) { wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,syscon-phandle"); @@ -566,8 +565,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev) goto err; } - DBG("probe: mapped reg_base=%p\n", wdt->reg_base); - wdt->clock = devm_clk_get(dev, "watchdog"); if (IS_ERR(wdt->clock)) { dev_err(dev, "failed to find watchdog clock source\n"); @@ -600,12 +597,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev) 
wdt->wdt_device.timeout); if (ret) { started = s3c2410wdt_set_heartbeat(&wdt->wdt_device, - CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); + S3C2410_WATCHDOG_DEFAULT_TIME); if (started == 0) dev_info(dev, "tmr_margin value out of range, default %d used\n", - CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); + S3C2410_WATCHDOG_DEFAULT_TIME); else dev_info(dev, "default timer value is out of range, " "cannot start\n"); diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index 8965e3f536c3..d3be4f831db5 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -188,12 +188,14 @@ static int __init sa1100dog_init(void) pre_margin = oscr_freq * margin; ret = misc_register(&sa1100dog_miscdev); - if (ret == 0) + if (ret == 0) { pr_info("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n", margin); - return ret; -err: + return 0; + } + clk_disable_unprepare(clk); +err: clk_put(clk); return ret; } diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c index a49634cdc1cc..f709962018ac 100644 --- a/drivers/watchdog/sama5d4_wdt.c +++ b/drivers/watchdog/sama5d4_wdt.c @@ -28,7 +28,7 @@ struct sama5d4_wdt { struct watchdog_device wdd; void __iomem *reg_base; - u32 config; + u32 mr; }; static int wdt_timeout = WDT_DEFAULT_TIMEOUT; @@ -53,11 +53,9 @@ MODULE_PARM_DESC(nowayout, static int sama5d4_wdt_start(struct watchdog_device *wdd) { struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); - u32 reg; - reg = wdt_read(wdt, AT91_WDT_MR); - reg &= ~AT91_WDT_WDDIS; - wdt_write(wdt, AT91_WDT_MR, reg); + wdt->mr &= ~AT91_WDT_WDDIS; + wdt_write(wdt, AT91_WDT_MR, wdt->mr); return 0; } @@ -65,11 +63,9 @@ static int sama5d4_wdt_start(struct watchdog_device *wdd) static int sama5d4_wdt_stop(struct watchdog_device *wdd) { struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); - u32 reg; - reg = wdt_read(wdt, AT91_WDT_MR); - reg |= AT91_WDT_WDDIS; - wdt_write(wdt, AT91_WDT_MR, reg); + wdt->mr |= AT91_WDT_WDDIS; + wdt_write(wdt, AT91_WDT_MR, wdt->mr); return 0; } @@ -88,14 +84,12 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd, { struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); u32 value = WDT_SEC2TICKS(timeout); - u32 reg; - reg = wdt_read(wdt, AT91_WDT_MR); - reg &= ~AT91_WDT_WDV; - reg &= ~AT91_WDT_WDD; - reg |= AT91_WDT_SET_WDV(value); - reg |= AT91_WDT_SET_WDD(value); - wdt_write(wdt, AT91_WDT_MR, reg); + wdt->mr &= ~AT91_WDT_WDV; + wdt->mr &= ~AT91_WDT_WDD; + wdt->mr |= AT91_WDT_SET_WDV(value); + wdt->mr |= AT91_WDT_SET_WDD(value); + wdt_write(wdt, AT91_WDT_MR, wdt->mr); wdd->timeout = timeout; @@ -107,7 +101,7 @@ static const struct watchdog_info sama5d4_wdt_info = { .identity = "Atmel SAMA5D4 Watchdog", }; -static struct watchdog_ops sama5d4_wdt_ops = { +static const struct watchdog_ops sama5d4_wdt_ops = { .owner = THIS_MODULE, .start = sama5d4_wdt_start, .stop = sama5d4_wdt_stop, @@ -132,19 +126,19 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt) { const char *tmp; - wdt->config = AT91_WDT_WDDIS; + wdt->mr = AT91_WDT_WDDIS; if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) && !strcmp(tmp, "software")) - wdt->config |= AT91_WDT_WDFIEN; + wdt->mr |= AT91_WDT_WDFIEN; else - wdt->config |= AT91_WDT_WDRSTEN; + wdt->mr |= AT91_WDT_WDRSTEN; if (of_property_read_bool(np, "atmel,idle-halt")) - wdt->config |= AT91_WDT_WDIDLEHLT; + wdt->mr |= AT91_WDT_WDIDLEHLT; if (of_property_read_bool(np, "atmel,dbg-halt")) - wdt->config |= AT91_WDT_WDDBGHLT; + wdt->mr |= AT91_WDT_WDDBGHLT; return 0; } @@ -163,11 +157,10 @@ 
static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) reg &= ~AT91_WDT_WDDIS; wdt_write(wdt, AT91_WDT_MR, reg); - reg = wdt->config; - reg |= AT91_WDT_SET_WDD(value); - reg |= AT91_WDT_SET_WDV(value); + wdt->mr |= AT91_WDT_SET_WDD(value); + wdt->mr |= AT91_WDT_SET_WDV(value); - wdt_write(wdt, AT91_WDT_MR, reg); + wdt_write(wdt, AT91_WDT_MR, wdt->mr); return 0; } @@ -211,7 +204,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) return ret; } - if ((wdt->config & AT91_WDT_WDFIEN) && irq) { + if ((wdt->mr & AT91_WDT_WDFIEN) && irq) { ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler, IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND, pdev->name, pdev); @@ -265,11 +258,28 @@ static const struct of_device_id sama5d4_wdt_of_match[] = { }; MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match); +#ifdef CONFIG_PM_SLEEP +static int sama5d4_wdt_resume(struct device *dev) +{ + struct sama5d4_wdt *wdt = dev_get_drvdata(dev); + + wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); + if (wdt->mr & AT91_WDT_WDDIS) + wdt_write(wdt, AT91_WDT_MR, wdt->mr); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sama5d4_wdt_pm_ops, NULL, + sama5d4_wdt_resume); + static struct platform_driver sama5d4_wdt_driver = { .probe = sama5d4_wdt_probe, .remove = sama5d4_wdt_remove, .driver = { .name = "sama5d4_wdt", + .pm = &sama5d4_wdt_pm_ops, .of_match_table = sama5d4_wdt_of_match, } }; diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index ce0c38bd0f00..316c2eb122d2 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -207,7 +207,7 @@ static irqreturn_t sbsa_gwdt_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct watchdog_info sbsa_gwdt_info = { +static const struct watchdog_info sbsa_gwdt_info = { .identity = WATCHDOG_NAME, .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | @@ -215,7 +215,7 @@ static struct watchdog_info sbsa_gwdt_info = { WDIOF_CARDRESET, }; -static struct watchdog_ops sbsa_gwdt_ops = { +static const struct watchdog_ops sbsa_gwdt_ops = { .owner = THIS_MODULE, .start = sbsa_gwdt_start, .stop = sbsa_gwdt_stop, diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c index 3050a0031479..4eea351e09b0 100644 --- a/drivers/watchdog/sirfsoc_wdt.c +++ b/drivers/watchdog/sirfsoc_wdt.c @@ -127,7 +127,7 @@ static const struct watchdog_info sirfsoc_wdt_ident = { .identity = "SiRFSOC Watchdog", }; -static struct watchdog_ops sirfsoc_wdt_ops = { +static const struct watchdog_ops sirfsoc_wdt_ops = { .owner = THIS_MODULE, .start = sirfsoc_wdt_enable, .stop = sirfsoc_wdt_disable, diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index c7bdc986dca1..060740625485 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -21,13 +21,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/hrtimer.h> #include <linux/init.h> -#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/reboot.h> -#include <linux/timer.h> #include <linux/types.h> #include <linux/watchdog.h> @@ -54,7 +53,10 @@ module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); -static void softdog_fire(unsigned long data) +static struct hrtimer softdog_ticktock; +static struct hrtimer softdog_preticktock; + +static enum hrtimer_restart softdog_fire(struct hrtimer *timer) { module_put(THIS_MODULE); if (soft_noboot) { @@ -67,49 +69,52 @@ static void softdog_fire(unsigned 
long data) emergency_restart(); pr_crit("Reboot didn't ?????\n"); } -} -static struct timer_list softdog_ticktock = - TIMER_INITIALIZER(softdog_fire, 0, 0); + return HRTIMER_NORESTART; +} static struct watchdog_device softdog_dev; -static void softdog_pretimeout(unsigned long data) +static enum hrtimer_restart softdog_pretimeout(struct hrtimer *timer) { watchdog_notify_pretimeout(&softdog_dev); -} -static struct timer_list softdog_preticktock = - TIMER_INITIALIZER(softdog_pretimeout, 0, 0); + return HRTIMER_NORESTART; +} static int softdog_ping(struct watchdog_device *w) { - if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ))) + if (!hrtimer_active(&softdog_ticktock)) __module_get(THIS_MODULE); - - if (w->pretimeout) - mod_timer(&softdog_preticktock, jiffies + - (w->timeout - w->pretimeout) * HZ); - else - del_timer(&softdog_preticktock); + hrtimer_start(&softdog_ticktock, ktime_set(w->timeout, 0), + HRTIMER_MODE_REL); + + if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { + if (w->pretimeout) + hrtimer_start(&softdog_preticktock, + ktime_set(w->timeout - w->pretimeout, 0), + HRTIMER_MODE_REL); + else + hrtimer_cancel(&softdog_preticktock); + } return 0; } static int softdog_stop(struct watchdog_device *w) { - if (del_timer(&softdog_ticktock)) + if (hrtimer_cancel(&softdog_ticktock)) module_put(THIS_MODULE); - del_timer(&softdog_preticktock); + if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) + hrtimer_cancel(&softdog_preticktock); return 0; } static struct watchdog_info softdog_info = { .identity = "Software Watchdog", - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | - WDIOF_PRETIMEOUT, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; static const struct watchdog_ops softdog_ops = { @@ -134,6 +139,16 @@ static int __init softdog_init(void) watchdog_set_nowayout(&softdog_dev, nowayout); watchdog_stop_on_reboot(&softdog_dev); + hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + softdog_ticktock.function = softdog_fire; + + if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { + softdog_info.options |= WDIOF_PRETIMEOUT; + hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + softdog_preticktock.function = softdog_pretimeout; + } + ret = watchdog_register_device(&softdog_dev); if (ret) return ret; diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c index 1467fe50a76f..00907973608c 100644 --- a/drivers/watchdog/sun4v_wdt.c +++ b/drivers/watchdog/sun4v_wdt.c @@ -77,7 +77,7 @@ static const struct watchdog_info sun4v_wdt_ident = { .firmware_version = 0, }; -static struct watchdog_ops sun4v_wdt_ops = { +static const struct watchdog_ops sun4v_wdt_ops = { .owner = THIS_MODULE, .start = sun4v_wdt_ping, .stop = sun4v_wdt_stop, diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index 953bb7b7446f..9728fa32c357 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c @@ -242,8 +242,6 @@ static int sunxi_wdt_probe(struct platform_device *pdev) if (!sunxi_wdt) return -EINVAL; - platform_set_drvdata(pdev, sunxi_wdt); - device = of_match_device(sunxi_wdt_dt_ids, &pdev->dev); if (!device) return -ENODEV; @@ -270,7 +268,8 @@ static int sunxi_wdt_probe(struct platform_device *pdev) sunxi_wdt_stop(&sunxi_wdt->wdt_dev); - err = watchdog_register_device(&sunxi_wdt->wdt_dev); + watchdog_stop_on_reboot(&sunxi_wdt->wdt_dev); + err = devm_watchdog_register_device(&pdev->dev, &sunxi_wdt->wdt_dev); if (unlikely(err)) return err; @@ -280,27 +279,8 @@ static int 
sunxi_wdt_probe(struct platform_device *pdev) return 0; } -static int sunxi_wdt_remove(struct platform_device *pdev) -{ - struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); - - watchdog_unregister_device(&sunxi_wdt->wdt_dev); - watchdog_set_drvdata(&sunxi_wdt->wdt_dev, NULL); - - return 0; -} - -static void sunxi_wdt_shutdown(struct platform_device *pdev) -{ - struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); - - sunxi_wdt_stop(&sunxi_wdt->wdt_dev); -} - static struct platform_driver sunxi_wdt_driver = { .probe = sunxi_wdt_probe, - .remove = sunxi_wdt_remove, - .shutdown = sunxi_wdt_shutdown, .driver = { .name = DRV_NAME, .of_match_table = sunxi_wdt_dt_ids, diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c index 202c4b9cc921..d5fcce062920 100644 --- a/drivers/watchdog/tangox_wdt.c +++ b/drivers/watchdog/tangox_wdt.c @@ -15,9 +15,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/notifier.h> #include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/watchdog.h> #define DEFAULT_TIMEOUT 30 @@ -47,7 +45,6 @@ struct tangox_wdt_device { void __iomem *base; unsigned long clk_rate; struct clk *clk; - struct notifier_block restart; }; static int tangox_wdt_set_timeout(struct watchdog_device *wdt, @@ -96,24 +93,24 @@ static const struct watchdog_info tangox_wdt_info = { .identity = "tangox watchdog", }; +static int tangox_wdt_restart(struct watchdog_device *wdt, + unsigned long action, void *data) +{ + struct tangox_wdt_device *dev = watchdog_get_drvdata(wdt); + + writel(1, dev->base + WD_COUNTER); + + return 0; +} + static const struct watchdog_ops tangox_wdt_ops = { .start = tangox_wdt_start, .stop = tangox_wdt_stop, .set_timeout = tangox_wdt_set_timeout, .get_timeleft = tangox_wdt_get_timeleft, + .restart = tangox_wdt_restart, }; -static int tangox_wdt_restart(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct tangox_wdt_device *dev = - container_of(nb, struct tangox_wdt_device, restart); - - writel(1, dev->base + WD_COUNTER); - - return NOTIFY_DONE; -} - static int tangox_wdt_probe(struct platform_device *pdev) { struct tangox_wdt_device *dev; @@ -174,18 +171,14 @@ static int tangox_wdt_probe(struct platform_device *pdev) tangox_wdt_start(&dev->wdt); } + watchdog_set_restart_priority(&dev->wdt, 128); + err = watchdog_register_device(&dev->wdt); if (err) goto err; platform_set_drvdata(pdev, dev); - dev->restart.notifier_call = tangox_wdt_restart; - dev->restart.priority = 128; - err = register_restart_handler(&dev->restart); - if (err) - dev_warn(&pdev->dev, "failed to register restart handler\n"); - dev_info(&pdev->dev, "SMP86xx/SMP87xx watchdog registered\n"); return 0; @@ -202,7 +195,6 @@ static int tangox_wdt_remove(struct platform_device *pdev) tangox_wdt_stop(&dev->wdt); clk_disable_unprepare(dev->clk); - unregister_restart_handler(&dev->restart); watchdog_unregister_device(&dev->wdt); return 0; diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 2d53c3f9394f..9403c08816e3 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c @@ -226,7 +226,7 @@ static int tegra_wdt_probe(struct platform_device *pdev) watchdog_set_nowayout(wdd, nowayout); - ret = watchdog_register_device(wdd); + ret = devm_watchdog_register_device(&pdev->dev, wdd); if (ret) { dev_err(&pdev->dev, "failed to register watchdog device\n"); @@ -248,8 +248,6 @@ static int tegra_wdt_remove(struct platform_device *pdev) 
tegra_wdt_stop(&wdt->wdd); - watchdog_unregister_device(&wdt->wdd); - dev_info(&pdev->dev, "removed wdt\n"); return 0; diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 4b541934b6c5..17c25daebcce 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -13,428 +13,159 @@ * warranty of any kind, whether express or implied. */ -#include <linux/fs.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/miscdevice.h> -#include <linux/mutex.h> #include <linux/platform_device.h> -#include <linux/slab.h> +#include <linux/module.h> #include <linux/watchdog.h> -#include <linux/uaccess.h> +#include <linux/io.h> -#define TS72XX_WDT_FEED_VAL 0x05 -#define TS72XX_WDT_DEFAULT_TIMEOUT 8 +#define TS72XX_WDT_DEFAULT_TIMEOUT 30 -static int timeout = TS72XX_WDT_DEFAULT_TIMEOUT; +static int timeout; module_param(timeout, int, 0); -MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. " - "(1 <= timeout <= 8, default=" - __MODULE_STRING(TS72XX_WDT_DEFAULT_TIMEOUT) - ")"); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds."); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); -/** - * struct ts72xx_wdt - watchdog control structure - * @lock: lock that protects this structure - * @regval: watchdog timeout value suitable for control register - * @flags: flags controlling watchdog device state - * @control_reg: watchdog control register - * @feed_reg: watchdog feed register - * @pdev: back pointer to platform dev - */ -struct ts72xx_wdt { - struct mutex lock; - int regval; - -#define TS72XX_WDT_BUSY_FLAG 1 -#define TS72XX_WDT_EXPECT_CLOSE_FLAG 2 - int flags; +/* priv->control_reg */ +#define TS72XX_WDT_CTRL_DISABLE 0x00 +#define TS72XX_WDT_CTRL_250MS 0x01 +#define TS72XX_WDT_CTRL_500MS 0x02 +#define TS72XX_WDT_CTRL_1SEC 0x03 +#define TS72XX_WDT_CTRL_RESERVED 0x04 +#define TS72XX_WDT_CTRL_2SEC 0x05 +#define TS72XX_WDT_CTRL_4SEC 0x06 +#define TS72XX_WDT_CTRL_8SEC 0x07 + +/* priv->feed_reg */ +#define TS72XX_WDT_FEED_VAL 0x05 +struct ts72xx_wdt_priv { void __iomem *control_reg; void __iomem *feed_reg; - - struct platform_device *pdev; + struct watchdog_device wdd; + unsigned char regval; }; -static struct platform_device *ts72xx_wdt_pdev; - -/* - * TS-72xx Watchdog supports following timeouts (value written - * to control register): - * value description - * ------------------------- - * 0x00 watchdog disabled - * 0x01 250ms - * 0x02 500ms - * 0x03 1s - * 0x04 reserved - * 0x05 2s - * 0x06 4s - * 0x07 8s - * - * Timeouts below 1s are not very usable so we don't - * allow them at all. - * - * We provide two functions that convert between these: - * timeout_to_regval() and regval_to_timeout(). - */ -static const struct { - int timeout; - int regval; -} ts72xx_wdt_map[] = { - { 1, 3 }, - { 2, 5 }, - { 4, 6 }, - { 8, 7 }, -}; - -/** - * timeout_to_regval() - converts given timeout to control register value - * @new_timeout: timeout in seconds to be converted - * - * Function converts given @new_timeout into valid value that can - * be programmed into watchdog control register. When conversion is - * not possible, function returns %-EINVAL. 
- */ -static int timeout_to_regval(int new_timeout) -{ - int i; - - /* first limit it to 1 - 8 seconds */ - new_timeout = clamp_val(new_timeout, 1, 8); - - for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) { - if (ts72xx_wdt_map[i].timeout >= new_timeout) - return ts72xx_wdt_map[i].regval; - } - - return -EINVAL; -} - -/** - * regval_to_timeout() - converts control register value to timeout - * @regval: control register value to be converted - * - * Function converts given @regval to timeout in seconds (1, 2, 4 or 8). - * If @regval cannot be converted, function returns %-EINVAL. - */ -static int regval_to_timeout(int regval) +static int ts72xx_wdt_start(struct watchdog_device *wdd) { - int i; + struct ts72xx_wdt_priv *priv = watchdog_get_drvdata(wdd); - for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) { - if (ts72xx_wdt_map[i].regval == regval) - return ts72xx_wdt_map[i].timeout; - } + writeb(TS72XX_WDT_FEED_VAL, priv->feed_reg); + writeb(priv->regval, priv->control_reg); - return -EINVAL; + return 0; } -/** - * ts72xx_wdt_kick() - kick the watchdog - * @wdt: watchdog to be kicked - * - * Called with @wdt->lock held. - */ -static inline void ts72xx_wdt_kick(struct ts72xx_wdt *wdt) +static int ts72xx_wdt_stop(struct watchdog_device *wdd) { - __raw_writeb(TS72XX_WDT_FEED_VAL, wdt->feed_reg); -} + struct ts72xx_wdt_priv *priv = watchdog_get_drvdata(wdd); -/** - * ts72xx_wdt_start() - starts the watchdog timer - * @wdt: watchdog to be started - * - * This function programs timeout to watchdog timer - * and starts it. - * - * Called with @wdt->lock held. - */ -static void ts72xx_wdt_start(struct ts72xx_wdt *wdt) -{ - /* - * To program the wdt, it first must be "fed" and - * only after that (within 30 usecs) the configuration - * can be changed. - */ - ts72xx_wdt_kick(wdt); - __raw_writeb((u8)wdt->regval, wdt->control_reg); -} + writeb(TS72XX_WDT_FEED_VAL, priv->feed_reg); + writeb(TS72XX_WDT_CTRL_DISABLE, priv->control_reg); -/** - * ts72xx_wdt_stop() - stops the watchdog timer - * @wdt: watchdog to be stopped - * - * Called with @wdt->lock held. - */ -static void ts72xx_wdt_stop(struct ts72xx_wdt *wdt) -{ - ts72xx_wdt_kick(wdt); - __raw_writeb(0, wdt->control_reg); + return 0; } -static int ts72xx_wdt_open(struct inode *inode, struct file *file) +static int ts72xx_wdt_ping(struct watchdog_device *wdd) { - struct ts72xx_wdt *wdt = platform_get_drvdata(ts72xx_wdt_pdev); - int regval; - - /* - * Try to convert default timeout to valid register - * value first. 
- */ - regval = timeout_to_regval(timeout); - if (regval < 0) { - dev_err(&wdt->pdev->dev, - "failed to convert timeout (%d) to register value\n", - timeout); - return regval; - } - - if (mutex_lock_interruptible(&wdt->lock)) - return -ERESTARTSYS; + struct ts72xx_wdt_priv *priv = watchdog_get_drvdata(wdd); - if ((wdt->flags & TS72XX_WDT_BUSY_FLAG) != 0) { - mutex_unlock(&wdt->lock); - return -EBUSY; - } - - wdt->flags = TS72XX_WDT_BUSY_FLAG; - wdt->regval = regval; - file->private_data = wdt; - - ts72xx_wdt_start(wdt); + writeb(TS72XX_WDT_FEED_VAL, priv->feed_reg); - mutex_unlock(&wdt->lock); - return nonseekable_open(inode, file); + return 0; } -static int ts72xx_wdt_release(struct inode *inode, struct file *file) +static int ts72xx_wdt_settimeout(struct watchdog_device *wdd, unsigned int to) { - struct ts72xx_wdt *wdt = file->private_data; - - if (mutex_lock_interruptible(&wdt->lock)) - return -ERESTARTSYS; - - if ((wdt->flags & TS72XX_WDT_EXPECT_CLOSE_FLAG) != 0) { - ts72xx_wdt_stop(wdt); + struct ts72xx_wdt_priv *priv = watchdog_get_drvdata(wdd); + + if (to == 1) { + priv->regval = TS72XX_WDT_CTRL_1SEC; + } else if (to == 2) { + priv->regval = TS72XX_WDT_CTRL_2SEC; + } else if (to <= 4) { + priv->regval = TS72XX_WDT_CTRL_4SEC; + to = 4; } else { - dev_warn(&wdt->pdev->dev, - "TS-72XX WDT device closed unexpectly. " - "Watchdog timer will not stop!\n"); - /* - * Kick it one more time, to give userland some time - * to recover (for example, respawning the kicker - * daemon). - */ - ts72xx_wdt_kick(wdt); + priv->regval = TS72XX_WDT_CTRL_8SEC; + if (to <= 8) + to = 8; } - wdt->flags = 0; + wdd->timeout = to; - mutex_unlock(&wdt->lock); - return 0; -} - -static ssize_t ts72xx_wdt_write(struct file *file, - const char __user *data, - size_t len, - loff_t *ppos) -{ - struct ts72xx_wdt *wdt = file->private_data; - - if (!len) - return 0; - - if (mutex_lock_interruptible(&wdt->lock)) - return -ERESTARTSYS; - - ts72xx_wdt_kick(wdt); - - /* - * Support for magic character closing. User process - * writes 'V' into the device, just before it is closed. - * This means that we know that the wdt timer can be - * stopped after user closes the device. 
- */ - if (!nowayout) { - int i; - - for (i = 0; i < len; i++) { - char c; - - /* In case it was set long ago */ - wdt->flags &= ~TS72XX_WDT_EXPECT_CLOSE_FLAG; - - if (get_user(c, data + i)) { - mutex_unlock(&wdt->lock); - return -EFAULT; - } - if (c == 'V') { - wdt->flags |= TS72XX_WDT_EXPECT_CLOSE_FLAG; - break; - } - } + if (watchdog_active(wdd)) { + ts72xx_wdt_stop(wdd); + ts72xx_wdt_start(wdd); } - mutex_unlock(&wdt->lock); - return len; + return 0; } -static const struct watchdog_info winfo = { - .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | +static const struct watchdog_info ts72xx_wdt_ident = { + .options = WDIOF_KEEPALIVEPING | + WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "TS-72XX WDT", }; -static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct ts72xx_wdt *wdt = file->private_data; - void __user *argp = (void __user *)arg; - int __user *p = (int __user *)argp; - int error = 0; - - if (mutex_lock_interruptible(&wdt->lock)) - return -ERESTARTSYS; - - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user(argp, &winfo, sizeof(winfo))) - error = -EFAULT; - break; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - error = put_user(0, p); - break; - - case WDIOC_KEEPALIVE: - ts72xx_wdt_kick(wdt); - break; - - case WDIOC_SETOPTIONS: { - int options; - - error = get_user(options, p); - if (error) - break; - - error = -EINVAL; - - if ((options & WDIOS_DISABLECARD) != 0) { - ts72xx_wdt_stop(wdt); - error = 0; - } - if ((options & WDIOS_ENABLECARD) != 0) { - ts72xx_wdt_start(wdt); - error = 0; - } - - break; - } - - case WDIOC_SETTIMEOUT: { - int new_timeout; - int regval; - - error = get_user(new_timeout, p); - if (error) - break; - - regval = timeout_to_regval(new_timeout); - if (regval < 0) { - error = regval; - break; - } - ts72xx_wdt_stop(wdt); - wdt->regval = regval; - ts72xx_wdt_start(wdt); - - /*FALLTHROUGH*/ - } - - case WDIOC_GETTIMEOUT: - error = put_user(regval_to_timeout(wdt->regval), p); - break; - - default: - error = -ENOTTY; - break; - } - - mutex_unlock(&wdt->lock); - return error; -} - -static const struct file_operations ts72xx_wdt_fops = { +static struct watchdog_ops ts72xx_wdt_ops = { .owner = THIS_MODULE, - .llseek = no_llseek, - .open = ts72xx_wdt_open, - .release = ts72xx_wdt_release, - .write = ts72xx_wdt_write, - .unlocked_ioctl = ts72xx_wdt_ioctl, -}; - -static struct miscdevice ts72xx_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &ts72xx_wdt_fops, + .start = ts72xx_wdt_start, + .stop = ts72xx_wdt_stop, + .ping = ts72xx_wdt_ping, + .set_timeout = ts72xx_wdt_settimeout, }; static int ts72xx_wdt_probe(struct platform_device *pdev) { - struct ts72xx_wdt *wdt; - struct resource *r1, *r2; - int error = 0; + struct ts72xx_wdt_priv *priv; + struct watchdog_device *wdd; + struct resource *res; + int ret; - wdt = devm_kzalloc(&pdev->dev, sizeof(struct ts72xx_wdt), GFP_KERNEL); - if (!wdt) + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; - r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - wdt->control_reg = devm_ioremap_resource(&pdev->dev, r1); - if (IS_ERR(wdt->control_reg)) - return PTR_ERR(wdt->control_reg); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->control_reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->control_reg)) + return PTR_ERR(priv->control_reg); - r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - wdt->feed_reg = devm_ioremap_resource(&pdev->dev, r2); - if 
(IS_ERR(wdt->feed_reg)) - return PTR_ERR(wdt->feed_reg); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->feed_reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->feed_reg)) + return PTR_ERR(priv->feed_reg); - platform_set_drvdata(pdev, wdt); - ts72xx_wdt_pdev = pdev; - wdt->pdev = pdev; - mutex_init(&wdt->lock); + wdd = &priv->wdd; + wdd->info = &ts72xx_wdt_ident; + wdd->ops = &ts72xx_wdt_ops; + wdd->min_timeout = 1; + wdd->max_hw_heartbeat_ms = 8000; + wdd->parent = &pdev->dev; - /* make sure that the watchdog is disabled */ - ts72xx_wdt_stop(wdt); + watchdog_set_nowayout(wdd, nowayout); - error = misc_register(&ts72xx_wdt_miscdev); - if (error) { - dev_err(&pdev->dev, "failed to register miscdev\n"); - return error; - } + wdd->timeout = TS72XX_WDT_DEFAULT_TIMEOUT; + watchdog_init_timeout(wdd, timeout, &pdev->dev); - dev_info(&pdev->dev, "TS-72xx Watchdog driver\n"); + watchdog_set_drvdata(wdd, priv); - return 0; -} + ret = devm_watchdog_register_device(&pdev->dev, wdd); + if (ret) + return ret; + + dev_info(&pdev->dev, "TS-72xx Watchdog driver\n"); -static int ts72xx_wdt_remove(struct platform_device *pdev) -{ - misc_deregister(&ts72xx_wdt_miscdev); return 0; } static struct platform_driver ts72xx_wdt_driver = { .probe = ts72xx_wdt_probe, - .remove = ts72xx_wdt_remove, .driver = { .name = "ts72xx-wdt", }, diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index ef2ecaf53a14..98fd186c6878 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -297,7 +297,7 @@ static unsigned int wdt_get_time(struct watchdog_device *wdog) * Kernel Interfaces */ -static struct watchdog_info wdt_info = { +static const struct watchdog_info wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "W83627HF Watchdog", }; diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 32930a073a12..d5d2bbd8f428 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -987,6 +987,11 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) wdd->wd_data = NULL; mutex_unlock(&wd_data->lock); + if (watchdog_active(wdd) && + test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) { + watchdog_stop(wdd); + } + cancel_delayed_work_sync(&wd_data->work); kref_put(&wd_data->kref, watchdog_core_data_release); diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index 8d1184aee932..1ddc1f742cd4 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -194,7 +194,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev) if (ret < 0) { dev_err(wm831x->dev, "Failed to read watchdog status: %d\n", ret); - goto err; + return ret; } reg = ret; @@ -203,10 +203,8 @@ static int wm831x_wdt_probe(struct platform_device *pdev) driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data), GFP_KERNEL); - if (!driver_data) { - ret = -ENOMEM; - goto err; - } + if (!driver_data) + return -ENOMEM; mutex_init(&driver_data->lock); driver_data->wm831x = wm831x; @@ -253,7 +251,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev) dev_err(wm831x->dev, "Failed to request update GPIO: %d\n", ret); - goto err; + return ret; } driver_data->update_gpio = pdata->update_gpio; @@ -269,37 +267,22 @@ static int wm831x_wdt_probe(struct platform_device *pdev) } else { dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); - goto err; + return ret; } } - ret = watchdog_register_device(&driver_data->wdt); + ret = 
devm_watchdog_register_device(&pdev->dev, &driver_data->wdt); if (ret != 0) { dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n", ret); - goto err; + return ret; } - platform_set_drvdata(pdev, driver_data); - - return 0; - -err: - return ret; -} - -static int wm831x_wdt_remove(struct platform_device *pdev) -{ - struct wm831x_wdt_drvdata *driver_data = platform_get_drvdata(pdev); - - watchdog_unregister_device(&driver_data->wdt); - return 0; } static struct platform_driver wm831x_wdt_driver = { .probe = wm831x_wdt_probe, - .remove = wm831x_wdt_remove, .driver = { .name = "wm831x-watchdog", }, diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c new file mode 100644 index 000000000000..e290d5a13a6d --- /dev/null +++ b/drivers/watchdog/zx2967_wdt.c @@ -0,0 +1,291 @@ +/* + * watchdog driver for ZTE's zx2967 family + * + * Copyright (C) 2017 ZTE Ltd. + * + * Author: Baoyou Xie <baoyou.xie@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/watchdog.h> + +#define ZX2967_WDT_CFG_REG 0x4 +#define ZX2967_WDT_LOAD_REG 0x8 +#define ZX2967_WDT_REFRESH_REG 0x18 +#define ZX2967_WDT_START_REG 0x1c + +#define ZX2967_WDT_REFRESH_MASK GENMASK(5, 0) + +#define ZX2967_WDT_CFG_DIV(n) ((((n) & 0xff) - 1) << 8) +#define ZX2967_WDT_START_EN 0x1 + +/* + * Hardware magic number. + * When watchdog reg is written, the lowest 16 bits are valid, but + * the highest 16 bits should be always this number. + */ +#define ZX2967_WDT_WRITEKEY (0x1234 << 16) +#define ZX2967_WDT_VAL_MASK GENMASK(15, 0) + +#define ZX2967_WDT_DIV_DEFAULT 16 +#define ZX2967_WDT_DEFAULT_TIMEOUT 32 +#define ZX2967_WDT_MIN_TIMEOUT 1 +#define ZX2967_WDT_MAX_TIMEOUT 524 +#define ZX2967_WDT_MAX_COUNT 0xffff + +#define ZX2967_WDT_CLK_FREQ 0x8000 + +#define ZX2967_WDT_FLAG_REBOOT_MON BIT(0) + +struct zx2967_wdt { + struct watchdog_device wdt_device; + void __iomem *reg_base; + struct clk *clock; +}; + +static inline u32 zx2967_wdt_readl(struct zx2967_wdt *wdt, u16 reg) +{ + return readl_relaxed(wdt->reg_base + reg); +} + +static inline void zx2967_wdt_writel(struct zx2967_wdt *wdt, u16 reg, u32 val) +{ + writel_relaxed(val | ZX2967_WDT_WRITEKEY, wdt->reg_base + reg); +} + +static void zx2967_wdt_refresh(struct zx2967_wdt *wdt) +{ + u32 val; + + val = zx2967_wdt_readl(wdt, ZX2967_WDT_REFRESH_REG); + /* + * Bit 4-5, 1 and 2: refresh config info + * Bit 2-3, 1 and 2: refresh counter + * Bit 0-1, 1 and 2: refresh int-value + * we shift each group value between 1 and 2 to refresh all data. 
+ */ + val ^= ZX2967_WDT_REFRESH_MASK; + zx2967_wdt_writel(wdt, ZX2967_WDT_REFRESH_REG, + val & ZX2967_WDT_VAL_MASK); +} + +static int +zx2967_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout) +{ + struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd); + unsigned int divisor = ZX2967_WDT_DIV_DEFAULT; + u32 count; + + count = timeout * ZX2967_WDT_CLK_FREQ; + if (count > divisor * ZX2967_WDT_MAX_COUNT) + divisor = DIV_ROUND_UP(count, ZX2967_WDT_MAX_COUNT); + count = DIV_ROUND_UP(count, divisor); + zx2967_wdt_writel(wdt, ZX2967_WDT_CFG_REG, + ZX2967_WDT_CFG_DIV(divisor) & ZX2967_WDT_VAL_MASK); + zx2967_wdt_writel(wdt, ZX2967_WDT_LOAD_REG, + count & ZX2967_WDT_VAL_MASK); + zx2967_wdt_refresh(wdt); + wdd->timeout = (count * divisor) / ZX2967_WDT_CLK_FREQ; + + return 0; +} + +static void __zx2967_wdt_start(struct zx2967_wdt *wdt) +{ + u32 val; + + val = zx2967_wdt_readl(wdt, ZX2967_WDT_START_REG); + val |= ZX2967_WDT_START_EN; + zx2967_wdt_writel(wdt, ZX2967_WDT_START_REG, + val & ZX2967_WDT_VAL_MASK); +} + +static void __zx2967_wdt_stop(struct zx2967_wdt *wdt) +{ + u32 val; + + val = zx2967_wdt_readl(wdt, ZX2967_WDT_START_REG); + val &= ~ZX2967_WDT_START_EN; + zx2967_wdt_writel(wdt, ZX2967_WDT_START_REG, + val & ZX2967_WDT_VAL_MASK); +} + +static int zx2967_wdt_start(struct watchdog_device *wdd) +{ + struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd); + + zx2967_wdt_set_timeout(wdd, wdd->timeout); + __zx2967_wdt_start(wdt); + + return 0; +} + +static int zx2967_wdt_stop(struct watchdog_device *wdd) +{ + struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd); + + __zx2967_wdt_stop(wdt); + + return 0; +} + +static int zx2967_wdt_keepalive(struct watchdog_device *wdd) +{ + struct zx2967_wdt *wdt = watchdog_get_drvdata(wdd); + + zx2967_wdt_refresh(wdt); + + return 0; +} + +#define ZX2967_WDT_OPTIONS \ + (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE) +static const struct watchdog_info zx2967_wdt_ident = { + .options = ZX2967_WDT_OPTIONS, + .identity = "zx2967 watchdog", +}; + +static struct watchdog_ops zx2967_wdt_ops = { + .owner = THIS_MODULE, + .start = zx2967_wdt_start, + .stop = zx2967_wdt_stop, + .ping = zx2967_wdt_keepalive, + .set_timeout = zx2967_wdt_set_timeout, +}; + +static void zx2967_wdt_reset_sysctrl(struct device *dev) +{ + int ret; + void __iomem *regmap; + unsigned int offset, mask, config; + struct of_phandle_args out_args; + + ret = of_parse_phandle_with_fixed_args(dev->of_node, + "zte,wdt-reset-sysctrl", 3, 0, &out_args); + if (ret) + return; + + offset = out_args.args[0]; + config = out_args.args[1]; + mask = out_args.args[2]; + + regmap = syscon_node_to_regmap(out_args.np); + if (IS_ERR(regmap)) { + of_node_put(out_args.np); + return; + } + + regmap_update_bits(regmap, offset, mask, config); + of_node_put(out_args.np); +} + +static int zx2967_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct zx2967_wdt *wdt; + struct resource *base; + int ret; + struct reset_control *rstc; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + platform_set_drvdata(pdev, wdt); + + wdt->wdt_device.info = &zx2967_wdt_ident; + wdt->wdt_device.ops = &zx2967_wdt_ops; + wdt->wdt_device.timeout = ZX2967_WDT_DEFAULT_TIMEOUT; + wdt->wdt_device.max_timeout = ZX2967_WDT_MAX_TIMEOUT; + wdt->wdt_device.min_timeout = ZX2967_WDT_MIN_TIMEOUT; + wdt->wdt_device.parent = &pdev->dev; + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->reg_base = devm_ioremap_resource(dev, base); + if (IS_ERR(wdt->reg_base)) 
{ + dev_err(dev, "ioremap failed\n"); + return PTR_ERR(wdt->reg_base); + } + + zx2967_wdt_reset_sysctrl(dev); + + wdt->clock = devm_clk_get(dev, NULL); + if (IS_ERR(wdt->clock)) { + dev_err(dev, "failed to find watchdog clock source\n"); + return PTR_ERR(wdt->clock); + } + + ret = clk_prepare_enable(wdt->clock); + if (ret < 0) { + dev_err(dev, "failed to enable clock\n"); + return ret; + } + clk_set_rate(wdt->clock, ZX2967_WDT_CLK_FREQ); + + rstc = devm_reset_control_get(dev, NULL); + if (IS_ERR(rstc)) { + dev_err(dev, "failed to get rstc"); + ret = PTR_ERR(rstc); + goto err; + } + + reset_control_assert(rstc); + reset_control_deassert(rstc); + + watchdog_set_drvdata(&wdt->wdt_device, wdt); + watchdog_init_timeout(&wdt->wdt_device, + ZX2967_WDT_DEFAULT_TIMEOUT, dev); + watchdog_set_nowayout(&wdt->wdt_device, WATCHDOG_NOWAYOUT); + + ret = watchdog_register_device(&wdt->wdt_device); + if (ret) + goto err; + + dev_info(dev, "watchdog enabled (timeout=%d sec, nowayout=%d)", + wdt->wdt_device.timeout, WATCHDOG_NOWAYOUT); + + return 0; + +err: + clk_disable_unprepare(wdt->clock); + return ret; +} + +static int zx2967_wdt_remove(struct platform_device *pdev) +{ + struct zx2967_wdt *wdt = platform_get_drvdata(pdev); + + watchdog_unregister_device(&wdt->wdt_device); + clk_disable_unprepare(wdt->clock); + + return 0; +} + +static const struct of_device_id zx2967_wdt_match[] = { + { .compatible = "zte,zx296718-wdt", }, + {} +}; +MODULE_DEVICE_TABLE(of, zx2967_wdt_match); + +static struct platform_driver zx2967_wdt_driver = { + .probe = zx2967_wdt_probe, + .remove = zx2967_wdt_remove, + .driver = { + .name = "zx2967-wdt", + .of_match_table = of_match_ptr(zx2967_wdt_match), + }, +}; +module_platform_driver(zx2967_wdt_driver); + +MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>"); +MODULE_DESCRIPTION("ZTE zx2967 Watchdog Device Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index db107fa50ca1..a6d4378eb8d9 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -41,6 +41,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/bootmem.h> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ef2b61b69df..c77a0751a311 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -32,6 +32,7 @@ #include <linux/types.h> #include <linux/uaccess.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/highmem.h> diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 072e7599583a..a89f3cfe3c7d 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -29,6 +29,7 @@ #include <linux/errno.h> #include <linux/fs.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/parser.h> #include <linux/idr.h> #include <linux/slab.h> diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index f4f4450119e4..f1d96233670c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1047,16 +1047,18 @@ done: /** * v9fs_vfs_getattr - retrieve file metadata - * @mnt: mount information - * @dentry: file to get attributes on + * @path: Object to query * @stat: metadata structure to populate + * @request_mask: Mask of STATX_xxx flags indicating the caller's interests + * @flags: AT_STATX_xxx setting * */ static int -v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +v9fs_vfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int 
flags) { + struct dentry *dentry = path->dentry; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_wstat *st; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 5999bd050678..570e63ee5b71 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -468,9 +468,10 @@ error: } static int -v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_stat_dotl *st; diff --git a/fs/affs/affs.h b/fs/affs/affs.h index 2f088773f1c0..2f8bab390d13 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -138,9 +138,9 @@ extern int affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh); extern int affs_remove_header(struct dentry *dentry); extern u32 affs_checksum_block(struct super_block *sb, struct buffer_head *bh); extern void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh); -extern void secs_to_datestamp(time64_t secs, struct affs_date *ds); -extern umode_t prot_to_mode(u32 prot); -extern void mode_to_prot(struct inode *inode); +extern void affs_secs_to_datestamp(time64_t secs, struct affs_date *ds); +extern umode_t affs_prot_to_mode(u32 prot); +extern void affs_mode_to_prot(struct inode *inode); __printf(3, 4) extern void affs_error(struct super_block *sb, const char *function, const char *fmt, ...); @@ -162,6 +162,7 @@ extern void affs_free_bitmap(struct super_block *sb); /* namei.c */ +extern const struct export_operations affs_export_ops; extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len); extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int); extern int affs_unlink(struct inode *dir, struct dentry *dentry); @@ -178,7 +179,6 @@ extern int affs_rename(struct inode *old_dir, struct dentry *old_dentry, /* inode.c */ -extern unsigned long affs_parent_ino(struct inode *dir); extern struct inode *affs_new_inode(struct inode *dir); extern int affs_notify_change(struct dentry *dentry, struct iattr *attr); extern void affs_evict_inode(struct inode *inode); @@ -213,6 +213,12 @@ extern const struct address_space_operations affs_aops_ofs; extern const struct dentry_operations affs_dentry_operations; extern const struct dentry_operations affs_intl_dentry_operations; +static inline bool affs_validblock(struct super_block *sb, int block) +{ + return(block >= AFFS_SB(sb)->s_reserved && + block < AFFS_SB(sb)->s_partition_size); +} + static inline void affs_set_blocksize(struct super_block *sb, int size) { @@ -222,7 +228,7 @@ static inline struct buffer_head * affs_bread(struct super_block *sb, int block) { pr_debug("%s: %d\n", __func__, block); - if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) + if (affs_validblock(sb, block)) return sb_bread(sb, block); return NULL; } @@ -230,7 +236,7 @@ static inline struct buffer_head * affs_getblk(struct super_block *sb, int block) { pr_debug("%s: %d\n", __func__, block); - if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) + if (affs_validblock(sb, block)) return sb_getblk(sb, block); return NULL; } @@ -239,7 +245,7 @@ affs_getzeroblk(struct super_block *sb, int block) { struct buffer_head *bh; pr_debug("%s: %d\n", __func__, block); - if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) { + if (affs_validblock(sb, block)) { bh 
= sb_getblk(sb, block); lock_buffer(bh); memset(bh->b_data, 0 , sb->s_blocksize); @@ -254,7 +260,7 @@ affs_getemptyblk(struct super_block *sb, int block) { struct buffer_head *bh; pr_debug("%s: %d\n", __func__, block); - if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) { + if (affs_validblock(sb, block)) { bh = sb_getblk(sb, block); wait_on_buffer(bh); set_buffer_uptodate(bh); diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 0ec65c133b93..b573c3b9a328 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -367,7 +367,7 @@ affs_fix_checksum(struct super_block *sb, struct buffer_head *bh) } void -secs_to_datestamp(time64_t secs, struct affs_date *ds) +affs_secs_to_datestamp(time64_t secs, struct affs_date *ds) { u32 days; u32 minute; @@ -386,55 +386,55 @@ secs_to_datestamp(time64_t secs, struct affs_date *ds) } umode_t -prot_to_mode(u32 prot) +affs_prot_to_mode(u32 prot) { umode_t mode = 0; if (!(prot & FIBF_NOWRITE)) - mode |= S_IWUSR; + mode |= 0200; if (!(prot & FIBF_NOREAD)) - mode |= S_IRUSR; + mode |= 0400; if (!(prot & FIBF_NOEXECUTE)) - mode |= S_IXUSR; + mode |= 0100; if (prot & FIBF_GRP_WRITE) - mode |= S_IWGRP; + mode |= 0020; if (prot & FIBF_GRP_READ) - mode |= S_IRGRP; + mode |= 0040; if (prot & FIBF_GRP_EXECUTE) - mode |= S_IXGRP; + mode |= 0010; if (prot & FIBF_OTR_WRITE) - mode |= S_IWOTH; + mode |= 0002; if (prot & FIBF_OTR_READ) - mode |= S_IROTH; + mode |= 0004; if (prot & FIBF_OTR_EXECUTE) - mode |= S_IXOTH; + mode |= 0001; return mode; } void -mode_to_prot(struct inode *inode) +affs_mode_to_prot(struct inode *inode) { u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; - if (!(mode & S_IXUSR)) + if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; - if (!(mode & S_IRUSR)) + if (!(mode & 0400)) prot |= FIBF_NOREAD; - if (!(mode & S_IWUSR)) + if (!(mode & 0200)) prot |= FIBF_NOWRITE; - if (mode & S_IXGRP) + if (mode & 0010) prot |= FIBF_GRP_EXECUTE; - if (mode & S_IRGRP) + if (mode & 0040) prot |= FIBF_GRP_READ; - if (mode & S_IWGRP) + if (mode & 0020) prot |= FIBF_GRP_WRITE; - if (mode & S_IXOTH) + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; - if (mode & S_IROTH) + if (mode & 0004) prot |= FIBF_OTR_READ; - if (mode & S_IWOTH) + if (mode & 0002) prot |= FIBF_OTR_WRITE; AFFS_I(inode)->i_protect = prot; diff --git a/fs/affs/inode.c b/fs/affs/inode.c index fe4e1290dbb5..abcc59899229 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -10,6 +10,7 @@ * (C) 1991 Linus Torvalds - minix filesystem */ #include <linux/sched.h> +#include <linux/cred.h> #include <linux/gfp.h> #include "affs.h" @@ -69,7 +70,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) if (affs_test_opt(sbi->s_flags, SF_SETMODE)) inode->i_mode = sbi->s_mode; else - inode->i_mode = prot_to_mode(prot); + inode->i_mode = affs_prot_to_mode(prot); id = be16_to_cpu(tail->uid); if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID)) @@ -184,11 +185,12 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc) } tail = AFFS_TAIL(sb, bh); if (tail->stype == cpu_to_be32(ST_ROOT)) { - secs_to_datestamp(inode->i_mtime.tv_sec,&AFFS_ROOT_TAIL(sb, bh)->root_change); + affs_secs_to_datestamp(inode->i_mtime.tv_sec, + &AFFS_ROOT_TAIL(sb, bh)->root_change); } else { tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect); tail->size = cpu_to_be32(inode->i_size); - secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change); + affs_secs_to_datestamp(inode->i_mtime.tv_sec, &tail->change); if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) { uid = 
i_uid_read(inode); gid = i_gid_read(inode); @@ -249,7 +251,7 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr) mark_inode_dirty(inode); if (attr->ia_valid & ATTR_MODE) - mode_to_prot(inode); + affs_mode_to_prot(inode); out: return error; } diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 29186d29a3b6..96dd1d09a273 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -9,29 +9,10 @@ */ #include "affs.h" +#include <linux/exportfs.h> typedef int (*toupper_t)(int); -static int affs_toupper(int ch); -static int affs_hash_dentry(const struct dentry *, struct qstr *); -static int affs_compare_dentry(const struct dentry *dentry, - unsigned int len, const char *str, const struct qstr *name); -static int affs_intl_toupper(int ch); -static int affs_intl_hash_dentry(const struct dentry *, struct qstr *); -static int affs_intl_compare_dentry(const struct dentry *dentry, - unsigned int len, const char *str, const struct qstr *name); - -const struct dentry_operations affs_dentry_operations = { - .d_hash = affs_hash_dentry, - .d_compare = affs_compare_dentry, -}; - -const struct dentry_operations affs_intl_dentry_operations = { - .d_hash = affs_intl_hash_dentry, - .d_compare = affs_intl_compare_dentry, -}; - - /* Simple toupper() for DOS\1 */ static int @@ -271,7 +252,7 @@ affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) return -ENOSPC; inode->i_mode = mode; - mode_to_prot(inode); + affs_mode_to_prot(inode); mark_inode_dirty(inode); inode->i_op = &affs_file_inode_operations; @@ -301,7 +282,7 @@ affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) return -ENOSPC; inode->i_mode = S_IFDIR | mode; - mode_to_prot(inode); + affs_mode_to_prot(inode); inode->i_op = &affs_dir_inode_operations; inode->i_fop = &affs_dir_operations; @@ -347,7 +328,7 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) inode_nohighmem(inode); inode->i_data.a_ops = &affs_symlink_aops; inode->i_mode = S_IFLNK | 0777; - mode_to_prot(inode); + affs_mode_to_prot(inode); error = -EIO; bh = affs_bread(sb, inode->i_ino); @@ -465,3 +446,71 @@ done: affs_brelse(bh); return retval; } + +static struct dentry *affs_get_parent(struct dentry *child) +{ + struct inode *parent; + struct buffer_head *bh; + + bh = affs_bread(child->d_sb, d_inode(child)->i_ino); + if (!bh) + return ERR_PTR(-EIO); + + parent = affs_iget(child->d_sb, + be32_to_cpu(AFFS_TAIL(child->d_sb, bh)->parent)); + brelse(bh); + if (IS_ERR(parent)) + return ERR_CAST(parent); + + return d_obtain_alias(parent); +} + +static struct inode *affs_nfs_get_inode(struct super_block *sb, u64 ino, + u32 generation) +{ + struct inode *inode; + + if (!affs_validblock(sb, ino)) + return ERR_PTR(-ESTALE); + + inode = affs_iget(sb, ino); + if (IS_ERR(inode)) + return ERR_CAST(inode); + + if (generation && inode->i_generation != generation) { + iput(inode); + return ERR_PTR(-ESTALE); + } + + return inode; +} + +static struct dentry *affs_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) +{ + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, + affs_nfs_get_inode); +} + +static struct dentry *affs_fh_to_parent(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) +{ + return generic_fh_to_parent(sb, fid, fh_len, fh_type, + affs_nfs_get_inode); +} + +const struct export_operations affs_export_ops = { + .fh_to_dentry = affs_fh_to_dentry, + .fh_to_parent = affs_fh_to_parent, + .get_parent = affs_get_parent, +}; + +const struct dentry_operations affs_dentry_operations = { 
+ .d_hash = affs_hash_dentry, + .d_compare = affs_compare_dentry, +}; + +const struct dentry_operations affs_intl_dentry_operations = { + .d_hash = affs_intl_hash_dentry, + .d_compare = affs_intl_compare_dentry, +}; diff --git a/fs/affs/super.c b/fs/affs/super.c index d6384863192c..c2c27a8f128e 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -16,6 +16,7 @@ #include <linux/parser.h> #include <linux/magic.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/slab.h> #include <linux/writeback.h> #include <linux/blkdev.h> @@ -32,7 +33,7 @@ affs_commit_super(struct super_block *sb, int wait) struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh); lock_buffer(bh); - secs_to_datestamp(ktime_get_real_seconds(), &tail->disk_change); + affs_secs_to_datestamp(ktime_get_real_seconds(), &tail->disk_change); affs_fix_checksum(sb, bh); unlock_buffer(bh); @@ -507,6 +508,7 @@ got_root: return -ENOMEM; } + sb->s_export_op = &affs_export_ops; pr_debug("s_flags=%lX\n", sb->s_flags); return 0; } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 51a241e09fbb..949f960337f5 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -252,7 +252,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx, /* skip entries marked unused in the bitmap */ if (!(block->pagehdr.bitmap[offset / 8] & (1 << (offset % 8)))) { - _debug("ENT[%Zu.%u]: unused", + _debug("ENT[%zu.%u]: unused", blkoff / sizeof(union afs_dir_block), offset); if (offset >= curr) ctx->pos = blkoff + @@ -266,7 +266,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx, sizeof(*block) - offset * sizeof(union afs_dirent)); - _debug("ENT[%Zu.%u]: %s %Zu \"%s\"", + _debug("ENT[%zu.%u]: %s %zu \"%s\"", blkoff / sizeof(union afs_dir_block), offset, (offset < curr ? "skip" : "fill"), nlen, dire->u.name); @@ -274,23 +274,23 @@ static int afs_dir_iterate_block(struct dir_context *ctx, /* work out where the next possible entry is */ for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_dirent)) { if (next >= AFS_DIRENT_PER_BLOCK) { - _debug("ENT[%Zu.%u]:" + _debug("ENT[%zu.%u]:" " %u travelled beyond end dir block" - " (len %u/%Zu)", + " (len %u/%zu)", blkoff / sizeof(union afs_dir_block), offset, next, tmp, nlen); return -EIO; } if (!(block->pagehdr.bitmap[next / 8] & (1 << (next % 8)))) { - _debug("ENT[%Zu.%u]:" - " %u unmarked extension (len %u/%Zu)", + _debug("ENT[%zu.%u]:" + " %u unmarked extension (len %u/%zu)", blkoff / sizeof(union afs_dir_block), offset, next, tmp, nlen); return -EIO; } - _debug("ENT[%Zu.%u]: ext %u/%Zu", + _debug("ENT[%zu.%u]: ext %u/%zu", blkoff / sizeof(union afs_dir_block), next, tmp, nlen); next++; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 86cc7264c21c..1e4897a048d2 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -375,12 +375,10 @@ error_unlock: /* * read the attributes of an inode */ -int afs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int afs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode; - - inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 8acf3670e756..5dfa56903a2d 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -533,7 +533,7 @@ extern struct inode *afs_iget(struct super_block *, struct key *, struct afs_callback *); extern void afs_zap_data(struct afs_vnode *); extern int afs_validate(struct afs_vnode *, struct key *); -extern int 
afs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int afs_setattr(struct dentry *, struct iattr *); extern void afs_evict_inode(struct inode *); extern int afs_drop_inode(struct inode *); diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 95f42872b787..419ef05dcb5e 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -10,6 +10,8 @@ */ #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/af_rxrpc.h> #include <rxrpc/packet.h> @@ -260,8 +262,7 @@ void afs_flat_call_destructor(struct afs_call *call) /* * attach the data from a bunch of pages on an inode to a call */ -static int afs_send_pages(struct afs_call *call, struct msghdr *msg, - struct kvec *iov) +static int afs_send_pages(struct afs_call *call, struct msghdr *msg) { struct page *pages[8]; unsigned count, n, loop, offset, to; @@ -284,20 +285,21 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, loop = 0; do { + struct bio_vec bvec = {.bv_page = pages[loop], + .bv_offset = offset}; msg->msg_flags = 0; to = PAGE_SIZE; if (first + loop >= last) to = call->last_to; else msg->msg_flags = MSG_MORE; - iov->iov_base = kmap(pages[loop]) + offset; - iov->iov_len = to - offset; + bvec.bv_len = to - offset; offset = 0; _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); - iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, - iov, 1, to - offset); + iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, - &bvec, 1, to - offset); /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it @@ -306,7 +308,6 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, msg, to - offset); - kunmap(pages[loop]); if (ret < 0) break; } while (++loop < count); @@ -391,7 +392,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, goto error_do_abort; if (call->send_pages) { - ret = afs_send_pages(call, &msg, iov); + ret = afs_send_pages(call, &msg); if (ret < 0) goto error_do_abort; } diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c @@ -20,7 +20,7 @@ #include <linux/backing-dev.h> #include <linux/uio.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm.h> @@ -1495,7 +1495,7 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, return ret; ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) - ret = aio_ret(req, file->f_op->read_iter(req, &iter)); + ret = aio_ret(req, call_read_iter(file, req, &iter)); kfree(iovec); return ret; } @@ -1520,7 +1520,7 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, if (!ret) { req->ki_flags |= IOCB_WRITE; file_start_write(file); - ret = aio_ret(req, file->f_op->write_iter(req, &iter)); + ret = aio_ret(req, call_write_iter(file, req, &iter)); /* * We release freeze protection in aio_complete().
Fool lockdep * by telling it the lock got released so that it doesn't diff --git a/fs/attr.c b/fs/attr.c index c902b3d53508..135304146120 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -9,6 +9,7 @@ #include <linux/time.h> #include <linux/mm.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/fsnotify.h> #include <linux/fcntl.h> diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index c885daae68c8..beef981aa54f 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -14,6 +14,7 @@ #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/list.h> +#include <linux/completion.h> /* This is the range of ioctl() numbers we claim as ours */ #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 6f48d670c941..734cbf8d9676 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -17,6 +17,7 @@ #include <linux/file.h> #include <linux/fdtable.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/compat.h> #include <linux/syscalls.h> #include <linux/magic.h> @@ -38,8 +39,6 @@ * which have been left busy at at service shutdown. */ -#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl) - typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 82e8f6edfb48..d79ced925861 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk) pr_debug("waiting for mount name=%pd\n", path->dentry); status = autofs4_wait(sbi, path, NFY_MOUNT); pr_debug("mount wait done status=%d\n", status); + ino->last_used = jiffies; } - ino->last_used = jiffies; return status; } @@ -321,16 +321,21 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path) */ if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; - struct autofs_info *ino; struct dentry *new; new = d_lookup(parent, &dentry->d_name); if (!new) return NULL; - ino = autofs4_dentry_ino(new); - ino->last_used = jiffies; - dput(path->dentry); - path->dentry = new; + if (new == dentry) + dput(new); + else { + struct autofs_info *ino; + + ino = autofs4_dentry_ino(new); + ino->last_used = jiffies; + dput(path->dentry); + path->dentry = new; + } } return path->dentry; } diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 79fbd85db4ba..24a58bf9ca72 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/signal.h> +#include <linux/sched/signal.h> #include <linux/file.h> #include "autofs_i.h" diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 5f685c819298..bb53728c7a31 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -89,8 +89,8 @@ static int bad_inode_permission(struct inode *inode, int mask) return -EIO; } -static int bad_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int bad_inode_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { return -EIO; } diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 19407165f4aa..c500e954debb 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -18,6 +18,7 @@ #include <linux/parser.h> #include <linux/namei.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/exportfs.h> #include "befs.h" diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c 
index 2a59139f520b..9be82c4e14a4 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -25,6 +25,7 @@ #include <linux/init.h> #include <linux/coredump.h> #include <linux/slab.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 443a6f537d56..5075fd5c62c8 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -35,6 +35,10 @@ #include <linux/utsname.h> #include <linux/coredump.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> +#include <linux/cred.h> #include <linux/dax.h> #include <linux/uaccess.h> #include <asm/param.h> diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index ffca4bbc3d63..cf93a4fad012 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -15,6 +15,9 @@ #include <linux/fs.h> #include <linux/stat.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/errno.h> diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 9b2917a30294..2edcefc0a294 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/errno.h> diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 9b4688ab1d8e..bee1a36bc2ec 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -12,7 +12,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/magic.h> #include <linux/binfmts.h> #include <linux/slab.h> diff --git a/fs/block_dev.c b/fs/block_dev.c index 1c62845a72c7..2eca00ec4370 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -870,6 +870,7 @@ static void init_once(void *foo) #ifdef CONFIG_SYSFS INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif + bdev->bd_bdi = &noop_backing_dev_info; inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. 
*/ mutex_init(&bdev->bd_fsfreeze_mutex); @@ -884,8 +885,10 @@ static void bdev_evict_inode(struct inode *inode) spin_lock(&bdev_lock); list_del_init(&bdev->bd_list); spin_unlock(&bdev_lock); - if (bdev->bd_bdi != &noop_backing_dev_info) + if (bdev->bd_bdi != &noop_backing_dev_info) { bdi_put(bdev->bd_bdi); + bdev->bd_bdi = &noop_backing_dev_info; + } } static const struct super_operations bdev_sops = { @@ -988,8 +991,7 @@ struct block_device *bdget(dev_t dev) bdev->bd_contains = NULL; bdev->bd_super = NULL; bdev->bd_inode = inode; - bdev->bd_bdi = &noop_backing_dev_info; - bdev->bd_block_size = (1 << inode->i_blkbits); + bdev->bd_block_size = i_blocksize(inode); bdev->bd_part_count = 0; bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 8299601a3549..7699e16784d3 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -956,8 +956,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq, /* * add all inline backrefs for bytenr to the list */ -static int __add_inline_refs(struct btrfs_fs_info *fs_info, - struct btrfs_path *path, u64 bytenr, +static int __add_inline_refs(struct btrfs_path *path, u64 bytenr, int *info_level, struct list_head *prefs, struct ref_root *ref_tree, u64 *total_refs, u64 inum) @@ -1284,7 +1283,7 @@ again: */ delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); - head = btrfs_find_delayed_ref_head(trans, bytenr); + head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (head) { if (!mutex_trylock(&head->mutex)) { atomic_inc(&head->node.refs); @@ -1354,7 +1353,7 @@ again: if (key.objectid == bytenr && (key.type == BTRFS_EXTENT_ITEM_KEY || key.type == BTRFS_METADATA_ITEM_KEY)) { - ret = __add_inline_refs(fs_info, path, bytenr, + ret = __add_inline_refs(path, bytenr, &info_level, &prefs, ref_tree, &total_refs, inum); diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 1a8fa46ff87e..0c6baaba0651 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -224,47 +224,45 @@ static inline void btrfs_insert_inode_hash(struct inode *inode) __insert_inode_hash(inode, h); } -static inline u64 btrfs_ino(struct inode *inode) +static inline u64 btrfs_ino(struct btrfs_inode *inode) { - u64 ino = BTRFS_I(inode)->location.objectid; + u64 ino = inode->location.objectid; /* * !ino: btree_inode * type == BTRFS_ROOT_ITEM_KEY: subvol dir */ - if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY) - ino = inode->i_ino; + if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY) + ino = inode->vfs_inode.i_ino; return ino; } -static inline void btrfs_i_size_write(struct inode *inode, u64 size) +static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size) { - i_size_write(inode, size); - BTRFS_I(inode)->disk_i_size = size; + i_size_write(&inode->vfs_inode, size); + inode->disk_i_size = size; } -static inline bool btrfs_is_free_space_inode(struct inode *inode) +static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; if (root == root->fs_info->tree_root && btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID) return true; - if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID) return true; return false; } -static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) +static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation) { 
int ret = 0; - spin_lock(&BTRFS_I(inode)->lock); - if (BTRFS_I(inode)->logged_trans == generation && - BTRFS_I(inode)->last_sub_trans <= - BTRFS_I(inode)->last_log_commit && - BTRFS_I(inode)->last_sub_trans <= - BTRFS_I(inode)->root->last_log_commit) { + spin_lock(&inode->lock); + if (inode->logged_trans == generation && + inode->last_sub_trans <= inode->last_log_commit && + inode->last_sub_trans <= inode->root->last_log_commit) { /* * After a ranged fsync we might have left some extent maps * (that fall outside the fsync's range). So return false @@ -272,10 +270,10 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) * will be called and process those extent maps. */ smp_mb(); - if (list_empty(&BTRFS_I(inode)->extent_tree.modified_extents)) + if (list_empty(&inode->extent_tree.modified_extents)) ret = 1; } - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); return ret; } @@ -313,17 +311,34 @@ struct btrfs_dio_private { * to grab i_mutex. It is used to avoid the endless truncate due to * nonlocked dio read. */ -static inline void btrfs_inode_block_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode) { - set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); smp_mb(); } -static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode) { smp_mb__before_atomic(); - clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, - &BTRFS_I(inode)->runtime_flags); + clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); +} + +static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode, + u64 logical_start, u32 csum, u32 csum_expected, int mirror_num) +{ + struct btrfs_root *root = inode->root; + + /* Output minus objectid, which is more meaningful */ + if (root->objectid >= BTRFS_LAST_FREE_OBJECTID) + btrfs_warn_rl(root->fs_info, + "csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d", + root->objectid, btrfs_ino(inode), + logical_start, csum, csum_expected, mirror_num); + else + btrfs_warn_rl(root->fs_info, + "csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d", + root->objectid, btrfs_ino(inode), + logical_start, csum, csum_expected, mirror_num); } bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c4444d6f439f..c7721a6aa3bb 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -100,7 +100,7 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev, return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); } -static int check_compressed_csum(struct inode *inode, +static int check_compressed_csum(struct btrfs_inode *inode, struct compressed_bio *cb, u64 disk_start) { @@ -111,7 +111,7 @@ static int check_compressed_csum(struct inode *inode, u32 csum; u32 *cb_sum = &cb->sums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) + if (inode->flags & BTRFS_INODE_NODATASUM) return 0; for (i = 0; i < cb->nr_pages; i++) { @@ -124,10 +124,8 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr); if (csum != *cb_sum) { - btrfs_info(BTRFS_I(inode)->root->fs_info, - "csum failed ino %llu extent %llu csum %u wanted %u mirror %d", - btrfs_ino(inode), disk_start, csum, *cb_sum, - cb->mirror_num); + 
btrfs_print_data_csum_error(inode, disk_start, csum, + *cb_sum, cb->mirror_num); ret = -EIO; goto fail; } @@ -167,7 +165,7 @@ static void end_compressed_bio_read(struct bio *bio) goto out; inode = cb->inode; - ret = check_compressed_csum(inode, cb, + ret = check_compressed_csum(BTRFS_I(inode), cb, (u64)bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; @@ -913,32 +911,28 @@ static void free_workspaces(void) } /* - * given an address space and start/len, compress the bytes. + * Given an address space and start and length, compress the bytes into @pages + * that are allocated on demand. * - * pages are allocated to hold the compressed result and stored - * in 'pages' + * @out_pages is an in/out parameter, holds maximum number of pages to allocate + * and returns number of actually allocated pages * - * out_pages is used to return the number of pages allocated. There - * may be pages allocated even if we return an error - * - * total_in is used to return the number of bytes actually read. It - * may be smaller then len if we had to exit early because we + * @total_in is used to return the number of bytes actually read. It + * may be smaller than the input length if we had to exit early because we * ran out of room in the pages array or because we cross the * max_out threshold. * - * total_out is used to return the total number of compressed bytes + * @total_out is an in/out parameter, must be set to the input length and will + * be also used to return the total number of compressed bytes * - * max_out tells us the max number of bytes that we're allowed to + * @max_out tells us the max number of bytes that we're allowed to * stuff into pages */ int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct list_head *workspace; int ret; @@ -946,10 +940,9 @@ int btrfs_compress_pages(int type, struct address_space *mapping, workspace = find_workspace(type); ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, - start, len, pages, - nr_dest_pages, out_pages, - total_in, total_out, - max_out); + start, pages, + out_pages, + total_in, total_out); free_workspace(type, workspace); return ret; } @@ -1017,7 +1010,7 @@ void btrfs_exit_compress(void) * * total_out is the last byte of the buffer */ -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio) { diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 09879579fbc8..39ec43ab8df1 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,20 +19,32 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ +/* + * We want to make sure that amount of RAM required to uncompress an extent is + * reasonable, so we limit the total size in ram of a compressed extent to + * 128k. This is a crucial number because it also controls how easily we can + * spread reads across cpus for decompression. + * + * We also want to make sure the amount of IO required to do a random read is + * reasonably small, so we limit the size of a compressed extent to 128k. 
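Editorial aside, not part of the patch: given the reworked kernel-doc above, a caller of the new btrfs_compress_pages() is expected to prime the in/out parameters itself. A minimal sketch, loosely modelled on the in-tree caller compress_file_range() in inode.c; the variable names and the zlib choice are illustrative only.

	/* Editorial sketch: priming the in/out parameters of the new interface. */
	unsigned long nr_pages = DIV_ROUND_UP(len, PAGE_SIZE); /* in: max pages to allocate, out: pages actually allocated */
	unsigned long total_in = 0;                            /* out: bytes consumed from the input */
	unsigned long total_out = len;                         /* in: input length, out: compressed size */

	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping, start,
				   pages, &nr_pages, &total_in, &total_out);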
+ */ + +/* Maximum length of compressed data stored on disk */ +#define BTRFS_MAX_COMPRESSED (SZ_128K) +/* Maximum size of data before compression */ +#define BTRFS_MAX_UNCOMPRESSED (SZ_128K) + void btrfs_init_compress(void); void btrfs_exit_compress(void); int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio); @@ -59,13 +71,11 @@ struct btrfs_compress_op { int (*compress_pages)(struct list_head *workspace, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int (*decompress_bio)(struct list_head *workspace, struct page **pages_in, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a426dc822d4d..7dc8844037e0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -28,9 +28,9 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level); -static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *ins_key, - struct btrfs_path *path, int data_size, int extend); +static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *ins_key, struct btrfs_path *path, + int data_size, int extend); static int push_node_left(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct extent_buffer *dst, @@ -426,7 +426,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, tm_root = &fs_info->tree_mod_log; for (node = rb_first(tm_root); node; node = next) { next = rb_next(node); - tm = container_of(node, struct tree_mod_elem, node); + tm = rb_entry(node, struct tree_mod_elem, node); if (tm->seq > min_seq) continue; rb_erase(node, tm_root); @@ -453,14 +453,12 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) struct rb_node *parent = NULL; struct tree_mod_elem *cur; - BUG_ON(!tm); - tm->seq = btrfs_inc_tree_mod_seq(fs_info); tm_root = &fs_info->tree_mod_log; new = &tm_root->rb_node; while (*new) { - cur = container_of(*new, struct tree_mod_elem, node); + cur = rb_entry(*new, struct tree_mod_elem, node); parent = *new; if (cur->logical < tm->logical) new = &((*new)->rb_left); @@ -746,7 +744,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, tm_root = &fs_info->tree_mod_log; node = tm_root->rb_node; while (node) { - cur = container_of(node, struct tree_mod_elem, node); + cur = rb_entry(node, struct tree_mod_elem, node); if (cur->logical < start) { node = node->rb_left; } else if (cur->logical > start) { @@ -1074,7 +1072,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, buf, 1); BUG_ON(ret); /* -ENOMEM */ } - clean_tree_block(trans, fs_info, buf); + clean_tree_block(fs_info, buf); *last_ref = 1; } return 0; @@ -1326,7 +1324,7 @@ 
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, next = rb_next(&tm->node); if (!next) break; - tm = container_of(next, struct tree_mod_elem, node); + tm = rb_entry(next, struct tree_mod_elem, node); if (tm->logical != first_tm->logical) break; } @@ -1580,7 +1578,8 @@ static int close_blocks(u64 blocknr, u64 other, u32 blocksize) /* * compare two keys in a memcmp fashion */ -static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) +static int comp_keys(const struct btrfs_disk_key *disk, + const struct btrfs_key *k2) { struct btrfs_key k1; @@ -1592,7 +1591,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) /* * same as comp_keys only with two btrfs_key's */ -int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2) +int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) { if (k1->objectid > k2->objectid) return 1; @@ -1732,8 +1731,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, * slot may point to max if the key is bigger than all of the keys */ static noinline int generic_bin_search(struct extent_buffer *eb, - unsigned long p, - int item_size, struct btrfs_key *key, + unsigned long p, int item_size, + const struct btrfs_key *key, int max, int *slot) { int low = 0; @@ -1802,7 +1801,7 @@ static noinline int generic_bin_search(struct extent_buffer *eb, * simple bin_search frontend that does the right thing for * leaves vs nodes */ -static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, +static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key, int level, int *slot) { if (level == 0) @@ -1819,7 +1818,7 @@ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, slot); } -int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, +int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, int level, int *slot) { return bin_search(eb, key, level, slot); @@ -1937,7 +1936,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, path->locks[level] = 0; path->nodes[level] = NULL; - clean_tree_block(trans, fs_info, mid); + clean_tree_block(fs_info, mid); btrfs_tree_unlock(mid); /* once for the path */ free_extent_buffer(mid); @@ -1998,7 +1997,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, if (wret < 0 && wret != -ENOSPC) ret = wret; if (btrfs_header_nritems(right) == 0) { - clean_tree_block(trans, fs_info, right); + clean_tree_block(fs_info, right); btrfs_tree_unlock(right); del_ptr(root, path, level + 1, pslot + 1); root_sub_used(root, right->len); @@ -2042,7 +2041,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BUG_ON(wret == 1); } if (btrfs_header_nritems(mid) == 0) { - clean_tree_block(trans, fs_info, mid); + clean_tree_block(fs_info, mid); btrfs_tree_unlock(mid); del_ptr(root, path, level + 1, pslot); root_sub_used(root, mid->len); @@ -2437,10 +2436,9 @@ noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) * reada. -EAGAIN is returned and the search must be repeated. 
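Editorial aside: the -EAGAIN contract described above is honoured at the call sites by restarting the whole search rather than retrying the read in place, since read_block_for_search() has already released the path. A condensed sketch of that caller pattern, simplified from btrfs_search_slot() further down in this file:

	/* Editorial sketch of the -EAGAIN retry contract (simplified, not verbatim). */
again:
	/* ... descend the tree, then read the child block for the next level ... */
	err = read_block_for_search(root, p, &b, level, slot, key);
	if (err == -EAGAIN)
		goto again;	/* path was released; repeat the whole search */
	if (err)
		return err;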
*/ static int -read_block_for_search(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *p, - struct extent_buffer **eb_ret, int level, int slot, - struct btrfs_key *key, u64 time_seq) +read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, + struct extent_buffer **eb_ret, int level, int slot, + const struct btrfs_key *key) { struct btrfs_fs_info *fs_info = root->fs_info; u64 blocknr; @@ -2587,7 +2585,7 @@ done: } static void key_search_validate(struct extent_buffer *b, - struct btrfs_key *key, + const struct btrfs_key *key, int level) { #ifdef CONFIG_BTRFS_ASSERT @@ -2606,7 +2604,7 @@ static void key_search_validate(struct extent_buffer *b, #endif } -static int key_search(struct extent_buffer *b, struct btrfs_key *key, +static int key_search(struct extent_buffer *b, const struct btrfs_key *key, int level, int *prev_cmp, int *slot) { if (*prev_cmp != 0) { @@ -2668,9 +2666,9 @@ int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if * possible) */ -int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *key, struct btrfs_path *p, int - ins_len, int cow) +int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *key, struct btrfs_path *p, + int ins_len, int cow) { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *b; @@ -2870,8 +2868,8 @@ cow_done: goto done; } - err = read_block_for_search(trans, root, p, - &b, level, slot, key, 0); + err = read_block_for_search(root, p, &b, level, + slot, key); if (err == -EAGAIN) goto again; if (err) { @@ -2953,7 +2951,7 @@ done: * The resulting path and return value will be set up as if we called * btrfs_search_slot at that point in time with ins_len and cow both set to 0. */ -int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, +int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, struct btrfs_path *p, u64 time_seq) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -3014,8 +3012,8 @@ again: goto done; } - err = read_block_for_search(NULL, root, p, &b, level, - slot, key, time_seq); + err = read_block_for_search(root, p, &b, level, + slot, key); if (err == -EAGAIN) goto again; if (err) { @@ -3067,8 +3065,9 @@ done: * < 0 on error */ int btrfs_search_slot_for_read(struct btrfs_root *root, - struct btrfs_key *key, struct btrfs_path *p, - int find_higher, int return_any) + const struct btrfs_key *key, + struct btrfs_path *p, int find_higher, + int return_any) { int ret; struct extent_buffer *leaf; @@ -3166,7 +3165,7 @@ static void fixup_low_keys(struct btrfs_fs_info *fs_info, */ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, struct btrfs_path *path, - struct btrfs_key *new_key) + const struct btrfs_key *new_key) { struct btrfs_disk_key disk_key; struct extent_buffer *eb; @@ -3594,8 +3593,7 @@ noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info, * min slot controls the lowest index we're willing to push to the * right. 
We'll push up to and including min_slot, but no lower */ -static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, +static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info, struct btrfs_path *path, int data_size, int empty, struct extent_buffer *right, @@ -3704,7 +3702,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, if (left_nritems) btrfs_mark_buffer_dirty(left); else - clean_tree_block(trans, fs_info, left); + clean_tree_block(fs_info, left); btrfs_mark_buffer_dirty(right); @@ -3716,7 +3714,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, if (path->slots[0] >= left_nritems) { path->slots[0] -= left_nritems; if (btrfs_header_nritems(path->nodes[0]) == 0) - clean_tree_block(trans, fs_info, path->nodes[0]); + clean_tree_block(fs_info, path->nodes[0]); btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; @@ -3809,7 +3807,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root return 0; } - return __push_leaf_right(trans, fs_info, path, min_data_size, empty, + return __push_leaf_right(fs_info, path, min_data_size, empty, right, free_space, left_nritems, min_slot); out_unlock: btrfs_tree_unlock(right); @@ -3825,8 +3823,7 @@ out_unlock: * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the * items */ -static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, +static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info, struct btrfs_path *path, int data_size, int empty, struct extent_buffer *left, int free_space, u32 right_nritems, @@ -3945,7 +3942,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, if (right_nritems) btrfs_mark_buffer_dirty(right); else - clean_tree_block(trans, fs_info, right); + clean_tree_block(fs_info, right); btrfs_item_key(right, &disk_key, 0); fixup_low_keys(fs_info, path, &disk_key, 1); @@ -4035,7 +4032,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root goto out; } - return __push_leaf_left(trans, fs_info, path, min_data_size, + return __push_leaf_left(fs_info, path, min_data_size, empty, left, free_space, right_nritems, max_slot); out: @@ -4160,6 +4157,9 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans, /* try to push all the items before our slot into the next leaf */ slot = path->slots[0]; + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]); ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); if (ret < 0) return ret; @@ -4180,7 +4180,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans, */ static noinline int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_key *ins_key, + const struct btrfs_key *ins_key, struct btrfs_path *path, int data_size, int extend) { @@ -4215,6 +4215,10 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, if (wret < 0) return wret; if (wret) { + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, + l); wret = push_leaf_left(trans, root, path, space_needed, space_needed, 0, (u32)-1); if (wret < 0) @@ -4412,10 +4416,9 @@ err: return ret; } -static noinline int split_item(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, +static noinline int split_item(struct btrfs_fs_info *fs_info, struct btrfs_path 
*path, - struct btrfs_key *new_key, + const struct btrfs_key *new_key, unsigned long split_offset) { struct extent_buffer *leaf; @@ -4501,7 +4504,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans, int btrfs_split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *new_key, + const struct btrfs_key *new_key, unsigned long split_offset) { int ret; @@ -4510,7 +4513,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans, if (ret) return ret; - ret = split_item(trans, root->fs_info, path, new_key, split_offset); + ret = split_item(root->fs_info, path, new_key, split_offset); return ret; } @@ -4525,7 +4528,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans, int btrfs_duplicate_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *new_key) + const struct btrfs_key *new_key) { struct extent_buffer *leaf; int ret; @@ -4726,7 +4729,7 @@ void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path, * that doesn't call btrfs_search_slot */ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, + const struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -4820,7 +4823,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, + const struct btrfs_key *cpu_key, u32 *data_size, int nr) { int ret = 0; @@ -4851,9 +4854,9 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, * Given a key and some data, insert an item into the tree. * This does all the path init required, making room in the tree if needed. 
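Editorial aside: with btrfs_insert_item() and the empty-item variants now taking a const struct btrfs_key *, callers can build their search/insert keys as const. A minimal illustrative caller follows; the function name, key type choice and payload are hypothetical, not taken from the patch.

	/* Editorial example: insert a small item at a hypothetical key. */
	static int insert_small_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 objectid)
	{
		const struct btrfs_key key = {
			.objectid = objectid,
			.type = BTRFS_XATTR_ITEM_KEY,	/* arbitrary choice for the example */
			.offset = 0,
		};
		u64 payload = 0;

		return btrfs_insert_item(trans, root, &key, &payload,
					 sizeof(payload));
	}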
*/ -int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *cpu_key, void *data, u32 - data_size) +int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *cpu_key, void *data, + u32 data_size) { int ret = 0; struct btrfs_path *path; @@ -5008,7 +5011,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, btrfs_set_header_level(leaf, 0); } else { btrfs_set_path_blocking(path); - clean_tree_block(trans, fs_info, leaf); + clean_tree_block(fs_info, leaf); btrfs_del_leaf(trans, root, path, leaf); } } else { @@ -5243,7 +5246,7 @@ out: static int tree_move_down(struct btrfs_fs_info *fs_info, struct btrfs_path *path, - int *level, int root_level) + int *level) { struct extent_buffer *eb; @@ -5258,8 +5261,7 @@ static int tree_move_down(struct btrfs_fs_info *fs_info, return 0; } -static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info, - struct btrfs_path *path, +static int tree_move_next_or_upnext(struct btrfs_path *path, int *level, int root_level) { int ret = 0; @@ -5298,10 +5300,9 @@ static int tree_advance(struct btrfs_fs_info *fs_info, int ret; if (*level == 0 || !allow_down) { - ret = tree_move_next_or_upnext(fs_info, path, level, - root_level); + ret = tree_move_next_or_upnext(path, level, root_level); } else { - ret = tree_move_down(fs_info, path, level, root_level); + ret = tree_move_down(fs_info, path, level); } if (ret >= 0) { if (*level == 0) @@ -5784,8 +5785,8 @@ again: next = c; next_rw_lock = path->locks[level]; - ret = read_block_for_search(NULL, root, path, &next, level, - slot, &key, 0); + ret = read_block_for_search(root, path, &next, level, + slot, &key); if (ret == -EAGAIN) goto again; @@ -5834,8 +5835,8 @@ again: if (!level) break; - ret = read_block_for_search(NULL, root, path, &next, level, - 0, &key, 0); + ret = read_block_for_search(root, path, &next, level, + 0, &key); if (ret == -EAGAIN) goto again; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index adf16307842a..29b7fc28c607 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -20,6 +20,7 @@ #define __BTRFS_CTREE__ #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/highmem.h> #include <linux/fs.h> #include <linux/rwsem.h> @@ -97,6 +98,14 @@ static const int btrfs_csum_sizes[] = { 4 }; #define BTRFS_MAX_EXTENT_SIZE SZ_128M +/* + * Count how many BTRFS_MAX_EXTENT_SIZE cover the @size + */ +static inline u32 count_max_extents(u64 size) +{ + return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE); +} + struct btrfs_mapping_tree { struct extent_map_tree map_tree; }; @@ -1953,7 +1962,7 @@ BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, - struct btrfs_disk_key *disk) + const struct btrfs_disk_key *disk) { cpu->offset = le64_to_cpu(disk->offset); cpu->type = disk->type; @@ -1961,7 +1970,7 @@ static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, } static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, - struct btrfs_key *cpu) + const struct btrfs_key *cpu) { disk->offset = cpu_to_le64(cpu->offset); disk->type = cpu->type; @@ -1993,8 +2002,7 @@ static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, btrfs_disk_key_to_cpu(key, &disk_key); } - -static inline u8 btrfs_key_type(struct btrfs_key *key) +static inline u8 btrfs_key_type(const struct btrfs_key 
*key) { return key->type; } @@ -2577,8 +2585,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info, struct extent_buffer *eb); -int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr); struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, @@ -2587,10 +2594,11 @@ void btrfs_get_block_group(struct btrfs_block_group_cache *cache); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int get_block_group_index(struct btrfs_block_group_cache *cache); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 parent, - u64 root_objectid, - struct btrfs_disk_key *key, int level, - u64 hint, u64 empty_size); + struct btrfs_root *root, + u64 parent, u64 root_objectid, + const struct btrfs_disk_key *key, + int level, u64 hint, + u64 empty_size); void btrfs_free_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -2623,8 +2631,7 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int delalloc); int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); -void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info); +void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, @@ -2681,7 +2688,7 @@ enum btrfs_flush_state { }; int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes); +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes); void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, u64 len); @@ -2689,17 +2696,16 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode); -void btrfs_orphan_release_metadata(struct inode *inode); + struct btrfs_inode *inode); +void btrfs_orphan_release_metadata(struct btrfs_inode *inode); int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, struct btrfs_block_rsv *rsv, int nitems, u64 *qgroup_reserved, bool use_global_rsv); void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, - struct btrfs_block_rsv *rsv, - u64 qgroup_reserved); -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); + struct btrfs_block_rsv *rsv); +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); @@ -2724,7 +2730,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, void btrfs_block_rsv_release(struct 
btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u64 num_bytes); -int btrfs_inc_block_group_ro(struct btrfs_root *root, +int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *cache); void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); @@ -2750,9 +2756,9 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_fs_info *info, u64 start, u64 end); /* ctree.c */ -int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, +int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, int level, int *slot); -int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2); +int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type); @@ -2760,7 +2766,7 @@ int btrfs_previous_extent_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid); void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, struct btrfs_path *path, - struct btrfs_key *new_key); + const struct btrfs_key *new_key); struct extent_buffer *btrfs_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, @@ -2802,22 +2808,23 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info, int btrfs_split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *new_key, + const struct btrfs_key *new_key, unsigned long split_offset); int btrfs_duplicate_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *new_key); + const struct btrfs_key *new_key); int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); -int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *key, struct btrfs_path *p, int - ins_len, int cow); -int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, +int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *key, struct btrfs_path *p, + int ins_len, int cow); +int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, struct btrfs_path *p, u64 time_seq); int btrfs_search_slot_for_read(struct btrfs_root *root, - struct btrfs_key *key, struct btrfs_path *p, - int find_higher, int return_any); + const struct btrfs_key *key, + struct btrfs_path *p, int find_higher, + int return_any); int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, u64 *last_ret, @@ -2840,19 +2847,20 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, } void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, + const struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr); -int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *key, void *data, u32 data_size); +int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct 
btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, int nr); + const struct btrfs_key *cpu_key, u32 *data_size, + int nr); static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *key, + const struct btrfs_key *key, u32 data_size) { return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); @@ -2941,15 +2949,15 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, const char *name, int name_len); int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_key *key); -int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root - *root, struct btrfs_key *key, struct btrfs_root_item - *item); + const struct btrfs_key *key); +int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, + const struct btrfs_key *key, + struct btrfs_root_item *item); int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_root_item *item); -int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, +int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key, struct btrfs_path *path, struct btrfs_root_item *root_item, struct btrfs_key *root_key); int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info); @@ -2975,7 +2983,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, const char *name, int name_len); int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, struct inode *dir, + int name_len, struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3074,7 +3082,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, u64 file_start, int contig); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, @@ -3093,9 +3101,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, int delay_iput); void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create); +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, u64 start, + u64 len, int create); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, u64 *ram_bytes); @@ -3116,13 +3124,13 @@ static inline void btrfs_force_ra(struct address_space *mapping, } struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); -int btrfs_set_inode_index(struct inode *dir, u64 *index); +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *dir, struct inode *inode, + struct btrfs_inode *dir, struct btrfs_inode *inode, const char *name, int name_len); int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode 
*parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index); int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3159,15 +3167,16 @@ void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *was_new); -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 end, - int create); +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, + u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode); int btrfs_orphan_cleanup(struct btrfs_root *root); void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); @@ -3208,11 +3217,11 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, int btrfs_auto_defrag_init(void); void btrfs_auto_defrag_exit(void); int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode); + struct btrfs_inode *inode); int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned); extern const struct file_operations btrfs_file_operations; int __btrfs_drop_extents(struct btrfs_trans_handle *trans, @@ -3226,7 +3235,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, int drop_cache); int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end); + struct btrfs_inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); int btrfs_dirty_pages(struct inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, @@ -3447,7 +3456,8 @@ do { \ "BTRFS: Transaction aborted (error %d)\n", \ (errno)); \ } else { \ - pr_debug("BTRFS: Transaction aborted (error %d)\n", \ + btrfs_debug((trans)->fs_info, \ + "Transaction aborted (error %d)", \ (errno)); \ } \ } \ diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 80982a83c9fd..1aff676f0e5b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -72,14 +72,14 @@ static inline int btrfs_is_continuous_delayed_item( return 0; } -static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode) +static struct btrfs_delayed_node *btrfs_get_delayed_node( + struct btrfs_inode *btrfs_inode) { - struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(btrfs_inode); struct btrfs_delayed_node *node; - node = ACCESS_ONCE(btrfs_inode->delayed_node); + node = READ_ONCE(btrfs_inode->delayed_node); if (node) { atomic_inc(&node->refs); return node; @@ -107,16 
+107,15 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode) /* Will return either the node or PTR_ERR(-ENOMEM) */ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( - struct inode *inode) + struct btrfs_inode *btrfs_inode) { struct btrfs_delayed_node *node; - struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(btrfs_inode); int ret; again: - node = btrfs_get_delayed_node(inode); + node = btrfs_get_delayed_node(btrfs_inode); if (node) return node; @@ -574,7 +573,7 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info, static int btrfs_delayed_inode_reserve_metadata( struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_delayed_node *node) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -603,13 +602,13 @@ static int btrfs_delayed_inode_reserve_metadata( * worth which is less likely to hurt us. */ if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) { - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) release = true; else src_rsv = NULL; - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); } /* @@ -1196,7 +1195,7 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, } int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); struct btrfs_path *path; @@ -1233,9 +1232,9 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, return ret; } -int btrfs_commit_inode_delayed_inode(struct inode *inode) +int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); struct btrfs_trans_handle *trans; struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); struct btrfs_path *path; @@ -1288,15 +1287,15 @@ out: return ret; } -void btrfs_remove_delayed_node(struct inode *inode) +void btrfs_remove_delayed_node(struct btrfs_inode *inode) { struct btrfs_delayed_node *delayed_node; - delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); + delayed_node = READ_ONCE(inode->delayed_node); if (!delayed_node) return; - BTRFS_I(inode)->delayed_node = NULL; + inode->delayed_node = NULL; btrfs_release_delayed_node(delayed_node); } @@ -1434,7 +1433,7 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info) int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, const char *name, int name_len, - struct inode *dir, + struct btrfs_inode *dir, struct btrfs_disk_key *disk_key, u8 type, u64 index) { @@ -1510,7 +1509,7 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info, int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct inode *dir, u64 index) + struct btrfs_inode *dir, u64 index) { struct btrfs_delayed_node *node; struct btrfs_delayed_item *item; @@ -1558,7 +1557,7 @@ end: return ret; } -int btrfs_inode_delayed_dir_index_count(struct inode *inode) +int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode) { struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); 
@@ -1575,7 +1574,7 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode) return -EINVAL; } - BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; + inode->index_cnt = delayed_node->index_cnt; btrfs_release_delayed_node(delayed_node); return 0; } @@ -1587,7 +1586,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode, struct btrfs_delayed_node *delayed_node; struct btrfs_delayed_item *item; - delayed_node = btrfs_get_delayed_node(inode); + delayed_node = btrfs_get_delayed_node(BTRFS_I(inode)); if (!delayed_node) return false; @@ -1776,7 +1775,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) struct btrfs_delayed_node *delayed_node; struct btrfs_inode_item *inode_item; - delayed_node = btrfs_get_delayed_node(inode); + delayed_node = btrfs_get_delayed_node(BTRFS_I(inode)); if (!delayed_node) return -ENOENT; @@ -1791,7 +1790,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) i_uid_write(inode, btrfs_stack_inode_uid(inode_item)); i_gid_write(inode, btrfs_stack_inode_gid(inode_item)); - btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item)); inode->i_mode = btrfs_stack_inode_mode(inode_item); set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); @@ -1831,7 +1830,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_delayed_node *delayed_node; int ret = 0; - delayed_node = btrfs_get_or_create_delayed_node(inode); + delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode)); if (IS_ERR(delayed_node)) return PTR_ERR(delayed_node); @@ -1841,7 +1840,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, goto release_node; } - ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode, + ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode), delayed_node); if (ret) goto release_node; @@ -1856,9 +1855,9 @@ release_node: return ret; } -int btrfs_delayed_delete_inode_ref(struct inode *inode) +int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); struct btrfs_delayed_node *delayed_node; /* @@ -1933,7 +1932,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) mutex_unlock(&delayed_node->mutex); } -void btrfs_kill_delayed_inode_items(struct inode *inode) +void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode) { struct btrfs_delayed_node *delayed_node; diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 8a2bf5e3e4cf..40327cc3b99a 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -101,15 +101,15 @@ static inline void btrfs_init_delayed_root( int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, const char *name, int name_len, - struct inode *dir, + struct btrfs_inode *dir, struct btrfs_disk_key *disk_key, u8 type, u64 index); int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct inode *dir, u64 index); + struct btrfs_inode *dir, u64 index); -int btrfs_inode_delayed_dir_index_count(struct inode *inode); +int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode); int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); @@ -119,17 +119,17 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, void 
btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info); int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, - struct inode *inode); + struct btrfs_inode *inode); /* Used for evicting the inode. */ -void btrfs_remove_delayed_node(struct inode *inode); -void btrfs_kill_delayed_inode_items(struct inode *inode); -int btrfs_commit_inode_delayed_inode(struct inode *inode); +void btrfs_remove_delayed_node(struct btrfs_inode *inode); +void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode); +int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode); int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); int btrfs_fill_inode(struct inode *inode, u32 *rdev); -int btrfs_delayed_delete_inode_ref(struct inode *inode); +int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode); /* Used for drop dead root */ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index ef724a5fc30e..6eb80952efb3 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -550,13 +550,14 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_node *ref, struct btrfs_qgroup_extent_record *qrecord, u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, - int action, int is_data) + int action, int is_data, int *qrecord_inserted_ret) { struct btrfs_delayed_ref_head *existing; struct btrfs_delayed_ref_head *head_ref = NULL; struct btrfs_delayed_ref_root *delayed_refs; int count_mod = 1; int must_insert_reserved = 0; + int qrecord_inserted = 0; /* If reserved is provided, it must be a data extent. */ BUG_ON(!is_data && reserved); @@ -623,6 +624,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, if(btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord)) kfree(qrecord); + else + qrecord_inserted = 1; } spin_lock_init(&head_ref->lock); @@ -650,6 +653,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, atomic_inc(&delayed_refs->num_entries); trans->delayed_ref_updates++; } + if (qrecord_inserted_ret) + *qrecord_inserted_ret = qrecord_inserted; return head_ref; } @@ -779,6 +784,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; + int qrecord_inserted; BUG_ON(extent_op && extent_op->is_data); ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); @@ -806,12 +812,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, * the spin lock */ head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record, - bytenr, num_bytes, 0, 0, action, 0); + bytenr, num_bytes, 0, 0, action, 0, + &qrecord_inserted); add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, num_bytes, parent, ref_root, level, action); spin_unlock(&delayed_refs->lock); + if (qrecord_inserted) + return btrfs_qgroup_trace_extent_post(fs_info, record); return 0; free_head_ref: @@ -829,15 +838,14 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, - u64 owner, u64 offset, u64 reserved, int action, - struct btrfs_delayed_extent_op *extent_op) + u64 owner, u64 offset, u64 reserved, int action) { struct btrfs_delayed_data_ref *ref; struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; + int 
qrecord_inserted; - BUG_ON(extent_op && !extent_op->is_data); ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); if (!ref) return -ENOMEM; @@ -859,7 +867,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, } } - head_ref->extent_op = extent_op; + head_ref->extent_op = NULL; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); @@ -870,13 +878,15 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, */ head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record, bytenr, num_bytes, ref_root, reserved, - action, 1); + action, 1, &qrecord_inserted); add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, num_bytes, parent, ref_root, owner, offset, action); spin_unlock(&delayed_refs->lock); + if (qrecord_inserted) + return btrfs_qgroup_trace_extent_post(fs_info, record); return 0; } @@ -899,7 +909,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr, num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, - extent_op->is_data); + extent_op->is_data, NULL); spin_unlock(&delayed_refs->lock); return 0; @@ -911,11 +921,8 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, * the head node if any where found, or NULL if not. */ struct btrfs_delayed_ref_head * -btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) +btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr) { - struct btrfs_delayed_ref_root *delayed_refs; - - delayed_refs = &trans->transaction->delayed_refs; return find_ref_head(&delayed_refs->href_root, bytenr, 0); } diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 50947b5a9152..0e537f98f1a1 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -250,8 +250,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, - u64 owner, u64 offset, u64 reserved, int action, - struct btrfs_delayed_extent_op *extent_op); + u64 owner, u64 offset, u64 reserved, int action); int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, @@ -262,7 +261,8 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); struct btrfs_delayed_ref_head * -btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); +btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, + u64 bytenr); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head) diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 5de280b9ad73..e653921f05d9 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -304,8 +304,9 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info) dev_replace->cursor_left_last_write_of_item; } -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src) +int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src) { struct btrfs_root *root = fs_info->dev_root; struct btrfs_trans_handle *trans; diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 54ea12bda15b..f94a76844ae7 100644 --- a/fs/btrfs/dev-replace.h +++ 
b/fs/btrfs/dev-replace.h @@ -27,8 +27,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info); int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src); +int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src); void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index b039fe0c751a..60a750678a82 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -80,7 +80,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; u32 data_size; - BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)); + if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) + return -ENOSPC; key.objectid = objectid; key.type = BTRFS_XATTR_ITEM_KEY; @@ -120,7 +121,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *dir, struct btrfs_key *location, + struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index) { int ret = 0; @@ -174,8 +175,7 @@ second_insert: btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name, - name_len, dir, &disk_key, type, - index); + name_len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 37a31b12bb0c..08b74daf35d0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -64,8 +64,7 @@ static const struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); -static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, - int read_only); +static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info); static void btrfs_destroy_ordered_extents(struct btrfs_root *root); static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info); @@ -220,12 +219,12 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ -static struct extent_map *btree_get_extent(struct inode *inode, +static struct extent_map *btree_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; @@ -266,7 +265,7 @@ out: return em; } -u32 btrfs_csum_data(char *data, u32 seed, size_t len) +u32 btrfs_csum_data(const char *data, u32 seed, size_t len) { return btrfs_crc32c(seed, data, len); } @@ -1005,7 +1004,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, return ret; } -static int check_async_write(struct inode *inode, unsigned long bio_flags) +static int check_async_write(unsigned long 
bio_flags) { if (bio_flags & EXTENT_BIO_TREE_LOG) return 0; @@ -1021,7 +1020,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, u64 bio_offset) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int async = check_async_write(inode, bio_flags); + int async = check_async_write(bio_flags); int ret; if (bio_op(bio) != REQ_OP_WRITE) { @@ -1248,8 +1247,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, } -void clean_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, +void clean_tree_block(struct btrfs_fs_info *fs_info, struct extent_buffer *buf) { if (btrfs_header_generation(buf) == @@ -2207,11 +2205,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->delalloc_workers); btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); btrfs_destroy_workqueue(fs_info->endio_repair_workers); btrfs_destroy_workqueue(fs_info->rmw_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); btrfs_destroy_workqueue(fs_info->submit_workers); @@ -2221,6 +2217,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->flush_workers); btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); btrfs_destroy_workqueue(fs_info->extent_workers); + /* + * Now that all other work queues are destroyed, we can safely destroy + * the queues used for metadata I/O, since tasks from those other work + * queues can do metadata I/O operations. 
+ */ + btrfs_destroy_workqueue(fs_info->endio_meta_workers); + btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); } static void free_root_extent_buffers(struct btrfs_root *root) @@ -2802,7 +2805,7 @@ int open_ctree(struct super_block *sb, memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); - ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); + ret = btrfs_check_super_valid(fs_info); if (ret) { btrfs_err(fs_info, "superblock contains fatal errors"); err = -EINVAL; @@ -3263,7 +3266,6 @@ fail_fsdev_sysfs: fail_block_groups: btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); fail_tree_roots: free_root_pointers(fs_info, 1); @@ -3271,6 +3273,7 @@ fail_tree_roots: fail_sb_buffer: btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); fail_alloc: fail_iput: btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -3411,7 +3414,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) */ static int write_dev_supers(struct btrfs_device *device, struct btrfs_super_block *sb, - int do_barriers, int wait, int max_mirrors) + int wait, int max_mirrors) { struct buffer_head *bh; int i; @@ -3450,7 +3453,7 @@ static int write_dev_supers(struct btrfs_device *device, btrfs_set_super_bytenr(sb, bytenr); crc = ~(u32)0; - crc = btrfs_csum_data((char *)sb + + crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); @@ -3696,7 +3699,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures( return num_tolerated_disk_barrier_failures; } -static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) +int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) { struct list_head *head; struct btrfs_device *dev; @@ -3753,7 +3756,7 @@ static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) flags = btrfs_super_flags(sb); btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); - ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors); + ret = write_dev_supers(dev, sb, 0, max_mirrors); if (ret) total_errors++; } @@ -3776,7 +3779,7 @@ static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) if (!dev->in_fs_metadata || !dev->writeable) continue; - ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); + ret = write_dev_supers(dev, sb, 1, max_mirrors); if (ret) total_errors++; } @@ -3790,12 +3793,6 @@ static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) return 0; } -int write_ctree_super(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, int max_mirrors) -{ - return write_all_supers(fs_info, max_mirrors); -} - /* Drop a fs root from the radix tree and free it. */ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) @@ -3985,8 +3982,6 @@ void close_ctree(struct btrfs_fs_info *fs_info) btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); - /* * we must make sure there is not any read request to * submit after we stopping all workers. 
@@ -3994,6 +3989,8 @@ void close_ctree(struct btrfs_fs_info *fs_info) invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); + clear_bit(BTRFS_FS_OPEN, &fs_info->flags); free_root_pointers(fs_info, 1); @@ -4122,8 +4119,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) return btree_read_extent_buffer_pages(fs_info, buf, parent_transid); } -static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, - int read_only) +static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info) { struct btrfs_super_block *sb = fs_info->super_copy; u64 nodesize = btrfs_super_nodesize(sb); @@ -4662,9 +4658,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) } static const struct extent_io_ops btree_extent_io_ops = { - .readpage_end_io_hook = btree_readpage_end_io_hook, - .readpage_io_failed_hook = btree_io_failed_hook, + /* mandatory callbacks */ .submit_bio_hook = btree_submit_bio_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = btree_io_failed_hook, + + /* optional callbacks */ }; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 44dcd9af6b7c..2e0ec29bfd69 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -52,14 +52,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr); -void clean_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, struct extent_buffer *buf); +void clean_tree_block(struct btrfs_fs_info *fs_info, struct extent_buffer *buf); int open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options); void close_ctree(struct btrfs_fs_info *fs_info); -int write_ctree_super(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, int max_mirrors); +int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, struct buffer_head **bh_ret); @@ -118,7 +116,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int atomic); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); -u32 btrfs_csum_data(char *data, u32 seed, size_t len); +u32 btrfs_csum_data(const char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, u8 *result); int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, enum btrfs_wq_endio_type metadata); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 340d90751263..87144c9f9593 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -30,7 +30,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; - fid->objectid = btrfs_ino(inode); + fid->objectid = btrfs_ino(BTRFS_I(inode)); fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; @@ -166,13 +166,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child) if (!path) return ERR_PTR(-ENOMEM); - if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(BTRFS_I(dir)) == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset 
= (u64)-1; root = fs_info->tree_root; } else { - key.objectid = btrfs_ino(dir); + key.objectid = btrfs_ino(BTRFS_I(dir)); key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; } @@ -235,13 +235,10 @@ static int btrfs_get_name(struct dentry *parent, char *name, int ret; u64 ino; - if (!dir || !inode) - return -EINVAL; - if (!S_ISDIR(dir->i_mode)) return -EINVAL; - ino = btrfs_ino(inode); + ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) @@ -255,7 +252,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, root = fs_info->tree_root; } else { key.objectid = ino; - key.offset = btrfs_ino(dir); + key.offset = btrfs_ino(BTRFS_I(dir)); key.type = BTRFS_INODE_REF_KEY; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dcd2e798767e..be5477676cc8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -16,6 +16,7 @@ * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/blkdev.h> @@ -888,7 +889,7 @@ search_again: delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); - head = btrfs_find_delayed_ref_head(trans, bytenr); + head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (head) { if (!mutex_trylock(&head->mutex)) { atomic_inc(&head->node.refs); @@ -1035,10 +1036,11 @@ out_free: #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 static int convert_extent_item_v0(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 owner, u32 extra_size) { + struct btrfs_root *root = fs_info->extent_root; struct btrfs_extent_item *item; struct btrfs_extent_item_v0 *ei0; struct btrfs_extent_ref_v0 *ref0; @@ -1092,7 +1094,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, return ret; BUG_ON(ret); /* Corruption */ - btrfs_extend_item(root->fs_info, path, new_size); + btrfs_extend_item(fs_info, path, new_size); leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); @@ -1151,12 +1153,13 @@ static int match_extent_data_ref(struct extent_buffer *leaf, } static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset) { + struct btrfs_root *root = fs_info->extent_root; struct btrfs_key key; struct btrfs_extent_data_ref *ref; struct extent_buffer *leaf; @@ -1238,12 +1241,13 @@ fail: } static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) { + struct btrfs_root *root = fs_info->extent_root; struct btrfs_key key; struct extent_buffer *leaf; u32 size; @@ -1317,7 +1321,7 @@ fail: } static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, int refs_to_drop, int *last_ref) { @@ -1354,7 +1358,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, num_refs -= refs_to_drop; if (num_refs == 0) { - ret = btrfs_del_item(trans, root, path); + ret = btrfs_del_item(trans, fs_info->extent_root, path); *last_ref = 1; } else { if (key.type == BTRFS_EXTENT_DATA_REF_KEY) @@ -1416,11 +1420,12 @@ static noinline u32 extent_data_ref_count(struct btrfs_path 
*path, } static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) { + struct btrfs_root *root = fs_info->extent_root; struct btrfs_key key; int ret; @@ -1449,7 +1454,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, } static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) @@ -1466,7 +1471,8 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, key.offset = root_objectid; } - ret = btrfs_insert_empty_item(trans, root, path, &key, 0); + ret = btrfs_insert_empty_item(trans, fs_info->extent_root, + path, &key, 0); btrfs_release_path(path); return ret; } @@ -1524,14 +1530,14 @@ static int find_next_key(struct btrfs_path *path, int level, */ static noinline_for_stack int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset, int insert) { - struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_root *root = fs_info->extent_root; struct btrfs_key key; struct extent_buffer *leaf; struct btrfs_extent_item *ei; @@ -1614,7 +1620,7 @@ again: err = -ENOENT; goto out; } - ret = convert_extent_item_v0(trans, root, path, owner, + ret = convert_extent_item_v0(trans, fs_info, path, owner, extra_size); if (ret < 0) { err = ret; @@ -1716,7 +1722,7 @@ out: * helper to add new inline back ref */ static noinline_for_stack -void setup_inline_extent_backref(struct btrfs_root *root, +void setup_inline_extent_backref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, u64 parent, u64 root_objectid, @@ -1739,7 +1745,7 @@ void setup_inline_extent_backref(struct btrfs_root *root, type = extent_ref_type(parent, owner); size = btrfs_extent_inline_ref_size(type); - btrfs_extend_item(root->fs_info, path, size); + btrfs_extend_item(fs_info, path, size); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, ei); @@ -1777,7 +1783,7 @@ void setup_inline_extent_backref(struct btrfs_root *root, } static int lookup_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, @@ -1785,7 +1791,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, { int ret; - ret = lookup_inline_extent_backref(trans, root, path, ref_ret, + ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret, bytenr, num_bytes, parent, root_objectid, owner, offset, 0); if (ret != -ENOENT) @@ -1795,11 +1801,12 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, *ref_ret = NULL; if (owner < BTRFS_FIRST_FREE_OBJECTID) { - ret = lookup_tree_block_ref(trans, root, path, bytenr, parent, - root_objectid); + ret = lookup_tree_block_ref(trans, fs_info, path, bytenr, + parent, root_objectid); } else { - ret = lookup_extent_data_ref(trans, root, path, bytenr, parent, - root_objectid, owner, offset); + ret = lookup_extent_data_ref(trans, fs_info, path, bytenr, + parent, root_objectid, owner, + offset); } return ret; } @@ -1808,7 
+1815,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, * helper to update/remove inline back ref */ static noinline_for_stack -void update_inline_extent_backref(struct btrfs_root *root, +void update_inline_extent_backref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_mod, @@ -1866,14 +1873,14 @@ void update_inline_extent_backref(struct btrfs_root *root, memmove_extent_buffer(leaf, ptr, ptr + size, end - ptr - size); item_size -= size; - btrfs_truncate_item(root->fs_info, path, item_size, 1); + btrfs_truncate_item(fs_info, path, item_size, 1); } btrfs_mark_buffer_dirty(leaf); } static noinline_for_stack int insert_inline_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, @@ -1883,15 +1890,15 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_extent_inline_ref *iref; int ret; - ret = lookup_inline_extent_backref(trans, root, path, &iref, + ret = lookup_inline_extent_backref(trans, fs_info, path, &iref, bytenr, num_bytes, parent, root_objectid, owner, offset, 1); if (ret == 0) { BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); - update_inline_extent_backref(root, path, iref, + update_inline_extent_backref(fs_info, path, iref, refs_to_add, extent_op, NULL); } else if (ret == -ENOENT) { - setup_inline_extent_backref(root, path, iref, parent, + setup_inline_extent_backref(fs_info, path, iref, parent, root_objectid, owner, offset, refs_to_add, extent_op); ret = 0; @@ -1900,7 +1907,7 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans, } static int insert_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) @@ -1908,10 +1915,10 @@ static int insert_extent_backref(struct btrfs_trans_handle *trans, int ret; if (owner < BTRFS_FIRST_FREE_OBJECTID) { BUG_ON(refs_to_add != 1); - ret = insert_tree_block_ref(trans, root, path, bytenr, + ret = insert_tree_block_ref(trans, fs_info, path, bytenr, parent, root_objectid); } else { - ret = insert_extent_data_ref(trans, root, path, bytenr, + ret = insert_extent_data_ref(trans, fs_info, path, bytenr, parent, root_objectid, owner, offset, refs_to_add); } @@ -1919,7 +1926,7 @@ static int insert_extent_backref(struct btrfs_trans_handle *trans, } static int remove_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_drop, int is_data, int *last_ref) @@ -1928,14 +1935,14 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, BUG_ON(!is_data && refs_to_drop != 1); if (iref) { - update_inline_extent_backref(root, path, iref, + update_inline_extent_backref(fs_info, path, iref, -refs_to_drop, NULL, last_ref); } else if (is_data) { - ret = remove_extent_data_ref(trans, root, path, refs_to_drop, + ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop, last_ref); } else { *last_ref = 1; - ret = btrfs_del_item(trans, root, path); + ret = btrfs_del_item(trans, fs_info->extent_root, path); } return ret; } @@ -2089,7 +2096,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, num_bytes, parent, root_objectid, owner, offset, 0, 
- BTRFS_ADD_DELAYED_REF, NULL); + BTRFS_ADD_DELAYED_REF); } return ret; } @@ -2117,9 +2124,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, path->reada = READA_FORWARD; path->leave_spinning = 1; /* this will setup the path even if it fails to insert the back ref */ - ret = insert_inline_extent_backref(trans, fs_info->extent_root, path, - bytenr, num_bytes, parent, - root_objectid, owner, offset, + ret = insert_inline_extent_backref(trans, fs_info, path, bytenr, + num_bytes, parent, root_objectid, + owner, offset, refs_to_add, extent_op); if ((ret < 0 && ret != -EAGAIN) || !ret) goto out; @@ -2143,9 +2150,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, path->reada = READA_FORWARD; path->leave_spinning = 1; /* now insert the actual backref */ - ret = insert_extent_backref(trans, fs_info->extent_root, - path, bytenr, parent, root_objectid, - owner, offset, refs_to_add); + ret = insert_extent_backref(trans, fs_info, path, bytenr, parent, + root_objectid, owner, offset, refs_to_add); if (ret) btrfs_abort_transaction(trans, ret); out: @@ -2290,8 +2296,7 @@ again: item_size = btrfs_item_size_nr(leaf, path->slots[0]); #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 if (item_size < sizeof(*ei)) { - ret = convert_extent_item_v0(trans, fs_info->extent_root, - path, (u64)-1, 0); + ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0); if (ret < 0) { err = ret; goto out; @@ -3028,8 +3033,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, return ret; } -static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +static noinline int check_delayed_ref(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr) { @@ -3037,11 +3041,16 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *ref; struct btrfs_delayed_data_ref *data_ref; struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_transaction *cur_trans; int ret = 0; - delayed_refs = &trans->transaction->delayed_refs; + cur_trans = root->fs_info->running_transaction; + if (!cur_trans) + return 0; + + delayed_refs = &cur_trans->delayed_refs; spin_lock(&delayed_refs->lock); - head = btrfs_find_delayed_ref_head(trans, bytenr); + head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) { spin_unlock(&delayed_refs->lock); return 0; @@ -3090,8 +3099,7 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, return ret; } -static noinline int check_committed_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, +static noinline int check_committed_ref(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr) { @@ -3162,9 +3170,8 @@ out: return ret; } -int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 objectid, u64 offset, u64 bytenr) +int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, + u64 bytenr) { struct btrfs_path *path; int ret; @@ -3175,12 +3182,12 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, return -ENOENT; do { - ret = check_committed_ref(trans, root, path, objectid, + ret = check_committed_ref(root, path, objectid, offset, bytenr); if (ret && ret != -ENOENT) goto out; - ret2 = check_delayed_ref(trans, root, path, objectid, + ret2 = check_delayed_ref(root, path, objectid, offset, bytenr); } while (ret2 == -EAGAIN); @@ -3368,7 +3375,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, 
if (trans->aborted) return 0; again: - inode = lookup_free_space_inode(root, block_group, path); + inode = lookup_free_space_inode(fs_info, block_group, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { ret = PTR_ERR(inode); btrfs_release_path(path); @@ -3382,7 +3389,8 @@ again: if (block_group->ro) goto out_free; - ret = create_free_space_inode(root, trans, block_group, path); + ret = create_free_space_inode(fs_info, trans, block_group, + path); if (ret) goto out_free; goto again; @@ -3424,7 +3432,7 @@ again: if (ret) goto out_put; - ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode); + ret = btrfs_truncate_free_space_cache(trans, NULL, inode); if (ret) goto out_put; } @@ -4119,10 +4127,19 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) return ret; } -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes) +static u64 btrfs_space_info_used(struct btrfs_space_info *s_info, + bool may_use_included) +{ + ASSERT(s_info); + return s_info->bytes_used + s_info->bytes_reserved + + s_info->bytes_pinned + s_info->bytes_readonly + + (may_use_included ? s_info->bytes_may_use : 0); +} + +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes) { struct btrfs_space_info *data_sinfo; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; u64 used; int ret = 0; @@ -4144,9 +4161,7 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes) again: /* make sure we have enough space to handle the data first */ spin_lock(&data_sinfo->lock); - used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + - data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + - data_sinfo->bytes_may_use; + used = btrfs_space_info_used(data_sinfo, true); if (used + bytes > data_sinfo->total_bytes) { struct btrfs_trans_handle *trans; @@ -4267,7 +4282,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len) round_down(start, fs_info->sectorsize); start = round_down(start, fs_info->sectorsize); - ret = btrfs_alloc_data_chunk_ondemand(inode, len); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len); if (ret < 0) return ret; @@ -4421,9 +4436,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); spin_lock(&info->lock); - left = info->total_bytes - info->bytes_used - info->bytes_pinned - - info->bytes_reserved - info->bytes_readonly - - info->bytes_may_use; + left = info->total_bytes - btrfs_space_info_used(info, true); spin_unlock(&info->lock); num_devs = get_profile_num_devs(fs_info, type); @@ -4606,8 +4619,7 @@ static int can_overcommit(struct btrfs_root *root, return 0; profile = btrfs_get_alloc_profile(root, 0); - used = space_info->bytes_used + space_info->bytes_reserved + - space_info->bytes_pinned + space_info->bytes_readonly; + used = btrfs_space_info_used(space_info, false); /* * We only want to allow over committing if we have lots of actual space @@ -4787,11 +4799,10 @@ skip_async: * get us somewhere and then commit the transaction if it does. Otherwise it * will return -ENOSPC. 
*/ -static int may_commit_transaction(struct btrfs_root *root, +static int may_commit_transaction(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 bytes, int force) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; struct btrfs_trans_handle *trans; @@ -4823,7 +4834,7 @@ static int may_commit_transaction(struct btrfs_root *root, spin_unlock(&delayed_rsv->lock); commit: - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(fs_info->fs_root); if (IS_ERR(trans)) return -ENOSPC; @@ -4837,11 +4848,11 @@ struct reserve_ticket { wait_queue_head_t wait; }; -static int flush_space(struct btrfs_root *root, +static int flush_space(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 num_bytes, u64 orig_bytes, int state) { - struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_root *root = fs_info->fs_root; struct btrfs_trans_handle *trans; int nr; int ret = 0; @@ -4881,7 +4892,8 @@ static int flush_space(struct btrfs_root *root, ret = 0; break; case COMMIT_TRANS: - ret = may_commit_transaction(root, space_info, orig_bytes, 0); + ret = may_commit_transaction(fs_info, space_info, + orig_bytes, 0); break; default: ret = -ENOSPC; @@ -4993,8 +5005,8 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) struct reserve_ticket *ticket; int ret; - ret = flush_space(fs_info->fs_root, space_info, to_reclaim, - to_reclaim, flush_state); + ret = flush_space(fs_info, space_info, to_reclaim, to_reclaim, + flush_state); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; @@ -5049,8 +5061,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, spin_unlock(&space_info->lock); do { - flush_space(fs_info->fs_root, space_info, to_reclaim, - to_reclaim, flush_state); + flush_space(fs_info, space_info, to_reclaim, to_reclaim, + flush_state); flush_state++; spin_lock(&space_info->lock); if (ticket->bytes == 0) { @@ -5135,9 +5147,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root, spin_lock(&space_info->lock); ret = -ENOSPC; - used = space_info->bytes_used + space_info->bytes_reserved + - space_info->bytes_pinned + space_info->bytes_readonly + - space_info->bytes_may_use; + used = btrfs_space_info_used(space_info, true); /* * If we have enough space then hooray, make our reservation and carry @@ -5630,9 +5640,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->size = min_t(u64, num_bytes, SZ_512M); if (block_rsv->reserved < block_rsv->size) { - num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + - sinfo->bytes_reserved + sinfo->bytes_readonly + - sinfo->bytes_may_use; + num_bytes = btrfs_space_info_used(sinfo, true); if (sinfo->total_bytes > num_bytes) { num_bytes = sinfo->total_bytes - num_bytes; num_bytes = min(num_bytes, @@ -5735,10 +5743,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) /* Can only return 0 or -ENOSPC */ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; /* * We always use trans->block_rsv here as we will have reserved space * for our orphan when starting the transaction, using get_block_rsv() @@ -5755,19 +5763,19 @@ int 
btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, */ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(inode), num_bytes, 1); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 1); return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); } -void btrfs_orphan_release_metadata(struct inode *inode) +void btrfs_orphan_release_metadata(struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(inode), num_bytes, 0); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 0); btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes); } @@ -5799,7 +5807,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { /* One for parent inode, two for dir entries */ num_bytes = 3 * fs_info->nodesize; - ret = btrfs_qgroup_reserve_meta(root, num_bytes); + ret = btrfs_qgroup_reserve_meta(root, num_bytes, true); if (ret) return ret; } else { @@ -5824,8 +5832,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, } void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, - struct btrfs_block_rsv *rsv, - u64 qgroup_reserved) + struct btrfs_block_rsv *rsv) { btrfs_block_rsv_release(fs_info, rsv, (u64)-1); } @@ -5840,35 +5847,32 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, * reserved extents that need to be freed. This must be called with * BTRFS_I(inode)->lock held. */ -static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) +static unsigned drop_outstanding_extent(struct btrfs_inode *inode, + u64 num_bytes) { unsigned drop_inode_space = 0; unsigned dropped_extents = 0; - unsigned num_extents = 0; + unsigned num_extents; - num_extents = (unsigned)div64_u64(num_bytes + - BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); + num_extents = count_max_extents(num_bytes); ASSERT(num_extents); - ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents); - BTRFS_I(inode)->outstanding_extents -= num_extents; + ASSERT(inode->outstanding_extents >= num_extents); + inode->outstanding_extents -= num_extents; - if (BTRFS_I(inode)->outstanding_extents == 0 && + if (inode->outstanding_extents == 0 && test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) drop_inode_space = 1; /* * If we have more or the same amount of outstanding extents than we have * reserved then we need to leave the reserved extents count alone. */ - if (BTRFS_I(inode)->outstanding_extents >= - BTRFS_I(inode)->reserved_extents) + if (inode->outstanding_extents >= inode->reserved_extents) return drop_inode_space; - dropped_extents = BTRFS_I(inode)->reserved_extents - - BTRFS_I(inode)->outstanding_extents; - BTRFS_I(inode)->reserved_extents -= dropped_extents; + dropped_extents = inode->reserved_extents - inode->outstanding_extents; + inode->reserved_extents -= dropped_extents; return dropped_extents + drop_inode_space; } @@ -5890,24 +5894,21 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) * * This must be called with BTRFS_I(inode)->lock held. 
*/ -static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, +static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes, int reserve) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 old_csums, num_csums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && - BTRFS_I(inode)->csum_bytes == 0) + if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0) return 0; - old_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); if (reserve) - BTRFS_I(inode)->csum_bytes += num_bytes; + inode->csum_bytes += num_bytes; else - BTRFS_I(inode)->csum_bytes -= num_bytes; - num_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + inode->csum_bytes -= num_bytes; + num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); /* No change, no need to reserve more */ if (old_csums == num_csums) @@ -5920,14 +5921,14 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums); } -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv; u64 to_reserve = 0; u64 csum_bytes; - unsigned nr_extents = 0; + unsigned nr_extents; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; int ret = 0; bool delalloc_lock = true; @@ -5955,31 +5956,28 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) schedule_timeout(1); if (delalloc_lock) - mutex_lock(&BTRFS_I(inode)->delalloc_mutex); + mutex_lock(&inode->delalloc_mutex); num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); - nr_extents = (unsigned)div64_u64(num_bytes + - BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); - BTRFS_I(inode)->outstanding_extents += nr_extents; + spin_lock(&inode->lock); + nr_extents = count_max_extents(num_bytes); + inode->outstanding_extents += nr_extents; nr_extents = 0; - if (BTRFS_I(inode)->outstanding_extents > - BTRFS_I(inode)->reserved_extents) - nr_extents += BTRFS_I(inode)->outstanding_extents - - BTRFS_I(inode)->reserved_extents; + if (inode->outstanding_extents > inode->reserved_extents) + nr_extents += inode->outstanding_extents - + inode->reserved_extents; /* We always want to reserve a slot for updating the inode. 
*/ to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1); to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); - csum_bytes = BTRFS_I(inode)->csum_bytes; - spin_unlock(&BTRFS_I(inode)->lock); + csum_bytes = inode->csum_bytes; + spin_unlock(&inode->lock); if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { ret = btrfs_qgroup_reserve_meta(root, - nr_extents * fs_info->nodesize); + nr_extents * fs_info->nodesize, true); if (ret) goto out_fail; } @@ -5991,17 +5989,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) goto out_fail; } - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1); release_extra = true; } - BTRFS_I(inode)->reserved_extents += nr_extents; - spin_unlock(&BTRFS_I(inode)->lock); + inode->reserved_extents += nr_extents; + spin_unlock(&inode->lock); if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); if (to_reserve) trace_btrfs_space_reservation(fs_info, "delalloc", @@ -6012,17 +6010,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) return 0; out_fail: - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); /* * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers * so we can just reduce our inodes csum bytes and carry on. */ - if (BTRFS_I(inode)->csum_bytes == csum_bytes) { + if (inode->csum_bytes == csum_bytes) { calc_csum_metadata_size(inode, num_bytes, 0); } else { - u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; + u64 orig_csum_bytes = inode->csum_bytes; u64 bytes; /* @@ -6033,8 +6031,8 @@ out_fail: * number of bytes that were freed while we were trying our * reservation. */ - bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; - BTRFS_I(inode)->csum_bytes = csum_bytes; + bytes = csum_bytes - inode->csum_bytes; + inode->csum_bytes = csum_bytes; to_free = calc_csum_metadata_size(inode, bytes, 0); @@ -6043,7 +6041,7 @@ out_fail: * been making this reservation and our ->csum_bytes were not * artificially inflated. */ - BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; + inode->csum_bytes = csum_bytes - num_bytes; bytes = csum_bytes - orig_csum_bytes; bytes = calc_csum_metadata_size(inode, bytes, 0); @@ -6055,13 +6053,13 @@ out_fail: * need to do anything, the other free-ers did the correct * thing. */ - BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; + inode->csum_bytes = orig_csum_bytes - num_bytes; if (bytes > to_free) to_free = bytes - to_free; else to_free = 0; } - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); @@ -6071,7 +6069,7 @@ out_fail: btrfs_ino(inode), to_free, 0); } if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); return ret; } @@ -6084,27 +6082,27 @@ out_fail: * once we complete IO for a given set of bytes to release their metadata * reservations. 
*/ -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 to_free = 0; unsigned dropped; num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); if (num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes, 0); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped > 0) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); if (btrfs_is_testing(fs_info)) return; - trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(inode), to_free, 0); + trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode), + to_free, 0); btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free); } @@ -6139,7 +6137,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) ret = btrfs_check_data_free_space(inode, start, len); if (ret < 0) return ret; - ret = btrfs_delalloc_reserve_metadata(inode, len); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len); if (ret < 0) btrfs_free_reserved_data_space(inode, start, len); return ret; @@ -6162,7 +6160,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) */ void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len) { - btrfs_delalloc_release_metadata(inode, len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), len); btrfs_free_reserved_data_space(inode, start, len); } @@ -6561,8 +6559,7 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, spin_unlock(&space_info->lock); return ret; } -void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) +void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) { struct btrfs_caching_control *next; struct btrfs_caching_control *caching_ctl; @@ -6845,7 +6842,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, if (is_data) skinny_metadata = 0; - ret = lookup_extent_backref(trans, extent_root, path, &iref, + ret = lookup_extent_backref(trans, info, path, &iref, bytenr, num_bytes, parent, root_objectid, owner_objectid, owner_offset); @@ -6877,8 +6874,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, #endif if (!found_extent) { BUG_ON(iref); - ret = remove_extent_backref(trans, extent_root, path, - NULL, refs_to_drop, + ret = remove_extent_backref(trans, info, path, NULL, + refs_to_drop, is_data, &last_ref); if (ret) { btrfs_abort_transaction(trans, ret); @@ -6953,8 +6950,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 if (item_size < sizeof(*ei)) { BUG_ON(found_extent || extent_slot != path->slots[0]); - ret = convert_extent_item_v0(trans, extent_root, path, - owner_objectid, 0); + ret = convert_extent_item_v0(trans, info, path, owner_objectid, + 0); if (ret < 0) { btrfs_abort_transaction(trans, ret); goto out; @@ -7021,7 +7018,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); } if (found_extent) { - ret = remove_extent_backref(trans, extent_root, path, + ret = remove_extent_backref(trans, info, path, iref, refs_to_drop, is_data, &last_ref); if (ret) { @@ -7095,7 +7092,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 
delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); - head = btrfs_find_delayed_ref_head(trans, bytenr); + head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) goto out_delayed_unlock; @@ -7244,7 +7241,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, num_bytes, parent, root_objectid, owner, offset, 0, - BTRFS_DROP_DELAYED_REF, NULL); + BTRFS_DROP_DELAYED_REF); } return ret; } @@ -7419,12 +7416,11 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache, * If there is no suitable free space, we will record the max size of * the free space extent currently. */ -static noinline int find_free_extent(struct btrfs_root *orig_root, +static noinline int find_free_extent(struct btrfs_fs_info *fs_info, u64 ram_bytes, u64 num_bytes, u64 empty_size, u64 hint_byte, struct btrfs_key *ins, u64 flags, int delalloc) { - struct btrfs_fs_info *fs_info = orig_root->fs_info; int ret = 0; struct btrfs_root *root = fs_info->extent_root; struct btrfs_free_cluster *last_ptr = NULL; @@ -7716,18 +7712,20 @@ unclustered_alloc: last_ptr->fragmented = 1; spin_unlock(&last_ptr->lock); } - spin_lock(&block_group->free_space_ctl->tree_lock); - if (cached && - block_group->free_space_ctl->free_space < - num_bytes + empty_cluster + empty_size) { - if (block_group->free_space_ctl->free_space > - max_extent_size) - max_extent_size = - block_group->free_space_ctl->free_space; - spin_unlock(&block_group->free_space_ctl->tree_lock); - goto loop; + if (cached) { + struct btrfs_free_space_ctl *ctl = + block_group->free_space_ctl; + + spin_lock(&ctl->tree_lock); + if (ctl->free_space < + num_bytes + empty_cluster + empty_size) { + if (ctl->free_space > max_extent_size) + max_extent_size = ctl->free_space; + spin_unlock(&ctl->tree_lock); + goto loop; + } + spin_unlock(&ctl->tree_lock); } - spin_unlock(&block_group->free_space_ctl->tree_lock); offset = btrfs_find_space_for_alloc(block_group, search_start, num_bytes, empty_size, @@ -7908,9 +7906,8 @@ static void dump_space_info(struct btrfs_fs_info *fs_info, spin_lock(&info->lock); btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", info->flags, - info->total_bytes - info->bytes_used - info->bytes_pinned - - info->bytes_reserved - info->bytes_readonly - - info->bytes_may_use, (info->full) ? "" : "not "); + info->total_bytes - btrfs_space_info_used(info, true), + info->full ? 
"" : "not "); btrfs_info(fs_info, "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu", info->total_bytes, info->bytes_used, info->bytes_pinned, @@ -7951,7 +7948,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, flags = btrfs_get_alloc_profile(root, is_data); again: WARN_ON(num_bytes < fs_info->sectorsize); - ret = find_free_extent(root, ram_bytes, num_bytes, empty_size, + ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size, hint_byte, ins, flags, delalloc); if (!ret && !is_data) { btrfs_dec_block_group_reservations(fs_info, ins->objectid); @@ -8194,8 +8191,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid, ins->offset, 0, root_objectid, owner, offset, - ram_bytes, BTRFS_ADD_DELAYED_EXTENT, - NULL); + ram_bytes, BTRFS_ADD_DELAYED_EXTENT); return ret; } @@ -8256,7 +8252,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, btrfs_set_header_generation(buf, trans->transid); btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); btrfs_tree_lock(buf); - clean_tree_block(trans, fs_info, buf); + clean_tree_block(fs_info, buf); clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); btrfs_set_lock_blocking(buf); @@ -8351,10 +8347,11 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, * returns the tree buffer or an ERR_PTR on error. */ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 parent, u64 root_objectid, - struct btrfs_disk_key *key, int level, - u64 hint, u64 empty_size) + struct btrfs_root *root, + u64 parent, u64 root_objectid, + const struct btrfs_disk_key *key, + int level, u64 hint, + u64 empty_size) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_key ins; @@ -8876,7 +8873,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, btrfs_set_lock_blocking(eb); path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; } - clean_tree_block(trans, fs_info, eb); + clean_tree_block(fs_info, eb); } if (eb == root->node) { @@ -9346,8 +9343,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) num_bytes = cache->key.offset - cache->reserved - cache->pinned - cache->bytes_super - btrfs_block_group_used(&cache->item); - if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + - sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes + + if (btrfs_space_info_used(sinfo, true) + num_bytes + min_allocable_bytes <= sinfo->total_bytes) { sinfo->bytes_readonly += num_bytes; cache->ro++; @@ -9360,17 +9356,16 @@ out: return ret; } -int btrfs_inc_block_group_ro(struct btrfs_root *root, +int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *cache) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; u64 alloc_flags; int ret; again: - trans = btrfs_join_transaction(root); + trans = btrfs_join_transaction(fs_info->extent_root); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -9557,9 +9552,8 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr) * all of the extents from this block group. 
If we can, we're good */ if ((space_info->total_bytes != block_group->key.offset) && - (space_info->bytes_used + space_info->bytes_reserved + - space_info->bytes_pinned + space_info->bytes_readonly + - min_free < space_info->total_bytes)) { + (btrfs_space_info_used(space_info, false) + min_free < + space_info->total_bytes)) { spin_unlock(&space_info->lock); goto out; } @@ -9742,6 +9736,11 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) } } +/* + * Must be called only after stopping all workers, since we could have block + * group caching kthreads running, and therefore they could race with us if we + * freed the block groups before stopping them. + */ int btrfs_free_block_groups(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; @@ -9781,9 +9780,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); - if (block_group->cached == BTRFS_CACHE_STARTED) - wait_block_group_cache_done(block_group); - /* * We haven't cached this block group, which means we could * possibly have excluded extents on this block group. @@ -9793,6 +9789,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) free_excluded_extents(info, block_group); btrfs_remove_free_space_cache(block_group); + ASSERT(block_group->cached != BTRFS_CACHE_STARTED); ASSERT(list_empty(&block_group->dirty_list)); ASSERT(list_empty(&block_group->io_list)); ASSERT(list_empty(&block_group->bg_list)); @@ -10317,7 +10314,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * get the inode first so any iput calls done for the io_list * aren't the final iput (no unlinks allowed now) */ - inode = lookup_free_space_inode(tree_root, block_group, path); + inode = lookup_free_space_inode(fs_info, block_group, path); mutex_lock(&trans->transaction->cache_write_mutex); /* @@ -10344,7 +10341,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, mutex_unlock(&trans->transaction->cache_write_mutex); if (!IS_ERR(inode)) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4ac383a3a649..28e81922a21c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -98,7 +98,7 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { btrfs_debug_rl(BTRFS_I(inode)->root->fs_info, "%s: ino %llu isize %llu odd range [%llu,%llu]", - caller, btrfs_ino(inode), isize, start, end); + caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); } } #else @@ -144,7 +144,7 @@ static void add_extent_changeset(struct extent_state *state, unsigned bits, if (!set && (state->state & bits) == 0) return; changeset->bytes_changed += state->end - state->start + 1; - ret = ulist_add(changeset->range_changed, state->start, state->end, + ret = ulist_add(&changeset->range_changed, state->start, state->end, GFP_ATOMIC); /* ENOMEM */ BUG_ON(ret < 0); @@ -226,6 +226,11 @@ static struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; + /* + * The given mask might be not appropriate for the slab allocator, + * drop the unsupported bits + */ + mask &= ~(__GFP_DMA32|__GFP_HIGHMEM); state = kmem_cache_alloc(extent_state_cache, mask); if (!state) return state; @@ -423,7 +428,8 @@ static void clear_state_cb(struct extent_io_tree *tree, struct extent_state *state, unsigned *bits) { if 
(tree->ops && tree->ops->clear_bit_hook) - tree->ops->clear_bit_hook(tree->mapping->host, state, bits); + tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host), + state, bits); } static void set_state_bits(struct extent_io_tree *tree, @@ -1549,33 +1555,24 @@ out: return found; } +static int __process_pages_contig(struct address_space *mapping, + struct page *locked_page, + pgoff_t start_index, pgoff_t end_index, + unsigned long page_ops, pgoff_t *index_ret); + static noinline void __unlock_for_delalloc(struct inode *inode, struct page *locked_page, u64 start, u64 end) { - int ret; - struct page *pages[16]; unsigned long index = start >> PAGE_SHIFT; unsigned long end_index = end >> PAGE_SHIFT; - unsigned long nr_pages = end_index - index + 1; - int i; + ASSERT(locked_page); if (index == locked_page->index && end_index == index) return; - while (nr_pages > 0) { - ret = find_get_pages_contig(inode->i_mapping, index, - min_t(unsigned long, nr_pages, - ARRAY_SIZE(pages)), pages); - for (i = 0; i < ret; i++) { - if (pages[i] != locked_page) - unlock_page(pages[i]); - put_page(pages[i]); - } - nr_pages -= ret; - index += ret; - cond_resched(); - } + __process_pages_contig(inode->i_mapping, locked_page, index, end_index, + PAGE_UNLOCK, NULL); } static noinline int lock_delalloc_pages(struct inode *inode, @@ -1584,59 +1581,19 @@ static noinline int lock_delalloc_pages(struct inode *inode, u64 delalloc_end) { unsigned long index = delalloc_start >> PAGE_SHIFT; - unsigned long start_index = index; + unsigned long index_ret = index; unsigned long end_index = delalloc_end >> PAGE_SHIFT; - unsigned long pages_locked = 0; - struct page *pages[16]; - unsigned long nrpages; int ret; - int i; - /* the caller is responsible for locking the start index */ + ASSERT(locked_page); if (index == locked_page->index && index == end_index) return 0; - /* skip the page at the start index */ - nrpages = end_index - index + 1; - while (nrpages > 0) { - ret = find_get_pages_contig(inode->i_mapping, index, - min_t(unsigned long, - nrpages, ARRAY_SIZE(pages)), pages); - if (ret == 0) { - ret = -EAGAIN; - goto done; - } - /* now we have an array of pages, lock them all */ - for (i = 0; i < ret; i++) { - /* - * the caller is taking responsibility for - * locked_page - */ - if (pages[i] != locked_page) { - lock_page(pages[i]); - if (!PageDirty(pages[i]) || - pages[i]->mapping != inode->i_mapping) { - ret = -EAGAIN; - unlock_page(pages[i]); - put_page(pages[i]); - goto done; - } - } - put_page(pages[i]); - pages_locked++; - } - nrpages -= ret; - index += ret; - cond_resched(); - } - ret = 0; -done: - if (ret && pages_locked) { - __unlock_for_delalloc(inode, locked_page, - delalloc_start, - ((u64)(start_index + pages_locked - 1)) << - PAGE_SHIFT); - } + ret = __process_pages_contig(inode->i_mapping, locked_page, index, + end_index, PAGE_LOCK, &index_ret); + if (ret == -EAGAIN) + __unlock_for_delalloc(inode, locked_page, delalloc_start, + (u64)index_ret << PAGE_SHIFT); return ret; } @@ -1726,37 +1683,47 @@ out_failed: return found; } -void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, - u64 delalloc_end, struct page *locked_page, - unsigned clear_bits, - unsigned long page_ops) +static int __process_pages_contig(struct address_space *mapping, + struct page *locked_page, + pgoff_t start_index, pgoff_t end_index, + unsigned long page_ops, pgoff_t *index_ret) { - struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; - int ret; + unsigned long nr_pages = end_index - start_index + 1; + unsigned long 
pages_locked = 0; + pgoff_t index = start_index; struct page *pages[16]; - unsigned long index = start >> PAGE_SHIFT; - unsigned long end_index = end >> PAGE_SHIFT; - unsigned long nr_pages = end_index - index + 1; + unsigned ret; + int err = 0; int i; - clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); - if (page_ops == 0) - return; + if (page_ops & PAGE_LOCK) { + ASSERT(page_ops == PAGE_LOCK); + ASSERT(index_ret && *index_ret == start_index); + } if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0) - mapping_set_error(inode->i_mapping, -EIO); + mapping_set_error(mapping, -EIO); while (nr_pages > 0) { - ret = find_get_pages_contig(inode->i_mapping, index, + ret = find_get_pages_contig(mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); - for (i = 0; i < ret; i++) { + if (ret == 0) { + /* + * Only if we're going to lock these pages, + * can we find nothing at @index. + */ + ASSERT(page_ops & PAGE_LOCK); + return ret; + } + for (i = 0; i < ret; i++) { if (page_ops & PAGE_SET_PRIVATE2) SetPagePrivate2(pages[i]); if (pages[i] == locked_page) { put_page(pages[i]); + pages_locked++; continue; } if (page_ops & PAGE_CLEAR_DIRTY) @@ -1769,12 +1736,40 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, end_page_writeback(pages[i]); if (page_ops & PAGE_UNLOCK) unlock_page(pages[i]); + if (page_ops & PAGE_LOCK) { + lock_page(pages[i]); + if (!PageDirty(pages[i]) || + pages[i]->mapping != mapping) { + unlock_page(pages[i]); + put_page(pages[i]); + err = -EAGAIN; + goto out; + } + } put_page(pages[i]); + pages_locked++; } nr_pages -= ret; index += ret; cond_resched(); } +out: + if (err && index_ret) + *index_ret = start_index + pages_locked - 1; + return err; +} + +void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, + u64 delalloc_end, struct page *locked_page, + unsigned clear_bits, + unsigned long page_ops) +{ + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0, + NULL, GFP_NOFS); + + __process_pages_contig(inode->i_mapping, locked_page, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + page_ops, NULL); } /* @@ -1965,11 +1960,11 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) SetPageUptodate(page); } -int free_io_failure(struct inode *inode, struct io_failure_record *rec) +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec) { int ret; int err = 0; - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; set_state_failrec(failure_tree, rec->start, NULL); ret = clear_extent_bits(failure_tree, rec->start, @@ -1978,7 +1973,7 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) if (ret) err = ret; - ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, + ret = clear_extent_bits(&inode->io_tree, rec->start, rec->start + rec->len - 1, EXTENT_DAMAGED); if (ret && !err) @@ -1998,10 +1993,11 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) * currently, there can be no more than two copies of every data bit. thus, * exactly one rewrite is required. 
*/ -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, int mirror_num) +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num) { - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio; struct btrfs_device *dev; u64 map_length = 0; @@ -2080,7 +2076,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; - ret = repair_io_failure(fs_info->btree_inode, start, + ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start, PAGE_SIZE, start, p, start - page_offset(p), mirror_num); if (ret) @@ -2095,23 +2091,23 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, * each time an IO finishes, we do a fast check in the IO failure tree * to see if we need to process or clean up an io_failure_record */ -int clean_io_failure(struct inode *inode, u64 start, struct page *page, +int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page, unsigned int pg_offset) { u64 private; struct io_failure_record *failrec; - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_state *state; int num_copies; int ret; private = 0; - ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, + ret = count_range_bits(&inode->io_failure_tree, &private, (u64)-1, 1, EXTENT_DIRTY, 0); if (!ret) return 0; - ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start, + ret = get_state_failrec(&inode->io_failure_tree, start, &failrec); if (ret) return 0; @@ -2128,11 +2124,11 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page, if (fs_info->sb->s_flags & MS_RDONLY) goto out; - spin_lock(&BTRFS_I(inode)->io_tree.lock); - state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, + spin_lock(&inode->io_tree.lock); + state = find_first_extent_bit_state(&inode->io_tree, failrec->start, EXTENT_LOCKED); - spin_unlock(&BTRFS_I(inode)->io_tree.lock); + spin_unlock(&inode->io_tree.lock); if (state && state->start <= failrec->start && state->end >= failrec->start + failrec->len - 1) { @@ -2157,9 +2153,9 @@ out: * - under ordered extent * - the inode is freeing */ -void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) { - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; struct io_failure_record *failrec; struct extent_state *state, *next; @@ -2399,7 +2395,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -2412,7 +2408,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, (int)phy_offset, failed_bio->bi_end_io, NULL); if (!bio) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -2424,7 +2420,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, failrec->bio_flags, 0); if (ret) { - 
free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -2441,12 +2437,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (tree->ops && tree->ops->writepage_end_io_hook) { - ret = tree->ops->writepage_end_io_hook(page, start, - end, NULL, uptodate); - if (ret) - uptodate = 0; - } + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, end, NULL, + uptodate); if (!uptodate) { ClearPageUptodate(page); @@ -2574,21 +2567,21 @@ static void end_bio_extent_readpage(struct bio *bio) len = bvec->bv_len; mirror = io_bio->mirror_num; - if (likely(uptodate && tree->ops && - tree->ops->readpage_end_io_hook)) { + if (likely(uptodate && tree->ops)) { ret = tree->ops->readpage_end_io_hook(io_bio, offset, page, start, end, mirror); if (ret) uptodate = 0; else - clean_io_failure(inode, start, page, 0); + clean_io_failure(BTRFS_I(inode), start, + page, 0); } if (likely(uptodate)) goto readpage_ok; - if (tree->ops && tree->ops->readpage_io_failed_hook) { + if (tree->ops) { ret = tree->ops->readpage_io_failed_hook(page, mirror); if (!ret && !bio->bi_error) uptodate = 1; @@ -2737,7 +2730,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num, bio->bi_private = NULL; bio_get(bio); - if (tree->ops && tree->ops->submit_bio_hook) + if (tree->ops) ret = tree->ops->submit_bio_hook(page->mapping->host, bio, mirror_num, bio_flags, start); else @@ -2752,7 +2745,7 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page, unsigned long bio_flags) { int ret = 0; - if (tree->ops && tree->ops->merge_bio_hook) + if (tree->ops) ret = tree->ops->merge_bio_hook(page, offset, size, bio, bio_flags); return ret; @@ -2765,7 +2758,6 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree, size_t size, unsigned long offset, struct block_device *bdev, struct bio **bio_ret, - unsigned long max_pages, bio_end_io_t end_io_func, int mirror_num, unsigned long prev_bio_flags, @@ -2864,7 +2856,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, *em_cached = NULL; } - em = get_extent(inode, page, pg_offset, start, len, 0); + em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); if (em_cached && !IS_ERR_OR_NULL(em)) { BUG_ON(*em_cached); atomic_inc(&em->refs); @@ -2931,7 +2923,6 @@ static int __do_readpage(struct extent_io_tree *tree, } } while (cur <= end) { - unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1; bool force_bio_submit = false; if (cur >= last_byte) { @@ -3066,10 +3057,9 @@ static int __do_readpage(struct extent_io_tree *tree, continue; } - pnr -= page->index; ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL, page, sector, disk_io_size, pg_offset, - bdev, bio, pnr, + bdev, bio, end_bio_extent_readpage, mirror_num, *bio_flags, this_bio_flag, @@ -3110,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, inode = pages[0]->mapping->host; while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, end - start + 1); if (!ordered) break; @@ -3182,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, PAGE_SIZE); if (!ordered) break; @@ -3210,7 +3200,7 @@ int 
extent_read_full_page(struct extent_io_tree *tree, struct page *page, return ret; } -static void update_nr_written(struct page *page, struct writeback_control *wbc, +static void update_nr_written(struct writeback_control *wbc, unsigned long nr_written) { wbc->nr_to_write -= nr_written; @@ -3330,7 +3320,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, u64 block_start; u64 iosize; sector_t sector; - struct extent_state *cached_state = NULL; struct extent_map *em; struct block_device *bdev; size_t pg_offset = 0; @@ -3349,10 +3338,9 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, else redirty_page_for_writepage(wbc, page); - update_nr_written(page, wbc, nr_written); + update_nr_written(wbc, nr_written); unlock_page(page); - ret = 1; - goto done_unlocked; + return 1; } } @@ -3360,7 +3348,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, * we don't want to touch the inode after unlocking the page, * so we update the mapping writeback index now */ - update_nr_written(page, wbc, nr_written + 1); + update_nr_written(wbc, nr_written + 1); end = page_end; if (i_size <= start) { @@ -3374,7 +3362,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, while (cur <= end) { u64 em_end; - unsigned long max_nr; if (cur >= i_size) { if (tree->ops && tree->ops->writepage_end_io_hook) @@ -3382,7 +3369,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, page_end, NULL, 1); break; } - em = epd->get_extent(inode, page, pg_offset, cur, + em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur, end - cur + 1, 1); if (IS_ERR_OR_NULL(em)) { SetPageError(page); @@ -3431,8 +3418,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, continue; } - max_nr = (i_size >> PAGE_SHIFT) + 1; - set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { btrfs_err(BTRFS_I(inode)->root->fs_info, @@ -3442,11 +3427,14 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc, page, sector, iosize, pg_offset, - bdev, &epd->bio, max_nr, + bdev, &epd->bio, end_bio_extent_writepage, 0, 0, 0, false); - if (ret) + if (ret) { SetPageError(page); + if (PageWriteback(page)) + end_page_writeback(page); + } cur = cur + iosize; pg_offset += iosize; @@ -3454,11 +3442,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, } done: *nr_ret = nr; - -done_unlocked: - - /* drop our reference on any cached states */ - free_extent_state(cached_state); return ret; } @@ -3761,20 +3744,21 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, set_page_writeback(p); ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc, p, offset >> 9, PAGE_SIZE, 0, bdev, - &epd->bio, -1, + &epd->bio, end_bio_extent_buffer_writepage, 0, epd->bio_flags, bio_flags, false); epd->bio_flags = bio_flags; if (ret) { set_btree_ioerr(p); - end_page_writeback(p); + if (PageWriteback(p)) + end_page_writeback(p); if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) end_extent_buffer_writeback(eb); ret = -EIO; break; } offset += PAGE_SIZE; - update_nr_written(p, wbc, 1); + update_nr_written(wbc, 1); unlock_page(p); } @@ -3926,8 +3910,7 @@ retry: * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. 
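/*
 * Aside (not part of the patch): the comment just above notes that the
 * write_cache_pages()-style walker may skip a page that is already under IO,
 * except when called for data integrity (WB_SYNC_ALL), where it must wait.
 * A tiny illustrative model of that distinction follows; the types and
 * wait_on_page() helper are made up for the example, not kernel code.
 */
#include <stdio.h>

enum sync_mode { SYNC_NONE, SYNC_ALL };

struct fake_page { int dirty; int under_writeback; };

static void wait_on_page(struct fake_page *p)
{
        p->under_writeback = 0;         /* pretend the in-flight IO finished */
}

static int write_cache_pages_model(struct fake_page *pages, int nr,
                                   enum sync_mode mode)
{
        int i, written = 0;

        for (i = 0; i < nr; i++) {
                if (pages[i].under_writeback) {
                        if (mode == SYNC_NONE)
                                continue;               /* best effort: skip the busy page */
                        wait_on_page(&pages[i]);        /* data integrity: must wait */
                }
                if (pages[i].dirty) {
                        pages[i].dirty = 0;
                        written++;
                }
        }
        return written;
}

int main(void)
{
        struct fake_page pages[3] = { {1, 0}, {1, 1}, {1, 0} };

        printf("SYNC_ALL wrote %d pages\n",
               write_cache_pages_model(pages, 3, SYNC_ALL));
        return 0;
}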
*/ -static int extent_write_cache_pages(struct extent_io_tree *tree, - struct address_space *mapping, +static int extent_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data, void (*flush_fn)(void *)) @@ -4168,8 +4151,7 @@ int extent_writepages(struct extent_io_tree *tree, .bio_flags = 0, }; - ret = extent_write_cache_pages(tree, mapping, wbc, - __extent_writepage, &epd, + ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd, flush_write_bio); flush_epd_write_bio(&epd); return ret; @@ -4264,8 +4246,6 @@ static int try_release_extent_state(struct extent_map_tree *map, EXTENT_IOBITS, 0, NULL)) ret = 0; else { - if ((mask & GFP_NOFS) == GFP_NOFS) - mask = GFP_NOFS; /* * at this point we can safely clear everything except the * locked bit and the nodatasum bit @@ -4354,7 +4334,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, if (len == 0) break; len = ALIGN(len, sectorsize); - em = get_extent(inode, NULL, 0, offset, len, 0); + em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0); if (IS_ERR_OR_NULL(em)) return em; @@ -4410,8 +4390,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * lookup the last file extent. We're not using i_size here * because there might be preallocation past i_size */ - ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1, - 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(BTRFS_I(inode)), -1, 0); if (ret < 0) { btrfs_free_path(path); return ret; @@ -4426,7 +4406,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, found_type = found_key.type; /* No extents, but there might be delalloc bits */ - if (found_key.objectid != btrfs_ino(inode) || + if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) || found_type != BTRFS_EXTENT_DATA_KEY) { /* have to trust i_size as the end */ last = (u64)-1; @@ -4535,8 +4515,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * lookup stuff. */ ret = btrfs_check_shared(trans, root->fs_info, - root->objectid, - btrfs_ino(inode), bytenr); + root->objectid, + btrfs_ino(BTRFS_I(inode)), bytenr); if (trans) btrfs_end_transaction(trans); if (ret < 0) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 17f9ce479ed7..3e4fad4a909d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -45,13 +45,14 @@ #define EXTENT_BUFFER_IN_TREE 10 #define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */ -/* these are flags for extent_clear_unlock_delalloc */ +/* these are flags for __process_pages_contig */ #define PAGE_UNLOCK (1 << 0) #define PAGE_CLEAR_DIRTY (1 << 1) #define PAGE_SET_WRITEBACK (1 << 2) #define PAGE_END_WRITEBACK (1 << 3) #define PAGE_SET_PRIVATE2 (1 << 4) #define PAGE_SET_ERROR (1 << 5) +#define PAGE_LOCK (1 << 6) /* * page->private values. 
Every page that is controlled by the extent @@ -83,6 +84,7 @@ extern void le_bitmap_clear(u8 *map, unsigned int start, int len); struct extent_state; struct btrfs_root; +struct btrfs_inode; struct btrfs_io_bio; struct io_failure_record; @@ -90,24 +92,34 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset); struct extent_io_ops { - int (*fill_delalloc)(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, - unsigned long *nr_written); - int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + /* + * The following callbacks must be allways defined, the function + * pointer will be called unconditionally. + */ extent_submit_bio_hook_t *submit_bio_hook; + int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, + struct page *page, u64 start, u64 end, + int mirror); int (*merge_bio_hook)(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); - int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, - struct page *page, u64 start, u64 end, - int mirror); - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, + + /* + * Optional hooks, called if the pointer is not NULL + */ + int (*fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written); + + int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, unsigned *bits); - void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned *bits); + void (*clear_bit_hook)(struct btrfs_inode *inode, + struct extent_state *state, + unsigned *bits); void (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -192,7 +204,7 @@ struct extent_changeset { u64 bytes_changed; /* Changed ranges */ - struct ulist *range_changed; + struct ulist range_changed; }; static inline void extent_set_compress_type(unsigned long *bio_flags, @@ -208,7 +220,7 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -typedef struct extent_map *(get_extent_t)(struct inode *inode, +typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, @@ -450,12 +462,13 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask); struct btrfs_fs_info; +struct btrfs_inode; -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, - int mirror_num); -int clean_io_failure(struct inode *inode, u64 start, struct page *page, - unsigned int pg_offset); +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num); +int clean_io_failure(struct btrfs_inode *inode, u64 start, + struct page *page, unsigned int pg_offset); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); int repair_eb_io_failure(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int mirror_num); @@ -479,7 +492,9 @@ struct io_failure_record { int in_validation; }; -void 
btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end); + +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, + u64 end); int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, struct io_failure_record **failrec_ret); int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio, @@ -488,7 +503,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, struct io_failure_record *failrec, struct page *page, int pg_offset, int icsum, bio_end_io_t *endio_func, void *data); -int free_io_failure(struct inode *inode, struct io_failure_record *rec); +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS noinline u64 find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index e97e322c28f0..64fcb31d7163 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. */ - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { path->search_commit_root = 1; path->skip_locking = 1; } @@ -255,7 +255,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, } else { btrfs_info_rl(fs_info, "no csum found for inode %llu start %llu", - btrfs_ino(inode), offset); + btrfs_ino(BTRFS_I(inode)), offset); } item = NULL; btrfs_release_path(path); @@ -643,7 +643,33 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { - ret = btrfs_del_item(trans, root, path); + int del_nr = 1; + + /* + * Check how many csum items preceding this one in this + * leaf correspond to our range and then delete them all + * at once. 
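/*
 * Aside (not part of the patch): the new code in btrfs_del_csums() above scans
 * backwards over the csum items that precede the current slot in the same leaf
 * and still fall inside the deleted range, then removes the whole run with one
 * btrfs_del_items() call instead of one btrfs_del_item() per entry.  The sketch
 * below models just that back-scan over a sorted offset array; the array and
 * helper names are hypothetical, not the btree API, and the type/objectid
 * checks of the real code are omitted.
 */
#include <stdint.h>
#include <stdio.h>

/* Return how many items to delete, moving *first_slot backwards as needed. */
static int batch_csum_deletion(const uint64_t *offsets, int slot,
                               uint64_t bytenr, int *first_slot)
{
        int del_nr = 1;

        while (slot > 0 && offsets[slot - 1] >= bytenr) {
                slot--;         /* preceding item is also inside our range */
                del_nr++;
        }
        *first_slot = slot;
        return del_nr;
}

int main(void)
{
        /* csum item offsets in one leaf, sorted ascending */
        const uint64_t offsets[] = { 4096, 8192, 12288, 16384, 20480 };
        int first;
        int nr = batch_csum_deletion(offsets, 3, 8192, &first);

        /* deletes slots 1..3 in one shot, like btrfs_del_items(..., first, nr) */
        printf("delete %d items starting at slot %d\n", nr, first);
        return 0;
}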
+ */ + if (key.offset > bytenr && path->slots[0] > 0) { + int slot = path->slots[0] - 1; + + while (slot >= 0) { + struct btrfs_key pk; + + btrfs_item_key_to_cpu(leaf, &pk, slot); + if (pk.offset < bytenr || + pk.type != BTRFS_EXTENT_CSUM_KEY || + pk.objectid != + BTRFS_EXTENT_CSUM_OBJECTID) + break; + path->slots[0] = slot; + del_nr++; + key.offset = pk.offset; + slot--; + } + } + ret = btrfs_del_items(trans, root, path, + path->slots[0], del_nr); if (ret) goto out; if (key.offset == bytenr) @@ -856,8 +882,8 @@ insert: tmp = min(tmp, (next_offset - file_key.offset) >> fs_info->sb->s_blocksize_bits); - tmp = max((u64)1, tmp); - tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size)); + tmp = max_t(u64, 1, tmp); + tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size)); ins_size = csum_size * tmp; } else { ins_size = csum_size; @@ -904,14 +930,14 @@ fail_unlock: goto out; } -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, struct extent_map *em) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf = path->nodes[0]; const int slot = path->slots[0]; struct btrfs_key key; @@ -976,8 +1002,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode, } } else { btrfs_err(fs_info, - "unknown file extent item type %d, inode %llu, offset %llu, root %llu", - type, btrfs_ino(inode), extent_start, + "unknown file extent item type %d, inode %llu, offset %llu, " + "root %llu", type, btrfs_ino(inode), extent_start, root->root_key.objectid); } } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b5c5da215d05..520cb7230b2d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1, * If an existing record is found the defrag item you * pass in is freed */ -static int __btrfs_add_inode_defrag(struct inode *inode, +static int __btrfs_add_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); struct inode_defrag *entry; struct rb_node **p; struct rb_node *parent = NULL; @@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode, return -EEXIST; } } - set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); rb_link_node(&defrag->rb_node, parent, p); rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); return 0; @@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info) * enabled */ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct inode_defrag *defrag; u64 transid; int ret; @@ -156,13 +156,13 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!__need_auto_defrag(fs_info)) return 0; - if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) + if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) return 0; if (trans) 
transid = trans->transid; else - transid = BTRFS_I(inode)->root->last_trans; + transid = inode->root->last_trans; defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS); if (!defrag) @@ -173,7 +173,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, defrag->root = root->root_key.objectid; spin_lock(&fs_info->defrag_inodes_lock); - if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) { + if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { /* * If we set IN_DEFRAG flag and evict the inode from memory, * and then re-read this inode, this new inode doesn't have @@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, * the same inode in the tree, we will merge them together (by * __btrfs_add_inode_defrag()) and free the one that we want to requeue. */ -static void btrfs_requeue_inode_defrag(struct inode *inode, +static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; if (!__need_auto_defrag(fs_info)) @@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ if (num_defrag == BTRFS_DEFRAG_BATCH) { defrag->last_offset = range.start; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else if (defrag->last_offset && !defrag->cycled) { /* * we didn't fill our defrag batch, but @@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ defrag->last_offset = 0; defrag->cycled = 1; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else { kmem_cache_free(btrfs_inode_defrag_cachep, defrag); } @@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * this drops all the extents in the cache that intersect the range * [start, end]. Existing extents are split as required. */ -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned) { struct extent_map *em; struct extent_map *split = NULL; struct extent_map *split2 = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; u64 len = end - start + 1; u64 gen; int ret; @@ -702,7 +702,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item *fi; struct btrfs_key key; struct btrfs_key new_key; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, int leafs_visited = 0; if (drop_cache) - btrfs_drop_extent_cache(inode, start, end - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0); if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) modify_tree = 0; @@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, * two or three. 
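/*
 * Aside (not part of the patch): the comment above btrfs_mark_extent_written()
 * says a write into a preallocated extent may split it into two or three
 * pieces.  The sketch below is plain interval arithmetic illustrating that
 * outcome under assumed half-open ranges; it is not the btrfs item
 * manipulation itself.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void split_written(uint64_t ext_start, uint64_t ext_end,
                          uint64_t w_start, uint64_t w_end)
{
        if (w_start > ext_start)
                printf("prealloc [%" PRIu64 ", %" PRIu64 ")\n", ext_start, w_start);
        printf("written  [%" PRIu64 ", %" PRIu64 ")\n", w_start, w_end);
        if (w_end < ext_end)
                printf("prealloc [%" PRIu64 ", %" PRIu64 ")\n", w_end, ext_end);
}

int main(void)
{
        split_written(0, 1 << 20, 128 << 10, 256 << 10);        /* middle write: three pieces */
        split_written(0, 1 << 20, 0, 256 << 10);                /* edge write: two pieces */
        return 0;
}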
*/ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end) + struct btrfs_inode *inode, u64 start, u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_file_extent_item *fi; @@ -1415,13 +1415,13 @@ fail: * the other < 0 number - Something wrong happens */ static noinline int -lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, +lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, u64 *lockstart, u64 *lockend, struct extent_state **cached_state) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 start_pos; u64 last_pos; int i; @@ -1432,30 +1432,30 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, + round_up(pos + write_bytes - start_pos, fs_info->sectorsize) - 1; - if (start_pos < inode->i_size) { + if (start_pos < inode->vfs_inode.i_size) { struct btrfs_ordered_extent *ordered; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, cached_state); + lock_extent_bits(&inode->io_tree, start_pos, last_pos, + cached_state); ordered = btrfs_lookup_ordered_range(inode, start_pos, last_pos - start_pos + 1); if (ordered && ordered->file_offset + ordered->len > start_pos && ordered->file_offset <= last_pos) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, - cached_state, GFP_NOFS); + unlock_extent_cached(&inode->io_tree, start_pos, + last_pos, cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); put_page(pages[i]); } - btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_start_ordered_extent(&inode->vfs_inode, + ordered, 1); btrfs_put_ordered_extent(ordered); return -EAGAIN; } if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + clear_extent_bit(&inode->io_tree, start_pos, last_pos, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached_state, GFP_NOFS); @@ -1474,11 +1474,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, return ret; } -static noinline int check_can_nocow(struct inode *inode, loff_t pos, +static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, size_t *write_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_ordered_extent *ordered; u64 lockstart, lockend; u64 num_bytes; @@ -1493,19 +1493,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, fs_info->sectorsize) - 1; while (1) { - lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + lock_extent(&inode->io_tree, lockstart, lockend); ordered = btrfs_lookup_ordered_range(inode, lockstart, lockend - lockstart + 1); if (!ordered) { break; } - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); - btrfs_start_ordered_extent(inode, ordered, 1); + unlock_extent(&inode->io_tree, lockstart, lockend); + btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_put_ordered_extent(ordered); } num_bytes = lockend - lockstart + 1; - ret = 
can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL); + ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, + NULL, NULL, NULL); if (ret <= 0) { ret = 0; btrfs_end_write_no_snapshoting(root); @@ -1514,7 +1515,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, num_bytes - pos + lockstart); } - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + unlock_extent(&inode->io_tree, lockstart, lockend); return ret; } @@ -1579,7 +1580,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (ret < 0) { if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) && - check_can_nocow(inode, pos, &write_bytes) > 0) { + check_can_nocow(BTRFS_I(inode), pos, + &write_bytes) > 0) { /* * For nodata cow case, no need to reserve * data space. @@ -1599,7 +1601,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } } - ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + reserve_bytes); if (ret) { if (!only_release_metadata) btrfs_free_reserved_data_space(inode, pos, @@ -1623,9 +1626,9 @@ again: if (ret) break; - ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages, - pos, write_bytes, &lockstart, - &lockend, &cached_state); + ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages, + num_pages, pos, write_bytes, &lockstart, + &lockend, &cached_state); if (ret < 0) { if (ret == -EAGAIN) goto again; @@ -1677,7 +1680,7 @@ again: spin_unlock(&BTRFS_I(inode)->lock); } if (only_release_metadata) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), release_bytes); } else { u64 __pos; @@ -1738,7 +1741,8 @@ again: if (release_bytes) { if (only_release_metadata) { btrfs_end_write_no_snapshoting(root); - btrfs_delalloc_release_metadata(inode, release_bytes); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + release_bytes); } else { btrfs_delalloc_release_space(inode, round_down(pos, fs_info->sectorsize), @@ -2062,7 +2066,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) * commit does not start nor waits for ordered extents to complete. 
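/*
 * Aside (not part of the patch): the fsync comment above describes a fast path
 * where the transaction commit and log work can be skipped.  The sketch below
 * is a simplified generation-counter restatement of that check; the toy_inode
 * fields and fsync_can_skip() are illustrative assumptions, not the actual
 * btrfs_inode_in_log()/last_trans logic, which also considers sub-transactions
 * and the full_sync flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
        unsigned long long last_trans;          /* transaction that last changed it */
        unsigned long long logged_trans;        /* transaction it was last logged in */
};

static bool fsync_can_skip(const struct toy_inode *inode,
                           unsigned long long cur_generation,
                           unsigned long long last_committed)
{
        if (inode->logged_trans == cur_generation)
                return true;                            /* already in the current log */
        return inode->last_trans <= last_committed;     /* nothing new to persist */
}

int main(void)
{
        struct toy_inode ino = { .last_trans = 41, .logged_trans = 0 };

        printf("skip fsync: %d\n", fsync_can_skip(&ino, 42, 41));       /* prints 1 */
        return 0;
}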
*/ smp_mb(); - if (btrfs_inode_in_log(inode, fs_info->generation) || + if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) || (full_sync && BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) || (!btrfs_have_ordered_extents_in_range(inode, start, len) && @@ -2193,7 +2197,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } -static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, +static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, int slot, u64 start, u64 end) { struct btrfs_file_extent_item *fi; @@ -2222,15 +2226,16 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, return 0; } -static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, - struct btrfs_path *path, u64 offset, u64 end) +static int fill_holes(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_path *path, u64 offset, u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; struct extent_map *hole_em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct btrfs_key key; int ret; @@ -2253,7 +2258,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } leaf = path->nodes[0]; - if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { + if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) { u64 num_bytes; path->slots[0]--; @@ -2285,9 +2290,8 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } btrfs_release_path(path); - ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, - 0, 0, end - offset, 0, end - offset, - 0, 0, 0); + ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), + offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0); if (ret) return ret; @@ -2297,8 +2301,7 @@ out: hole_em = alloc_extent_map(); if (!hole_em) { btrfs_drop_extent_cache(inode, offset, end - 1, 0); - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } else { hole_em->start = offset; hole_em->len = end - offset; @@ -2321,7 +2324,7 @@ out: free_extent_map(hole_em); if (ret) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } return 0; @@ -2338,7 +2341,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) struct extent_map *em; int ret = 0; - em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0); if (IS_ERR_OR_NULL(em)) { if (!em) ret = -ENOMEM; @@ -2551,8 +2554,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) trans->block_rsv = &fs_info->trans_block_rsv; if (cur_offset < drop_end && cur_offset < ino_size) { - ret = fill_holes(trans, inode, path, cur_offset, - drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* * If we failed then we didn't insert our hole @@ -2623,7 +2626,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) * cur_offset == drop_end). 
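/*
 * Aside (not part of the patch): fill_holes()/hole_mergeable() above try to
 * grow an existing hole file extent when the punched range touches it, rather
 * than insert a new item.  The sketch below shows that merge decision on a
 * single in-memory record; "struct hole" and merge_hole() are hypothetical
 * stand-ins for file extent items in a leaf.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hole { uint64_t start; uint64_t len; };

/* Try to extend an existing hole with [offset, end); return false if disjoint. */
static bool merge_hole(struct hole *h, uint64_t offset, uint64_t end)
{
        if (offset > h->start + h->len || end < h->start)
                return false;           /* not adjacent/overlapping: a new item is needed */
        if (offset < h->start) {
                h->len += h->start - offset;
                h->start = offset;
        }
        if (end > h->start + h->len)
                h->len = end - h->start;
        return true;
}

int main(void)
{
        struct hole h = { .start = 4096, .len = 4096 };         /* existing hole 4K..8K */

        if (merge_hole(&h, 8192, 16384))                        /* punching 8K..16K right after it */
                printf("hole now [%" PRIu64 ", %" PRIu64 ")\n",
                       h.start, h.start + h.len);
        return 0;
}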
*/ if (cur_offset < ino_size && cur_offset < drop_end) { - ret = fill_holes(trans, inode, path, cur_offset, drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* Same comment as above. */ btrfs_abort_transaction(trans, ret); @@ -2748,7 +2752,8 @@ static long btrfs_fallocate(struct file *file, int mode, * * For qgroup space, it will be checked later. */ - ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), + alloc_end - alloc_start); if (ret < 0) return ret; @@ -2828,7 +2833,7 @@ static long btrfs_fallocate(struct file *file, int mode, /* First, check if we exceed the qgroup limit */ INIT_LIST_HEAD(&reserve_list); while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, alloc_end - cur_offset, 0); if (IS_ERR_OR_NULL(em)) { if (!em) @@ -2876,7 +2881,7 @@ static long btrfs_fallocate(struct file *file, int mode, if (!ret) ret = btrfs_prealloc_file_range(inode, mode, range->start, - range->len, 1 << inode->i_blkbits, + range->len, i_blocksize(inode), offset + len, &alloc_hint); else btrfs_free_reserved_data_space(inode, range->start, @@ -2955,7 +2960,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) &cached_state); while (start < inode->i_size) { - em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, + start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); em = NULL; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 7015892c9ee8..da6841efac26 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -18,6 +18,7 @@ #include <linux/pagemap.h> #include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/math64.h> #include <linux/ratelimit.h> @@ -94,12 +95,11 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, return inode; } -struct inode *lookup_free_space_inode(struct btrfs_root *root, +struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) { struct inode *inode = NULL; - struct btrfs_fs_info *fs_info = root->fs_info; u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; spin_lock(&block_group->lock); @@ -109,7 +109,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, if (inode) return inode; - inode = __lookup_free_space_inode(root, path, + inode = __lookup_free_space_inode(fs_info->tree_root, path, block_group->key.objectid); if (IS_ERR(inode)) return inode; @@ -192,7 +192,7 @@ static int __create_free_space_inode(struct btrfs_root *root, return 0; } -int create_free_space_inode(struct btrfs_root *root, +int create_free_space_inode(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) @@ -200,11 +200,11 @@ int create_free_space_inode(struct btrfs_root *root, int ret; u64 ino; - ret = btrfs_find_free_objectid(root, &ino); + ret = btrfs_find_free_objectid(fs_info->tree_root, &ino); if (ret < 0) return ret; - return __create_free_space_inode(root, trans, path, ino, + return __create_free_space_inode(fs_info->tree_root, trans, path, ino, block_group->key.objectid); } @@ -227,21 +227,21 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, return ret; } -int btrfs_truncate_free_space_cache(struct btrfs_root *root, 
- struct btrfs_trans_handle *trans, +int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct inode *inode) { + struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - struct btrfs_path *path = btrfs_alloc_path(); bool locked = false; - if (!path) { - ret = -ENOMEM; - goto fail; - } - if (block_group) { + struct btrfs_path *path = btrfs_alloc_path(); + + if (!path) { + ret = -ENOMEM; + goto fail; + } locked = true; mutex_lock(&trans->transaction->cache_write_mutex); if (!list_empty(&block_group->io_list)) { @@ -258,10 +258,10 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_CLEAR; spin_unlock(&block_group->lock); + btrfs_free_path(path); } - btrfs_free_path(path); - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); truncate_pagecache(inode, 0); /* @@ -286,14 +286,14 @@ fail: return ret; } -static int readahead_cache(struct inode *inode) +static void readahead_cache(struct inode *inode) { struct file_ra_state *ra; unsigned long last_index; ra = kzalloc(sizeof(*ra), GFP_NOFS); if (!ra) - return -ENOMEM; + return; file_ra_state_init(ra, inode->i_mapping); last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; @@ -301,8 +301,6 @@ static int readahead_cache(struct inode *inode) page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); kfree(ra); - - return 0; } static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, @@ -313,7 +311,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); - if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) + if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID) check_crcs = 1; /* Make sure we can fit our crcs into the first page */ @@ -730,9 +728,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, if (ret) return ret; - ret = readahead_cache(inode); - if (ret) - goto out; + readahead_cache(inode); ret = io_ctl_prepare_pages(&io_ctl, inode, 1); if (ret) @@ -828,7 +824,6 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - struct btrfs_root *root = fs_info->tree_root; struct inode *inode; struct btrfs_path *path; int ret = 0; @@ -852,7 +847,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, path->search_commit_root = 1; path->skip_locking = 1; - inode = lookup_free_space_inode(root, block_group, path); + inode = lookup_free_space_inode(fs_info, block_group, path); if (IS_ERR(inode)) { btrfs_free_path(path); return 0; @@ -1128,8 +1123,7 @@ cleanup_bitmap_list(struct list_head *bitmap_list) static void noinline_for_stack cleanup_write_cache_enospc(struct inode *inode, struct btrfs_io_ctl *io_ctl, - struct extent_state **cached_state, - struct list_head *bitmap_list) + struct extent_state **cached_state) { io_ctl_drop_pages(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, @@ -1225,8 +1219,6 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, * @ctl - the free space cache we are going to write out * @block_group - the block_group for this cache if it belongs to a block_group * @trans - the trans handle - * @path - the path to use - * @offset - the offset for the key we'll insert * * This function writes out a free space cache struct to disk for quick recovery * on mount. 
This will return 0 if it was successful in writing the cache out, @@ -1236,8 +1228,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, struct btrfs_block_group_cache *block_group, struct btrfs_io_ctl *io_ctl, - struct btrfs_trans_handle *trans, - struct btrfs_path *path, u64 offset) + struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_state *cached_state = NULL; @@ -1365,7 +1356,7 @@ out_nospc_locked: mutex_unlock(&ctl->cache_writeout_mutex); out_nospc: - cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list); + cleanup_write_cache_enospc(inode, io_ctl, &cached_state); if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) up_write(&block_group->data_rwsem); @@ -1378,7 +1369,6 @@ int btrfs_write_out_cache(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) { - struct btrfs_root *root = fs_info->tree_root; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct inode *inode; int ret = 0; @@ -1390,13 +1380,12 @@ int btrfs_write_out_cache(struct btrfs_fs_info *fs_info, } spin_unlock(&block_group->lock); - inode = lookup_free_space_inode(root, block_group, path); + inode = lookup_free_space_inode(fs_info, block_group, path); if (IS_ERR(inode)) return 0; - ret = __btrfs_write_out_cache(root, inode, ctl, block_group, - &block_group->io_ctl, trans, - path, block_group->key.objectid); + ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, + block_group, &block_group->io_ctl, trans); if (ret) { #ifdef DEBUG btrfs_err(fs_info, @@ -3543,8 +3532,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, return 0; memset(&io_ctl, 0, sizeof(io_ctl)); - ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, - trans, path, 0); + ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans); if (!ret) { /* * At this point writepages() didn't error out, so our metadata @@ -3558,7 +3546,8 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, if (ret) { if (release_metadata) - btrfs_delalloc_release_metadata(inode, inode->i_size); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + inode->i_size); #ifdef DEBUG btrfs_err(fs_info, "failed to write free ino cache for root %llu", diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 6f3c025a2c6c..79eca4cabb1c 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -51,18 +51,17 @@ struct btrfs_free_space_op { struct btrfs_io_ctl; -struct inode *lookup_free_space_inode(struct btrfs_root *root, +struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); -int create_free_space_inode(struct btrfs_root *root, +int create_free_space_inode(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); -int btrfs_truncate_free_space_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans, +int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct inode *inode); int load_free_space_cache(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index ff0c55337c2e..dd7fb22a955a 100644 --- a/fs/btrfs/free-space-tree.c +++ 
b/fs/btrfs/free-space-tree.c @@ -1269,7 +1269,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info) list_del(&free_space_root->dirty_list); btrfs_tree_lock(free_space_root->node); - clean_tree_block(trans, fs_info, free_space_root->node); + clean_tree_block(fs_info, free_space_root->node); btrfs_tree_unlock(free_space_root->node); btrfs_free_tree_block(trans, free_space_root, free_space_root->node, 0, 1); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 144b119ff43f..5c6c20ec64d8 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -467,7 +467,7 @@ again: } if (i_size_read(inode) > 0) { - ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode); + ret = btrfs_truncate_free_space_cache(trans, NULL, inode); if (ret) { if (ret != -ENOSPC) btrfs_abort_transaction(trans, ret); @@ -499,7 +499,7 @@ again: ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, prealloc, prealloc, &alloc_hint); if (ret) { - btrfs_delalloc_release_metadata(inode, prealloc); + btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc); goto out_put; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ea1d500cfba6..c40060cc481f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -71,6 +71,7 @@ struct btrfs_dio_data { u64 reserve; u64 unsubmitted_oe_range_start; u64 unsubmitted_oe_range_end; + int overwrite; }; static const struct inode_operations btrfs_dir_inode_operations; @@ -108,11 +109,11 @@ static noinline int cow_file_range(struct inode *inode, u64 start, u64 end, u64 delalloc_end, int *page_started, unsigned long *nr_written, int unlock, struct btrfs_dedupe_hash *hash); -static struct extent_map *create_pinned_em(struct inode *inode, u64 start, - u64 len, u64 orig_start, - u64 block_start, u64 block_len, - u64 orig_block_len, u64 ram_bytes, - int type); +static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, + u64 orig_start, u64 block_start, + u64 block_len, u64 orig_block_len, + u64 ram_bytes, int compress_type, + int type); static int btrfs_dirty_inode(struct inode *inode); @@ -166,7 +167,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_key key; size_t datasize; - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); key.offset = start; key.type = BTRFS_EXTENT_DATA_KEY; @@ -315,8 +316,8 @@ static noinline int cow_file_range_inline(struct btrfs_root *root, } set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); - btrfs_delalloc_release_metadata(inode, end + 1 - start); - btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); + btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start); + btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); out: /* * Don't forget to free the reserved space, as for inlined extent @@ -388,6 +389,15 @@ static inline int inode_need_compress(struct inode *inode) return 0; } +static inline void inode_should_defrag(struct btrfs_inode *inode, + u64 start, u64 end, u64 num_bytes, u64 small_write) +{ + /* If this is a small write inside eof, kick off a defrag */ + if (num_bytes < small_write && + (start > 0 || end + 1 < inode->disk_i_size)) + btrfs_add_inode_defrag(NULL, inode); +} + /* * we create compressed extents in two phases. 
The first * phase compresses a range of pages that have already been @@ -420,26 +430,23 @@ static noinline void compress_file_range(struct inode *inode, int ret = 0; struct page **pages = NULL; unsigned long nr_pages; - unsigned long nr_pages_ret = 0; unsigned long total_compressed = 0; unsigned long total_in = 0; - unsigned long max_compressed = SZ_128K; - unsigned long max_uncompressed = SZ_128K; int i; int will_compress; int compress_type = fs_info->compress_type; int redirty = 0; - /* if this is a small write inside eof, kick off a defrag */ - if ((end - start + 1) < SZ_16K && - (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) - btrfs_add_inode_defrag(NULL, inode); + inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, + SZ_16K); actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; - nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE); + BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0); + nr_pages = min_t(unsigned long, nr_pages, + BTRFS_MAX_COMPRESSED / PAGE_SIZE); /* * we don't want to send crud past the end of i_size through @@ -464,17 +471,8 @@ again: (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) goto cleanup_and_bail_uncompressed; - /* we want to make sure that amount of ram required to uncompress - * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 128k. This is a crucial number - * because it also controls how easily we can spread reads across - * cpus for decompression. - * - * We also want to make sure the amount of IO required to do - * a random read is reasonably small, so we limit the size of - * a compressed extent to 128k. - */ - total_compressed = min(total_compressed, max_uncompressed); + total_compressed = min_t(unsigned long, total_compressed, + BTRFS_MAX_UNCOMPRESSED); num_bytes = ALIGN(end - start + 1, blocksize); num_bytes = max(blocksize, num_bytes); total_in = 0; @@ -509,16 +507,15 @@ again: redirty = 1; ret = btrfs_compress_pages(compress_type, inode->i_mapping, start, - total_compressed, pages, - nr_pages, &nr_pages_ret, + pages, + &nr_pages, &total_in, - &total_compressed, - max_compressed); + &total_compressed); if (!ret) { unsigned long offset = total_compressed & (PAGE_SIZE - 1); - struct page *page = pages[nr_pages_ret - 1]; + struct page *page = pages[nr_pages - 1]; char *kaddr; /* zero the tail end of the last page, we might be @@ -541,7 +538,7 @@ cont: * to make an uncompressed inline extent. */ ret = cow_file_range_inline(root, inode, start, end, - 0, 0, NULL); + 0, BTRFS_COMPRESS_NONE, NULL); } else { /* try making a compressed inline extent */ ret = cow_file_range_inline(root, inode, start, end, @@ -599,7 +596,7 @@ cont: * will submit them to the elevator. 
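/*
 * Aside (not part of the patch): this hunk factors the "small write inside
 * EOF, kick off a defrag" test into inode_should_defrag().  Below is a
 * userspace restatement of that predicate under assumed plain-integer
 * arguments; the threshold values and the function name are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool should_defrag(uint64_t start, uint64_t end, uint64_t num_bytes,
                          uint64_t small_write, uint64_t disk_i_size)
{
        /* a small write that does not cover the file from offset 0 up to EOF */
        return num_bytes < small_write &&
               (start > 0 || end + 1 < disk_i_size);
}

int main(void)
{
        /* 4K rewrite in the middle of a 1M file: candidate for autodefrag */
        printf("%d\n", should_defrag(65536, 69631, 4096, 16384, 1 << 20));
        /* 4K write starting at offset 0 of an empty file: not a candidate */
        printf("%d\n", should_defrag(0, 4095, 4096, 16384, 0));
        return 0;
}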
*/ add_async_extent(async_cow, start, num_bytes, - total_compressed, pages, nr_pages_ret, + total_compressed, pages, nr_pages, compress_type); if (start + num_bytes < end) { @@ -616,14 +613,14 @@ cont: * the compression code ran but failed to make things smaller, * free any pages it allocated and our page pointer array */ - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } kfree(pages); pages = NULL; total_compressed = 0; - nr_pages_ret = 0; + nr_pages = 0; /* flag the file so we don't compress in the future */ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && @@ -652,7 +649,7 @@ cleanup_and_bail_uncompressed: return; free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } @@ -690,7 +687,6 @@ static noinline void submit_compressed_extents(struct inode *inode, struct btrfs_key ins; struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree; int ret = 0; @@ -778,46 +774,19 @@ retry: * here we're doing allocation and writeback of the * compressed pages */ - btrfs_drop_extent_cache(inode, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, 0); - - em = alloc_extent_map(); - if (!em) { - ret = -ENOMEM; - goto out_free_reserve; - } - em->start = async_extent->start; - em->len = async_extent->ram_size; - em->orig_start = em->start; - em->mod_start = em->start; - em->mod_len = em->len; - - em->block_start = ins.objectid; - em->block_len = ins.offset; - em->orig_block_len = ins.offset; - em->ram_bytes = async_extent->ram_size; - em->bdev = fs_info->fs_devices->latest_bdev; - em->compress_type = async_extent->compress_type; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - em->generation = -1; - - while (1) { - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em, 1); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(inode, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, 0); - } - - if (ret) + em = create_io_em(inode, async_extent->start, + async_extent->ram_size, /* len */ + async_extent->start, /* orig_start */ + ins.objectid, /* block_start */ + ins.offset, /* block_len */ + ins.offset, /* orig_block_len */ + async_extent->ram_size, /* ram_bytes */ + async_extent->compress_type, + BTRFS_ORDERED_COMPRESSED); + if (IS_ERR(em)) + /* ret value is not necessary due to void function */ goto out_free_reserve; + free_extent_map(em); ret = btrfs_add_ordered_extent_compress(inode, async_extent->start, @@ -827,7 +796,8 @@ retry: BTRFS_ORDERED_COMPRESSED, async_extent->compress_type); if (ret) { - btrfs_drop_extent_cache(inode, async_extent->start, + btrfs_drop_extent_cache(BTRFS_I(inode), + async_extent->start, async_extent->start + async_extent->ram_size - 1, 0); goto out_free_reserve; @@ -952,10 +922,9 @@ static noinline int cow_file_range(struct inode *inode, u64 blocksize = fs_info->sectorsize; struct btrfs_key ins; struct extent_map *em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; @@ -965,15 +934,12 @@ static noinline int cow_file_range(struct inode *inode, num_bytes = max(blocksize, num_bytes); 
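/*
 * Aside (not part of the patch): the create_io_em() helper introduced in this
 * series wraps the previously open-coded "insert extent map, and on -EEXIST
 * drop the stale cached range and retry" loop.  The sketch below models that
 * retry pattern with a toy fixed-size table standing in for the extent map
 * tree; try_insert()/drop_range() are illustrative, not the btrfs API.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, len; int used; };
static struct range tree[16];

static int try_insert(uint64_t start, uint64_t len)
{
        int i, slot = -1;

        for (i = 0; i < 16; i++) {
                if (!tree[i].used) {
                        if (slot < 0)
                                slot = i;
                        continue;
                }
                if (start < tree[i].start + tree[i].len &&
                    tree[i].start < start + len)
                        return -EEXIST;         /* overlaps a cached mapping */
        }
        if (slot < 0)
                return -ENOSPC;
        tree[slot] = (struct range){ start, len, 1 };
        return 0;
}

static void drop_range(uint64_t start, uint64_t len)
{
        int i;

        for (i = 0; i < 16; i++)
                if (tree[i].used && start < tree[i].start + tree[i].len &&
                    tree[i].start < start + len)
                        tree[i].used = 0;       /* like dropping the cached extent */
}

int main(void)
{
        try_insert(0, 4096);                    /* stale cached mapping */
        while (try_insert(0, 8192) == -EEXIST)  /* create_io_em-style retry */
                drop_range(0, 8192);
        printf("mapping installed\n");
        return 0;
}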
disk_num_bytes = num_bytes; - /* if this is a small write inside eof, kick off defrag */ - if (num_bytes < SZ_64K && - (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) - btrfs_add_inode_defrag(NULL, inode); + inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); if (start == 0) { /* lets try to make an inline extent */ - ret = cow_file_range_inline(root, inode, start, end, 0, 0, - NULL); + ret = cow_file_range_inline(root, inode, start, end, 0, + BTRFS_COMPRESS_NONE, NULL); if (ret == 0) { extent_clear_unlock_delalloc(inode, start, end, delalloc_end, NULL, @@ -996,7 +962,8 @@ static noinline int cow_file_range(struct inode *inode, btrfs_super_total_bytes(fs_info->super_copy)); alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, + start + num_bytes - 1, 0); while (disk_num_bytes > 0) { unsigned long op; @@ -1008,39 +975,18 @@ static noinline int cow_file_range(struct inode *inode, if (ret < 0) goto out_unlock; - em = alloc_extent_map(); - if (!em) { - ret = -ENOMEM; - goto out_reserve; - } - em->start = start; - em->orig_start = em->start; ram_size = ins.offset; - em->len = ins.offset; - em->mod_start = em->start; - em->mod_len = em->len; - - em->block_start = ins.objectid; - em->block_len = ins.offset; - em->orig_block_len = ins.offset; - em->ram_bytes = ram_size; - em->bdev = fs_info->fs_devices->latest_bdev; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - em->generation = -1; - - while (1) { - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em, 1); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(inode, start, - start + ram_size - 1, 0); - } - if (ret) + em = create_io_em(inode, start, ins.offset, /* len */ + start, /* orig_start */ + ins.objectid, /* block_start */ + ins.offset, /* block_len */ + ins.offset, /* orig_block_len */ + ram_size, /* ram_bytes */ + BTRFS_COMPRESS_NONE, /* compress_type */ + BTRFS_ORDERED_REGULAR /* type */); + if (IS_ERR(em)) goto out_reserve; + free_extent_map(em); cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, @@ -1085,7 +1031,7 @@ out: return ret; out_drop_extent_cache: - btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); out_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); @@ -1164,7 +1110,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, struct btrfs_root *root = BTRFS_I(inode)->root; unsigned long nr_pages; u64 cur_end; - int limit = 10 * SZ_1M; clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS); @@ -1196,12 +1141,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work); - if (atomic_read(&fs_info->async_delalloc_pages) > limit) { - wait_event(fs_info->async_submit_wait, - (atomic_read(&fs_info->async_delalloc_pages) < - limit)); - } - while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->async_delalloc_pages)) { wait_event(fs_info->async_submit_wait, @@ -1250,11 +1189,11 @@ static noinline int run_delalloc_nocow(struct inode *inode, { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = 
BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_file_extent_item *fi; struct btrfs_key found_key; + struct extent_map *em; u64 cow_start; u64 cur_offset; u64 extent_end; @@ -1269,7 +1208,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, int nocow; int check_prev = 1; bool nolock; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) { @@ -1284,32 +1223,12 @@ static noinline int run_delalloc_nocow(struct inode *inode, return -ENOMEM; } - nolock = btrfs_is_free_space_inode(inode); - - if (nolock) - trans = btrfs_join_transaction_nolock(root); - else - trans = btrfs_join_transaction(root); - - if (IS_ERR(trans)) { - extent_clear_unlock_delalloc(inode, start, end, end, - locked_page, - EXTENT_LOCKED | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | - EXTENT_DEFRAG, PAGE_UNLOCK | - PAGE_CLEAR_DIRTY | - PAGE_SET_WRITEBACK | - PAGE_END_WRITEBACK); - btrfs_free_path(path); - return PTR_ERR(trans); - } - - trans->block_rsv = &fs_info->delalloc_block_rsv; + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); cow_start = (u64)-1; cur_offset = start; while (1) { - ret = btrfs_lookup_file_extent(trans, root, path, ino, + ret = btrfs_lookup_file_extent(NULL, root, path, ino, cur_offset, 0); if (ret < 0) goto error; @@ -1382,7 +1301,7 @@ next_slot: goto out_check; if (btrfs_extent_readonly(fs_info, disk_bytenr)) goto out_check; - if (btrfs_cross_ref_exist(trans, root, ino, + if (btrfs_cross_ref_exist(root, ino, found_key.offset - extent_offset, disk_bytenr)) goto out_check; @@ -1404,10 +1323,16 @@ next_slot: * either valid or do not exist. */ if (csum_exist_in_range(fs_info, disk_bytenr, - num_bytes)) + num_bytes)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; - if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) + } + if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; + } nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -1455,35 +1380,28 @@ out_check: } if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { - struct extent_map *em; - struct extent_map_tree *em_tree; - em_tree = &BTRFS_I(inode)->extent_tree; - em = alloc_extent_map(); - BUG_ON(!em); /* -ENOMEM */ - em->start = cur_offset; - em->orig_start = found_key.offset - extent_offset; - em->len = num_bytes; - em->block_len = num_bytes; - em->block_start = disk_bytenr; - em->orig_block_len = disk_num_bytes; - em->ram_bytes = ram_bytes; - em->bdev = fs_info->fs_devices->latest_bdev; - em->mod_start = em->start; - em->mod_len = em->len; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - set_bit(EXTENT_FLAG_FILLING, &em->flags); - em->generation = -1; - while (1) { - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em, 1); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(inode, em->start, - em->start + em->len - 1, 0); + u64 orig_start = found_key.offset - extent_offset; + + em = create_io_em(inode, cur_offset, num_bytes, + orig_start, + disk_bytenr, /* block_start */ + num_bytes, /* block_len */ + disk_num_bytes, /* orig_block_len */ + ram_bytes, BTRFS_COMPRESS_NONE, + BTRFS_ORDERED_PREALLOC); + if (IS_ERR(em)) { + if (!nolock && nocow) + btrfs_end_write_no_snapshoting(root); + if (nocow) + btrfs_dec_nocow_writers(fs_info, + disk_bytenr); + ret = PTR_ERR(em); + goto error; } + 
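/*
 * The three delalloc paths above (compressed async extents, plain COW in
 * cow_file_range() and the prealloc case in run_delalloc_nocow()) now funnel
 * their extent-map setup through one helper; create_io_em() itself is added
 * further down in this diff in place of create_pinned_em(). The shared
 * calling pattern, as the cow_file_range() hunk shows it:
 *
 *	em = create_io_em(inode, start, ins.offset,	   len
 *			  start,			   orig_start
 *			  ins.objectid,			   block_start
 *			  ins.offset,			   block_len
 *			  ins.offset,			   orig_block_len
 *			  ram_size,			   ram_bytes
 *			  BTRFS_COMPRESS_NONE, BTRFS_ORDERED_REGULAR);
 *	if (IS_ERR(em))
 *		goto out_reserve;
 *	free_extent_map(em);	   drop the caller's ref, the tree keeps one
 *
 * The compressed path passes its compress_type with BTRFS_ORDERED_COMPRESSED
 * and the nocow prealloc path passes BTRFS_ORDERED_PREALLOC; only the error
 * cleanup still differs per call site.
 */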
free_extent_map(em); + } + + if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { type = BTRFS_ORDERED_PREALLOC; } else { type = BTRFS_ORDERED_NOCOW; @@ -1534,10 +1452,6 @@ out_check: } error: - err = btrfs_end_transaction(trans); - if (!ret) - ret = err; - if (ret && cur_offset < end) extent_clear_unlock_delalloc(inode, cur_offset, end, end, locked_page, EXTENT_LOCKED | @@ -1609,7 +1523,7 @@ static void btrfs_split_extent_hook(struct inode *inode, size = orig->end - orig->start + 1; if (size > BTRFS_MAX_EXTENT_SIZE) { - u64 num_extents; + u32 num_extents; u64 new_size; /* @@ -1617,13 +1531,10 @@ static void btrfs_split_extent_hook(struct inode *inode, * applies here, just in reverse. */ new_size = orig->end - split + 1; - num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); + num_extents = count_max_extents(new_size); new_size = split - orig->start; - num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); - if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE) >= num_extents) + num_extents += count_max_extents(new_size); + if (count_max_extents(size) >= num_extents) return; } @@ -1643,7 +1554,7 @@ static void btrfs_merge_extent_hook(struct inode *inode, struct extent_state *other) { u64 new_size, old_size; - u64 num_extents; + u32 num_extents; /* not delalloc, ignore it */ if (!(other->state & EXTENT_DELALLOC)) @@ -1681,14 +1592,10 @@ static void btrfs_merge_extent_hook(struct inode *inode, * this case. */ old_size = other->end - other->start + 1; - num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); + num_extents = count_max_extents(old_size); old_size = new->end - new->start + 1; - num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); - - if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE) >= num_extents) + num_extents += count_max_extents(old_size); + if (count_max_extents(new_size) >= num_extents) return; spin_lock(&BTRFS_I(inode)->lock); @@ -1720,15 +1627,15 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root, } static void btrfs_del_delalloc_inode(struct btrfs_root *root, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); spin_lock(&root->delalloc_lock); - if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { - list_del_init(&BTRFS_I(inode)->delalloc_inodes); + if (!list_empty(&inode->delalloc_inodes)) { + list_del_init(&inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); root->nr_delalloc_inodes--; if (!root->nr_delalloc_inodes) { spin_lock(&fs_info->delalloc_root_lock); @@ -1761,7 +1668,7 @@ static void btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - bool do_list = !btrfs_is_free_space_inode(inode); + bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1791,19 +1698,18 @@ static void btrfs_set_bit_hook(struct inode *inode, /* * extent_io.c clear_bit_hook, see set_bit_hook for why */ -static void btrfs_clear_bit_hook(struct inode *inode, +static void btrfs_clear_bit_hook(struct btrfs_inode *inode, struct extent_state *state, unsigned *bits) { - struct btrfs_fs_info 
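/*
 * A minimal sketch of the count_max_extents() helper used in the two hunks
 * above (and again in the clear_bit and direct-IO hunks below), inferred
 * from the div64_u64() round-up expression it replaces; the real definition
 * lives outside this file and may differ slightly.
 */
static inline u32 count_max_extents(u64 size)
{
	return div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
			 BTRFS_MAX_EXTENT_SIZE);
}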
*fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 len = state->end + 1 - state->start; - u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1, - BTRFS_MAX_EXTENT_SIZE); + u32 num_extents = count_max_extents(len); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) - BTRFS_I(inode)->defrag_bytes -= len; - spin_unlock(&BTRFS_I(inode)->lock); + inode->defrag_bytes -= len; + spin_unlock(&inode->lock); /* * set_bit and clear bit hooks normally require _irqsave/restore @@ -1811,15 +1717,15 @@ static void btrfs_clear_bit_hook(struct inode *inode, * bit, which is only set or cleared with irqs on */ if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; bool do_list = !btrfs_is_free_space_inode(inode); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents -= num_extents; - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + inode->outstanding_extents -= num_extents; + spin_unlock(&inode->lock); } /* @@ -1839,18 +1745,19 @@ static void btrfs_clear_bit_hook(struct inode *inode, && do_list && !(state->state & EXTENT_NORESERVE) && (*bits & (EXTENT_DO_ACCOUNTING | EXTENT_CLEAR_DATA_RESV))) - btrfs_free_reserved_data_space_noquota(inode, + btrfs_free_reserved_data_space_noquota( + &inode->vfs_inode, state->start, len); __percpu_counter_add(&fs_info->delalloc_bytes, -len, fs_info->delalloc_batch); - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->delalloc_bytes -= len; - if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && + spin_lock(&inode->lock); + inode->delalloc_bytes -= len; + if (do_list && inode->delalloc_bytes == 0 && test_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) btrfs_del_delalloc_inode(root, inode); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); } } @@ -1946,7 +1853,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (btrfs_is_free_space_inode(inode)) + if (btrfs_is_free_space_inode(BTRFS_I(inode))) metadata = BTRFS_WQ_ENDIO_FREE_SPACE; if (bio_op(bio) != REQ_OP_WRITE) { @@ -1997,8 +1904,7 @@ out: * at IO completion time based on sums calculated at bio submission time. 
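/*
 * Many hunks in this diff replace "struct inode *inode" parameters with
 * "struct btrfs_inode *inode", turning BTRFS_I(inode)->field into
 * inode->field and reaching the VFS inode through inode->vfs_inode. A
 * minimal sketch of the accessor that conversion relies on, assuming the
 * usual container_of() embedding of vfs_inode in struct btrfs_inode (the
 * real accessor is defined in the btrfs headers):
 */
static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}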
*/ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, - struct inode *inode, u64 file_offset, - struct list_head *list) + struct inode *inode, struct list_head *list) { struct btrfs_ordered_sum *sum; @@ -2056,7 +1962,7 @@ again: if (PagePrivate2(page)) goto out; - ordered = btrfs_lookup_ordered_range(inode, page_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); if (ordered) { unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, @@ -2161,7 +2067,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, goto out; if (!extent_inserted) { - ins.objectid = btrfs_ino(inode); + ins.objectid = btrfs_ino(BTRFS_I(inode)); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; @@ -2194,8 +2100,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ins.offset = disk_num_bytes; ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid, - btrfs_ino(inode), file_pos, - ram_bytes, &ins); + btrfs_ino(BTRFS_I(inode)), file_pos, ram_bytes, &ins); /* * Release the reserved range from inode dirty range map, as it is * already moved into delayed_ref_head @@ -2320,7 +2225,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, u64 num_bytes; if (BTRFS_I(inode)->root->root_key.objectid == root_id && - inum == btrfs_ino(inode)) + inum == btrfs_ino(BTRFS_I(inode))) return 0; key.objectid = root_id; @@ -2589,7 +2494,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path, if (ret) goto out_free_path; again: - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = start; @@ -2768,7 +2673,7 @@ record_old_file_extents(struct inode *inode, if (!path) goto out_kfree; - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = new->file_pos; @@ -2803,7 +2708,7 @@ record_old_file_extents(struct inode *inode, btrfs_item_key_to_cpu(l, &key, slot); - if (key.objectid != btrfs_ino(inode)) + if (key.objectid != btrfs_ino(BTRFS_I(inode))) break; if (key.type != BTRFS_EXTENT_DATA_KEY) break; @@ -2887,16 +2792,17 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) bool nolock; bool truncated = false; - nolock = btrfs_is_free_space_inode(inode); + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { ret = -EIO; goto out; } - btrfs_free_io_failure_record(inode, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1); + btrfs_free_io_failure_record(BTRFS_I(inode), + ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1); if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { truncated = true; @@ -2967,7 +2873,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { BUG_ON(compress_type); - ret = btrfs_mark_extent_written(trans, inode, + ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), ordered_extent->file_offset, ordered_extent->file_offset + logical_len); @@ -2993,8 +2899,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) goto out_unlock; } - add_pending_csums(trans, inode, ordered_extent->file_offset, - &ordered_extent->list); + add_pending_csums(trans, inode, 
&ordered_extent->list); btrfs_ordered_update_i_size(inode, 0, ordered_extent); ret = btrfs_update_inode_fallback(trans, root, inode); @@ -3009,7 +2914,8 @@ out_unlock: ordered_extent->len - 1, &cached_state, GFP_NOFS); out: if (root != fs_info->tree_root) - btrfs_delalloc_release_metadata(inode, ordered_extent->len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + ordered_extent->len); if (trans) btrfs_end_transaction(trans); @@ -3024,7 +2930,7 @@ out: clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); /* Drop the cache for the part of the extent we didn't write. */ - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); /* * If the ordered extent had an IOERR or something else went @@ -3072,7 +2978,7 @@ static void finish_ordered_fn(struct btrfs_work *work) btrfs_finish_ordered_io(ordered_extent); } -static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, +static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { struct inode *inode = page->mapping->host; @@ -3086,9 +2992,9 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, ClearPagePrivate2(page); if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1, uptodate)) - return 0; + return; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { wq = fs_info->endio_freespace_worker; func = btrfs_freespace_write_helper; } else { @@ -3099,8 +3005,6 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); - - return 0; } static int __readpage_endio_check(struct inode *inode, @@ -3123,9 +3027,8 @@ static int __readpage_endio_check(struct inode *inode, kunmap_atomic(kaddr); return 0; zeroit: - btrfs_warn_rl(BTRFS_I(inode)->root->fs_info, - "csum failed ino %llu off %llu csum %u expected csum %u", - btrfs_ino(inode), start, csum, csum_expected); + btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, + io_bio->mirror_num); memset(kaddr + pgoff, 1, len); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -3263,10 +3166,11 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, * NOTE: caller of this function should reserve 5 units of metadata for * this function. 
*/ -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = NULL; int reserve = 0; int insert = 0; @@ -3288,7 +3192,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { #if 0 /* * For proper ENOSPC handling, we should do orphan @@ -3305,7 +3209,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) reserve = 1; spin_unlock(&root->orphan_lock); @@ -3316,10 +3220,10 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) if (ret) { atomic_dec(&root->orphan_inodes); clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); if (insert) clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); return ret; } } @@ -3331,12 +3235,12 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) atomic_dec(&root->orphan_inodes); if (reserve) { clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); btrfs_orphan_release_metadata(inode); } if (ret != -EEXIST) { clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); btrfs_abort_transaction(trans, ret); return ret; } @@ -3361,20 +3265,20 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) * item for this particular inode. 
*/ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; int delete_item = 0; int release_rsv = 0; int ret = 0; spin_lock(&root->orphan_lock); if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) delete_item = 1; if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) release_rsv = 1; spin_unlock(&root->orphan_lock); @@ -3548,7 +3452,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = PTR_ERR(trans); goto out; } - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) { iput(inode); @@ -3557,7 +3461,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = btrfs_truncate(inode); if (ret) - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } else { nr_unlink++; } @@ -3712,7 +3616,7 @@ static int btrfs_read_locked_inode(struct inode *inode) set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); - btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); @@ -3789,7 +3693,7 @@ cache_index: goto cache_acl; btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); - if (location.objectid != btrfs_ino(inode)) + if (location.objectid != btrfs_ino(BTRFS_I(inode))) goto cache_acl; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); @@ -3811,14 +3715,14 @@ cache_acl: * any xattrs or acls */ maybe_acls = acls_after_inode_item(leaf, path->slots[0], - btrfs_ino(inode), &first_xattr_slot); + btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); if (first_xattr_slot != -1) { path->slots[0] = first_xattr_slot; ret = btrfs_load_inode_props(inode, path); if (ret) btrfs_err(fs_info, "error loading props for ino %llu (root %llu): %d", - btrfs_ino(inode), + btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); } btrfs_free_path(path); @@ -3960,7 +3864,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, * The data relocation inode should also be directly updated * without delay */ - if (!btrfs_is_free_space_inode(inode) + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { btrfs_update_root_times(trans, root); @@ -3993,7 +3897,8 @@ noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, */ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *dir, struct inode *inode, + struct btrfs_inode *dir, + struct btrfs_inode *inode, const char *name, int name_len) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -4040,10 +3945,10 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, * that we delay to delete it, and just do this deletion when * we update the inode item. 
*/ - if (BTRFS_I(inode)->dir_index) { + if (inode->dir_index) { ret = btrfs_delayed_delete_inode_ref(inode); if (!ret) { - index = BTRFS_I(inode)->dir_index; + index = inode->dir_index; goto skip_backref; } } @@ -4064,15 +3969,15 @@ skip_backref: goto err; } - ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, - inode, dir_ino); + ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, + dir_ino); if (ret != 0 && ret != -ENOENT) { btrfs_abort_transaction(trans, ret); goto err; } - ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, - dir, index); + ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, + index); if (ret == -ENOENT) ret = 0; else if (ret) @@ -4082,26 +3987,26 @@ err: if (ret) goto out; - btrfs_i_size_write(dir, dir->i_size - name_len * 2); - inode_inc_iversion(inode); - inode_inc_iversion(dir); - inode->i_ctime = dir->i_mtime = - dir->i_ctime = current_time(inode); - ret = btrfs_update_inode(trans, root, dir); + btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); + inode_inc_iversion(&inode->vfs_inode); + inode_inc_iversion(&dir->vfs_inode); + inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = + dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); + ret = btrfs_update_inode(trans, root, &dir->vfs_inode); out: return ret; } int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *dir, struct inode *inode, + struct btrfs_inode *dir, struct btrfs_inode *inode, const char *name, int name_len) { int ret; ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); if (!ret) { - drop_nlink(inode); - ret = btrfs_update_inode(trans, root, inode); + drop_nlink(&inode->vfs_inode); + ret = btrfs_update_inode(trans, root, &inode->vfs_inode); } return ret; } @@ -4139,15 +4044,17 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0); + btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), + 0); - ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), - dentry->d_name.name, dentry->d_name.len); + ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), + BTRFS_I(d_inode(dentry)), dentry->d_name.name, + dentry->d_name.len); if (ret) goto out; if (inode->i_nlink == 0) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out; } @@ -4170,7 +4077,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_key key; u64 index; int ret; - u64 dir_ino = btrfs_ino(dir); + u64 dir_ino = btrfs_ino(BTRFS_I(dir)); path = btrfs_alloc_path(); if (!path) @@ -4222,13 +4129,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index); + ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } - btrfs_i_size_write(dir, dir->i_size - name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = current_time(dir); ret = btrfs_update_inode_fallback(trans, root, dir); @@ -4249,14 +4156,14 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; - if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) return -EPERM; trans = 
__unlink_start_trans(dir); if (IS_ERR(trans)) return PTR_ERR(trans); - if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, dentry->d_name.name, @@ -4264,17 +4171,18 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) goto out; } - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); if (err) goto out; last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; /* now the directory is empty */ - err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), - dentry->d_name.name, dentry->d_name.len); + err = btrfs_unlink_inode(trans, root, BTRFS_I(dir), + BTRFS_I(d_inode(dentry)), dentry->d_name.name, + dentry->d_name.len); if (!err) { - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * Propagate the last_unlink_trans value of the deleted dir to * its parent directory. This is to prevent an unrecoverable @@ -4398,7 +4306,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int extent_type = -1; int ret; int err = 0; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); u64 bytes_deleted = 0; bool be_nice = 0; bool should_throttle = 0; @@ -4410,7 +4318,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, * for non-free space inodes and ref cows, we want to back off from * time to time */ - if (!btrfs_is_free_space_inode(inode) && + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && test_bit(BTRFS_ROOT_REF_COWS, &root->state)) be_nice = 1; @@ -4426,7 +4334,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, */ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || root == fs_info->tree_root) - btrfs_drop_extent_cache(inode, ALIGN(new_size, + btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, fs_info->sectorsize), (u64)-1, 0); @@ -4437,7 +4345,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, * items. */ if (min_type == 0 && root == BTRFS_I(inode)->root) - btrfs_kill_delayed_inode_items(inode); + btrfs_kill_delayed_inode_items(BTRFS_I(inode)); key.objectid = ino; key.offset = (u64)-1; @@ -4502,19 +4410,8 @@ search_again: if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) { - /* - * With NO_HOLES mode, for the following mapping - * - * [0-4k][hole][8k-12k] - * - * if truncating isize down to 6k, it ends up - * isize being 8k. 
- */ - if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) - last_size = new_size; + if (item_end < new_size) break; - } if (found_key.offset >= new_size) del_item = 1; else @@ -4697,11 +4594,22 @@ out: btrfs_abort_transaction(trans, ret); } error: - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { + ASSERT(last_size >= new_size); + if (!err && last_size > new_size) + last_size = new_size; btrfs_ordered_update_i_size(inode, last_size, NULL); + } btrfs_free_path(path); + if (err == 0) { + /* only inline file may have last_size != new_size */ + if (new_size >= fs_info->sectorsize || + new_size > fs_info->max_inline) + ASSERT(last_size == new_size); + } + if (be_nice && bytes_deleted > SZ_32M) { unsigned long updates = trans->delayed_ref_updates; if (updates) { @@ -4870,8 +4778,8 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, return ret; } - ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, - 0, 0, len, 0, len, 0, 0, 0); + ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), + offset, 0, 0, len, 0, len, 0, 0, 0); if (ret) btrfs_abort_transaction(trans, ret); else @@ -4918,7 +4826,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) lock_extent_bits(io_tree, hole_start, block_end - 1, &cached_state); - ordered = btrfs_lookup_ordered_range(inode, hole_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start, block_end - hole_start); if (!ordered) break; @@ -4930,7 +4838,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) cur_offset = hole_start; while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, block_end - cur_offset, 0); if (IS_ERR(em)) { err = PTR_ERR(em); @@ -4947,7 +4855,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) hole_size); if (err) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + hole_size - 1, 0); hole_em = alloc_extent_map(); if (!hole_em) { @@ -4973,7 +4881,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) write_unlock(&em_tree->lock); if (err != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), + cur_offset, cur_offset + hole_size - 1, 0); } @@ -5070,7 +4979,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) * so we need to guarantee from this point on that everything * will be consistent. 
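/*
 * Note on the truncate hunks above: the removed per-item NO_HOLES special
 * case (the [0-4k][hole][8k-12k] example, where truncating isize down to 6k
 * used to leave it at 8k) is now handled once at the end of the function:
 * last_size is asserted to be >= new_size and clamped down to new_size on
 * success before btrfs_ordered_update_i_size(), and the added err == 0 block
 * asserts last_size == new_size except where an inline extent is possible.
 */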
*/ - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) return ret; @@ -5079,14 +4988,21 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) truncate_setsize(inode, newsize); /* Disable nonlocked read DIO to avoid the end less truncate */ - btrfs_inode_block_unlocked_dio(inode); + btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); inode_dio_wait(inode); - btrfs_inode_resume_unlocked_dio(inode); + btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); ret = btrfs_truncate(inode); if (ret && inode->i_nlink) { int err; + /* To get a stable disk_i_size */ + err = btrfs_wait_ordered_range(inode, 0, (u64)-1); + if (err) { + btrfs_orphan_del(NULL, BTRFS_I(inode)); + return err; + } + /* * failed to truncate, disk_i_size is only adjusted down * as we remove extents, so it should represent the true @@ -5095,11 +5011,11 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) */ trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); return ret; } i_size_write(inode, BTRFS_I(inode)->disk_i_size); - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) btrfs_abort_transaction(trans, err); btrfs_end_transaction(trans); @@ -5257,18 +5173,18 @@ void btrfs_evict_inode(struct inode *inode) if (inode->i_nlink && ((btrfs_root_refs(&root->root_item) != 0 && root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || - btrfs_is_free_space_inode(inode))) + btrfs_is_free_space_inode(BTRFS_I(inode)))) goto no_delete; if (is_bad_inode(inode)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? 
*/ if (!special_file(inode->i_mode)) btrfs_wait_ordered_range(inode, 0, (u64)-1); - btrfs_free_io_failure_record(inode, 0, (u64)-1); + btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, @@ -5282,22 +5198,22 @@ void btrfs_evict_inode(struct inode *inode) goto no_delete; } - ret = btrfs_commit_inode_delayed_inode(inode); + ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); if (!rsv) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv->size = min_size; rsv->failfast = 1; global_rsv = &fs_info->global_block_rsv; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * This is a bit simpler than btrfs_truncate since we've already @@ -5332,14 +5248,14 @@ void btrfs_evict_inode(struct inode *inode) btrfs_warn(fs_info, "Could not get space for a delete, will truncate on mount %d", ret); - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5365,7 +5281,7 @@ void btrfs_evict_inode(struct inode *inode) if (ret) { ret = btrfs_commit_transaction(trans); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5394,20 +5310,20 @@ void btrfs_evict_inode(struct inode *inode) */ if (ret == 0) { trans->block_rsv = root->orphan_block_rsv; - btrfs_orphan_del(trans, inode); + btrfs_orphan_del(trans, BTRFS_I(inode)); } else { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } trans->block_rsv = &fs_info->trans_block_rsv; if (!(root == fs_info->tree_root || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) - btrfs_return_ino(root, btrfs_ino(inode)); + btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode))); btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); no_delete: - btrfs_remove_delayed_node(inode); + btrfs_remove_delayed_node(BTRFS_I(inode)); clear_inode(inode); } @@ -5429,8 +5345,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, - namelen, 0); + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), + name, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); @@ -5485,7 +5401,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) goto out; @@ -5520,7 +5436,7 @@ static void inode_tree_add(struct inode *inode) struct rb_node **p; struct rb_node *parent; struct rb_node *new = &BTRFS_I(inode)->rb_node; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); if (inode_unhashed(inode)) return; @@ -5531,9 +5447,9 @@ static void inode_tree_add(struct inode *inode) parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); - if (ino < 
btrfs_ino(&entry->vfs_inode)) + if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode))) p = &parent->rb_left; - else if (ino > btrfs_ino(&entry->vfs_inode)) + else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode))) p = &parent->rb_right; else { WARN_ON(!(entry->vfs_inode.i_state & @@ -5593,9 +5509,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < btrfs_ino(&entry->vfs_inode)) + if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode))) node = node->rb_left; - else if (objectid > btrfs_ino(&entry->vfs_inode)) + else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode))) node = node->rb_right; else break; @@ -5603,7 +5519,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= btrfs_ino(&entry->vfs_inode)) { + if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) { node = prev; break; } @@ -5612,7 +5528,7 @@ again: } while (node) { entry = rb_entry(node, struct btrfs_inode, rb_node); - objectid = btrfs_ino(&entry->vfs_inode) + 1; + objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1; inode = igrab(&entry->vfs_inode); if (inode) { spin_unlock(&root->inode_lock); @@ -5796,7 +5712,7 @@ static int btrfs_dentry_delete(const struct dentry *dentry) if (btrfs_root_refs(&root->root_item) == 0) return 1; - if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return 1; } return 0; @@ -5865,7 +5781,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) key.type = BTRFS_DIR_INDEX_KEY; key.offset = ctx->pos; - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -5974,7 +5890,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) return 0; - if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) + if (btrfs_fs_closing(root->fs_info) && + btrfs_is_free_space_inode(BTRFS_I(inode))) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { @@ -6054,9 +5971,9 @@ static int btrfs_update_time(struct inode *inode, struct timespec *now, * and then set the in-memory index_cnt variable to reflect * free sequence numbers */ -static int btrfs_set_inode_index_count(struct inode *inode) +static int btrfs_set_inode_index_count(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_key key, found_key; struct btrfs_path *path; struct extent_buffer *leaf; @@ -6085,7 +6002,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) * else has to start at 2 */ if (path->slots[0] == 0) { - BTRFS_I(inode)->index_cnt = 2; + inode->index_cnt = 2; goto out; } @@ -6096,11 +6013,11 @@ static int btrfs_set_inode_index_count(struct inode *inode) if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { - BTRFS_I(inode)->index_cnt = 2; + inode->index_cnt = 2; goto out; } - BTRFS_I(inode)->index_cnt = found_key.offset + 1; + inode->index_cnt = found_key.offset + 1; out: btrfs_free_path(path); return ret; @@ -6110,11 +6027,11 @@ out: * helper to find a free sequence number in a given directory. 
This current * code is very simple, later versions will do smarter things in the btree */ -int btrfs_set_inode_index(struct inode *dir, u64 *index) +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) { int ret = 0; - if (BTRFS_I(dir)->index_cnt == (u64)-1) { + if (dir->index_cnt == (u64)-1) { ret = btrfs_inode_delayed_dir_index_count(dir); if (ret) { ret = btrfs_set_inode_index_count(dir); @@ -6123,8 +6040,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) } } - *index = BTRFS_I(dir)->index_cnt; - BTRFS_I(dir)->index_cnt++; + *index = dir->index_cnt; + dir->index_cnt++; return ret; } @@ -6185,7 +6102,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (dir && name) { trace_btrfs_inode_request(dir); - ret = btrfs_set_inode_index(dir, index); + ret = btrfs_set_inode_index(BTRFS_I(dir), index); if (ret) { btrfs_free_path(path); iput(inode); @@ -6294,7 +6211,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (ret) btrfs_err(fs_info, "error inheriting props for ino %llu (root %llu): %d", - btrfs_ino(inode), root->root_key.objectid, ret); + btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); return inode; @@ -6320,18 +6237,18 @@ static inline u8 btrfs_inode_type(struct inode *inode) * inode to the parent directory. */ int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode *parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret = 0; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(parent_inode)->root; + struct btrfs_root *root = parent_inode->root; u64 ino = btrfs_ino(inode); u64 parent_ino = btrfs_ino(parent_inode); if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { - memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); + memcpy(&key, &inode->root->root_key, sizeof(key)); } else { key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; @@ -6353,7 +6270,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, root, name, name_len, parent_inode, &key, - btrfs_inode_type(inode), index); + btrfs_inode_type(&inode->vfs_inode), index); if (ret == -EEXIST || ret == -EOVERFLOW) goto fail_dir_item; else if (ret) { @@ -6361,12 +6278,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, return ret; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + name_len * 2); - inode_inc_iversion(parent_inode); - parent_inode->i_mtime = parent_inode->i_ctime = - current_time(parent_inode); - ret = btrfs_update_inode(trans, root, parent_inode); + inode_inc_iversion(&parent_inode->vfs_inode); + parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime = + current_time(&parent_inode->vfs_inode); + ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); if (ret) btrfs_abort_transaction(trans, ret); return ret; @@ -6390,8 +6307,8 @@ fail_dir_item: } static int btrfs_add_nondir(struct btrfs_trans_handle *trans, - struct inode *dir, struct dentry *dentry, - struct inode *inode, int backref, u64 index) + struct btrfs_inode *dir, struct dentry *dentry, + struct btrfs_inode *inode, int backref, u64 index) { int err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, @@ -6427,8 +6344,8 @@ static int btrfs_mknod(struct inode *dir, 
struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, btrfs_ino(dir), objectid, - mode, &index); + dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -6447,7 +6364,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) { goto out_unlock_inode; } else { @@ -6499,8 +6417,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, btrfs_ino(dir), objectid, - mode, &index); + dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -6524,7 +6442,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) goto out_unlock_inode; @@ -6566,7 +6485,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink >= BTRFS_LINK_MAX) return -EMLINK; - err = btrfs_set_inode_index(dir, &index); + err = btrfs_set_inode_index(BTRFS_I(dir), &index); if (err) goto fail; @@ -6590,7 +6509,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, ihold(inode); set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); - err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 1, index); if (err) { drop_inode = 1; @@ -6604,12 +6524,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, * If new hard link count is 1, it's a file created * with open(2) O_TMPFILE flag. */ - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) goto fail; } d_instantiate(dentry, inode); - btrfs_log_new_name(trans, inode, NULL, parent); + btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); } btrfs_balance_delayed_items(fs_info); @@ -6649,8 +6569,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) goto out_fail; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, btrfs_ino(dir), objectid, - S_IFDIR | mode, &index); + dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, + S_IFDIR | mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_fail; @@ -6665,13 +6585,14 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) goto out_fail_inode; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); err = btrfs_update_inode(trans, root, inode); if (err) goto out_fail_inode; - err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, - dentry->d_name.len, 0, index); + err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), + dentry->d_name.name, + dentry->d_name.len, 0, index); if (err) goto out_fail_inode; @@ -6801,11 +6722,12 @@ static noinline int uncompress_inline(struct btrfs_path *path, * This also copies inline extents directly into the page. 
*/ -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; int err = 0; u64 extent_start = 0; @@ -6813,13 +6735,13 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_file_extent_item *item; struct extent_buffer *leaf; struct btrfs_key found_key; struct extent_map *em = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_io_tree *io_tree = &inode->io_tree; struct btrfs_trans_handle *trans = NULL; const bool new_inline = !page || create; @@ -6932,7 +6854,8 @@ next: goto not_found_em; } - btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); + btrfs_extent_item_to_extent_map(inode, path, item, + new_inline, em); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -7084,9 +7007,10 @@ out: return em; } -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { struct extent_map *em; struct extent_map *hole_em = NULL; @@ -7123,7 +7047,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag em = NULL; /* ok, we didn't find anything, lets look for delalloc */ - found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + found = count_range_bits(&inode->io_tree, &range_start, end, len, EXTENT_DELALLOC, 1); found_end = range_start + found; if (found_end < range_start) @@ -7225,9 +7149,11 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, int ret; if (type != BTRFS_ORDERED_NOCOW) { - em = create_pinned_em(inode, start, len, orig_start, - block_start, block_len, orig_block_len, - ram_bytes, type); + em = create_io_em(inode, start, len, orig_start, + block_start, block_len, orig_block_len, + ram_bytes, + BTRFS_COMPRESS_NONE, /* compress_type */ + type); if (IS_ERR(em)) goto out; } @@ -7236,7 +7162,7 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, if (ret) { if (em) { free_extent_map(em); - btrfs_drop_extent_cache(inode, start, + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + len - 1, 0); } em = ERR_PTR(ret); @@ -7264,7 +7190,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, em = btrfs_create_dio_extent(inode, start, ins.offset, start, ins.objectid, ins.offset, ins.offset, - ins.offset, 0); + ins.offset, BTRFS_ORDERED_REGULAR); btrfs_dec_block_group_reservations(fs_info, ins.objectid); if (IS_ERR(em)) btrfs_free_reserved_extent(fs_info, ins.objectid, @@ -7282,7 +7208,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *ram_bytes) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_trans_handle *trans; struct btrfs_path *path; int ret; struct extent_buffer 
*leaf; @@ -7302,8 +7227,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, if (!path) return -ENOMEM; - ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), - offset, 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(BTRFS_I(inode)), offset, 0); if (ret < 0) goto out; @@ -7319,7 +7244,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, ret = 0; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != btrfs_ino(inode) || + if (key.objectid != btrfs_ino(BTRFS_I(inode)) || key.type != BTRFS_EXTENT_DATA_KEY) { /* not our file or wrong item type, must cow */ goto out; @@ -7385,15 +7310,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, * look for other files referencing this extent, if we * find any we must cow */ - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) { - ret = 0; - goto out; - } - ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), + ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), key.offset - backref_offset, disk_bytenr); - btrfs_end_transaction(trans); if (ret) { ret = 0; goto out; @@ -7504,7 +7423,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, * doing DIO to, so we need to make sure there's no ordered * extents in this range. */ - ordered = btrfs_lookup_ordered_range(inode, lockstart, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, lockend - lockstart + 1); /* @@ -7570,17 +7489,23 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, return ret; } -static struct extent_map *create_pinned_em(struct inode *inode, u64 start, - u64 len, u64 orig_start, - u64 block_start, u64 block_len, - u64 orig_block_len, u64 ram_bytes, - int type) +/* The callers of this must take lock_extent() */ +static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, + u64 orig_start, u64 block_start, + u64 block_len, u64 orig_block_len, + u64 ram_bytes, int compress_type, + int type) { struct extent_map_tree *em_tree; struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; + ASSERT(type == BTRFS_ORDERED_PREALLOC || + type == BTRFS_ORDERED_COMPRESSED || + type == BTRFS_ORDERED_NOCOW || + type == BTRFS_ORDERED_REGULAR); + em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(); if (!em) @@ -7588,8 +7513,6 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, em->start = start; em->orig_start = orig_start; - em->mod_start = start; - em->mod_len = len; em->len = len; em->block_len = block_len; em->block_start = block_start; @@ -7598,15 +7521,23 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, em->ram_bytes = ram_bytes; em->generation = -1; set_bit(EXTENT_FLAG_PINNED, &em->flags); - if (type == BTRFS_ORDERED_PREALLOC) + if (type == BTRFS_ORDERED_PREALLOC) { set_bit(EXTENT_FLAG_FILLING, &em->flags); + } else if (type == BTRFS_ORDERED_COMPRESSED) { + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->compress_type = compress_type; + } do { - btrfs_drop_extent_cache(inode, em->start, + btrfs_drop_extent_cache(BTRFS_I(inode), em->start, em->start + em->len - 1, 0); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 1); write_unlock(&em_tree->lock); + /* + * The caller has taken lock_extent(), who could race with us + * to add em? 
+ */ } while (ret == -EEXIST); if (ret) { @@ -7614,6 +7545,7 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, return ERR_PTR(ret); } + /* em got 2 refs now, callers needs to do free_extent_map once. */ return em; } @@ -7621,10 +7553,8 @@ static void adjust_dio_outstanding_extents(struct inode *inode, struct btrfs_dio_data *dio_data, const u64 len) { - unsigned num_extents; + unsigned num_extents = count_max_extents(len); - num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); /* * If we have an outstanding_extents count still set then we're * within our reservation, otherwise we need to adjust our inode @@ -7687,7 +7617,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto err; } - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; @@ -7804,7 +7734,7 @@ unlock: * Need to update the i_size under the extent lock so buffered * readers will get the updated i_size when we unlock. */ - if (start + len > i_size_read(inode)) + if (!dio_data->overwrite && start + len > i_size_read(inode)) i_size_write(inode, start + len); adjust_dio_outstanding_extents(inode, dio_data, len); @@ -7924,7 +7854,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -7938,7 +7868,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, pgoff, isector, repair_endio, repair_arg); if (!bio) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -7949,7 +7879,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); if (ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -7979,7 +7909,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) done->uptodate = 1; bio_for_each_segment_all(bvec, bio, i) - clean_io_failure(done->inode, done->start, bvec->bv_page, 0); + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); end: complete(&done->done); bio_put(bio); @@ -8065,7 +7995,7 @@ static void btrfs_retry_endio(struct bio *bio) bvec->bv_page, bvec->bv_offset, done->start, bvec->bv_len); if (!ret) - clean_io_failure(done->inode, done->start, + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, bvec->bv_offset); else uptodate = 0; @@ -8254,7 +8184,8 @@ static void btrfs_end_dio_bio(struct bio *bio) if (err) btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", - btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf, + btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), + bio->bi_opf, (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size, err); @@ -8679,15 +8610,14 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) * not unlock the i_mutex at this case. 
*/ if (offset + count <= inode->i_size) { + dio_data.overwrite = 1; inode_unlock(inode); relock = true; } ret = btrfs_delalloc_reserve_space(inode, offset, count); if (ret) goto out; - dio_data.outstanding_extents = div64_u64(count + - BTRFS_MAX_EXTENT_SIZE - 1, - BTRFS_MAX_EXTENT_SIZE); + dio_data.outstanding_extents = count_max_extents(count); /* * We need to know how many extents we reserved so that we can @@ -8831,7 +8761,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) { if (PageWriteback(page) || PageDirty(page)) return 0; - return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); + return __btrfs_releasepage(page, gfp_flags); } static void btrfs_invalidatepage(struct page *page, unsigned int offset, @@ -8866,7 +8796,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, lock_extent_bits(tree, page_start, page_end, &cached_state); again: start = page_start; - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, page_end - start + 1); if (ordered) { end = min(page_end, ordered->file_offset + ordered->len - 1); @@ -9032,7 +8962,8 @@ again: * we can't set the delalloc bits if there are pending ordered * extents. Drop our locks and wait for them to finish */ - ordered = btrfs_lookup_ordered_range(inode, page_start, page_end); + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, + PAGE_SIZE); if (ordered) { unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); @@ -9056,11 +8987,11 @@ again: } /* - * XXX - page_mkwrite gets called every time the page is dirtied, even - * if it was already dirty, so for space accounting reasons we need to - * clear any delalloc bits for the range we are fixing to save. There - * is probably a better way to do this, but for now keep consistent with - * prepare_pages in the normal write path. + * page_mkwrite gets called when the page is firstly dirtied after it's + * faulted in, but write(2) could also dirty a page and set delalloc + * bits, thus in this case for space account reason, we still need to + * clear any delalloc bits within this page range since we have to + * reserve data&meta space before lock_page() (see above comments). 
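/*
 * The new dio_data.overwrite flag set above pairs with the earlier
 * btrfs_get_blocks_direct() hunk: for a direct write that fits entirely
 * within i_size the inode lock is dropped here, and the added
 * "!dio_data->overwrite" check there skips the i_size_write() for that case.
 */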
*/ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, EXTENT_DIRTY | EXTENT_DELALLOC | @@ -9230,7 +9161,7 @@ static int btrfs_truncate(struct inode *inode) if (ret == 0 && inode->i_nlink > 0) { trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_orphan_del(trans, inode); + ret = btrfs_orphan_del(trans, BTRFS_I(inode)); if (ret) err = ret; } @@ -9275,7 +9206,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, inode->i_fop = &btrfs_dir_file_operations; set_nlink(inode, 1); - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); unlock_new_inode(inode); err = btrfs_subvol_inherit_props(trans, new_root, parent_root); @@ -9348,7 +9279,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS void btrfs_test_destroy_inode(struct inode *inode) { - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } #endif @@ -9384,7 +9315,7 @@ void btrfs_destroy_inode(struct inode *inode) if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, &BTRFS_I(inode)->runtime_flags)) { btrfs_info(fs_info, "inode %llu still on the orphan list", - btrfs_ino(inode)); + btrfs_ino(BTRFS_I(inode))); atomic_dec(&root->orphan_inodes); } @@ -9403,7 +9334,7 @@ void btrfs_destroy_inode(struct inode *inode) } btrfs_qgroup_check_reserved_leak(inode); inode_tree_del(inode); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); free: call_rcu(&inode->i_rcu, btrfs_i_callback); } @@ -9482,11 +9413,11 @@ fail: return -ENOMEM; } -static int btrfs_getattr(struct vfsmount *mnt, - struct dentry *dentry, struct kstat *stat) +static int btrfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { u64 delalloc_bytes; - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); u32 blocksize = inode->i_sb->s_blocksize; generic_fillattr(inode, stat); @@ -9513,8 +9444,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, struct inode *old_inode = old_dentry->d_inode; struct timespec ctime = current_time(old_inode); struct dentry *parent; - u64 old_ino = btrfs_ino(old_inode); - u64 new_ino = btrfs_ino(new_inode); + u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); + u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); u64 old_idx = 0; u64 new_idx = 0; u64 root_objectid; @@ -9550,10 +9481,10 @@ static int btrfs_rename_exchange(struct inode *old_dir, * We need to find a free sequence number both in the source and * in the destination directory for the exchange. 
*/ - ret = btrfs_set_inode_index(new_dir, &old_idx); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); if (ret) goto out_fail; - ret = btrfs_set_inode_index(old_dir, &new_idx); + ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); if (ret) goto out_fail; @@ -9571,7 +9502,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, new_dentry->d_name.name, new_dentry->d_name.len, old_ino, - btrfs_ino(new_dir), old_idx); + btrfs_ino(BTRFS_I(new_dir)), + old_idx); if (ret) goto out_fail; } @@ -9587,7 +9519,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, old_dentry->d_name.name, old_dentry->d_name.len, new_ino, - btrfs_ino(old_dir), new_idx); + btrfs_ino(BTRFS_I(old_dir)), + new_idx); if (ret) goto out_fail; } @@ -9603,8 +9536,10 @@ static int btrfs_rename_exchange(struct inode *old_dir, new_inode->i_ctime = ctime; if (old_dentry->d_parent != new_dentry->d_parent) { - btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); - btrfs_record_unlink_dir(trans, new_dir, new_inode, 1); + btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), + BTRFS_I(old_inode), 1); + btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), + BTRFS_I(new_inode), 1); } /* src is a subvolume */ @@ -9615,8 +9550,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, old_dentry->d_name.name, old_dentry->d_name.len); } else { /* src is an inode */ - ret = __btrfs_unlink_inode(trans, root, old_dir, - old_dentry->d_inode, + ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), + BTRFS_I(old_dentry->d_inode), old_dentry->d_name.name, old_dentry->d_name.len); if (!ret) @@ -9635,8 +9570,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, new_dentry->d_name.name, new_dentry->d_name.len); } else { /* dest is an inode */ - ret = __btrfs_unlink_inode(trans, dest, new_dir, - new_dentry->d_inode, + ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), + BTRFS_I(new_dentry->d_inode), new_dentry->d_name.name, new_dentry->d_name.len); if (!ret) @@ -9647,7 +9582,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, old_idx); if (ret) { @@ -9655,7 +9590,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, old_dir, new_inode, + ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), old_dentry->d_name.name, old_dentry->d_name.len, 0, new_idx); if (ret) { @@ -9670,13 +9605,15 @@ static int btrfs_rename_exchange(struct inode *old_dir, if (root_log_pinned) { parent = new_dentry->d_parent; - btrfs_log_new_name(trans, old_inode, old_dir, parent); + btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), + parent); btrfs_end_log_trans(root); root_log_pinned = false; } if (dest_log_pinned) { parent = old_dentry->d_parent; - btrfs_log_new_name(trans, new_inode, new_dir, parent); + btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), + parent); btrfs_end_log_trans(dest); dest_log_pinned = false; } @@ -9693,11 +9630,11 @@ out_fail: * allow the tasks to sync it. 
*/ if (ret && (root_log_pinned || dest_log_pinned)) { - if (btrfs_inode_in_log(old_dir, fs_info->generation) || - btrfs_inode_in_log(new_dir, fs_info->generation) || - btrfs_inode_in_log(old_inode, fs_info->generation) || + if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || + btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || + btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || (new_inode && - btrfs_inode_in_log(new_inode, fs_info->generation))) + btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) btrfs_set_log_full_commit(fs_info, trans); if (root_log_pinned) { @@ -9736,7 +9673,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, - btrfs_ino(dir), + btrfs_ino(BTRFS_I(dir)), objectid, S_IFCHR | WHITEOUT_MODE, &index); @@ -9755,8 +9692,8 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = btrfs_add_nondir(trans, dir, dentry, - inode, 0, index); + ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (ret) goto out; @@ -9784,10 +9721,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, u64 index = 0; u64 root_objectid; int ret; - u64 old_ino = btrfs_ino(old_inode); + u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); bool log_pinned = false; - if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; /* we only allow rename subvolume link between subvolumes */ @@ -9795,7 +9732,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, return -EXDEV; if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) + (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; if (S_ISDIR(old_inode->i_mode) && new_inode && @@ -9855,7 +9792,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (dest != root) btrfs_record_root_in_trans(trans, dest); - ret = btrfs_set_inode_index(new_dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); if (ret) goto out_fail; @@ -9870,7 +9807,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.name, new_dentry->d_name.len, old_ino, - btrfs_ino(new_dir), index); + btrfs_ino(BTRFS_I(new_dir)), index); if (ret) goto out_fail; } @@ -9883,7 +9820,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode->i_ctime = current_time(old_dir); if (old_dentry->d_parent != new_dentry->d_parent) - btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); + btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), + BTRFS_I(old_inode), 1); if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; @@ -9891,8 +9829,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_dentry->d_name.name, old_dentry->d_name.len); } else { - ret = __btrfs_unlink_inode(trans, root, old_dir, - d_inode(old_dentry), + ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), + BTRFS_I(d_inode(old_dentry)), old_dentry->d_name.name, old_dentry->d_name.len); if (!ret) @@ -9906,7 +9844,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode) { inode_inc_iversion(new_inode); new_inode->i_ctime = current_time(new_inode); - if 
(unlikely(btrfs_ino(new_inode) == + if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { root_objectid = BTRFS_I(new_inode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, @@ -9915,20 +9853,21 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); BUG_ON(new_inode->i_nlink == 0); } else { - ret = btrfs_unlink_inode(trans, dest, new_dir, - d_inode(new_dentry), + ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), + BTRFS_I(d_inode(new_dentry)), new_dentry->d_name.name, new_dentry->d_name.len); } if (!ret && new_inode->i_nlink == 0) - ret = btrfs_orphan_add(trans, d_inode(new_dentry)); + ret = btrfs_orphan_add(trans, + BTRFS_I(d_inode(new_dentry))); if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; } } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, index); if (ret) { @@ -9942,7 +9881,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (log_pinned) { struct dentry *parent = new_dentry->d_parent; - btrfs_log_new_name(trans, old_inode, old_dir, parent); + btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), + parent); btrfs_end_log_trans(root); log_pinned = false; } @@ -9969,11 +9909,11 @@ out_fail: * allow the tasks to sync it. */ if (ret && log_pinned) { - if (btrfs_inode_in_log(old_dir, fs_info->generation) || - btrfs_inode_in_log(new_dir, fs_info->generation) || - btrfs_inode_in_log(old_inode, fs_info->generation) || + if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || + btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || + btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || (new_inode && - btrfs_inode_in_log(new_inode, fs_info->generation))) + btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) btrfs_set_log_full_commit(fs_info, trans); btrfs_end_log_trans(root); @@ -10237,8 +10177,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, btrfs_ino(dir), objectid, - S_IFLNK|S_IRWXUGO, &index); + dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), + objectid, S_IFLNK|S_IRWXUGO, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -10264,7 +10204,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, err = -ENOMEM; goto out_unlock_inode; } - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); key.offset = 0; key.type = BTRFS_EXTENT_DATA_KEY; datasize = btrfs_file_extent_calc_inline_size(name_len); @@ -10294,7 +10234,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode_nohighmem(inode); inode->i_mapping->a_ops = &btrfs_symlink_aops; inode_set_bytes(inode, name_len); - btrfs_i_size_write(inode, name_len); + btrfs_i_size_write(BTRFS_I(inode), name_len); err = btrfs_update_inode(trans, root, inode); /* * Last step, add directory indexes for our symlink inode. This is the @@ -10302,7 +10242,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, * elsewhere above. 
*/ if (!err) - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (err) { drop_inode = 1; goto out_unlock_inode; @@ -10388,7 +10329,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, break; } - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset -1, 0); em = alloc_extent_map(); @@ -10415,7 +10356,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, write_unlock(&em_tree->lock); if (ret != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset - 1, 0); } @@ -10517,7 +10458,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) goto out; inode = btrfs_new_inode(trans, root, dir, NULL, 0, - btrfs_ino(dir), objectid, mode, &index); + btrfs_ino(BTRFS_I(dir)), objectid, mode, &index); if (IS_ERR(inode)) { ret = PTR_ERR(inode); inode = NULL; @@ -10537,7 +10478,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) ret = btrfs_update_inode(trans, root, inode); if (ret) goto out_inode; - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out_inode; @@ -10567,6 +10508,12 @@ out_inode: } +__attribute__((const)) +static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) +{ + return 0; +} + static const struct inode_operations btrfs_dir_inode_operations = { .getattr = btrfs_getattr, .lookup = btrfs_lookup, @@ -10605,10 +10552,14 @@ static const struct file_operations btrfs_dir_file_operations = { }; static const struct extent_io_ops btrfs_extent_io_ops = { - .fill_delalloc = run_delalloc_range, + /* mandatory callbacks */ .submit_bio_hook = btrfs_submit_bio_hook, - .merge_bio_hook = btrfs_merge_bio_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = dummy_readpage_io_failed_hook, + + /* optional callbacks */ + .fill_delalloc = run_delalloc_range, .writepage_end_io_hook = btrfs_writepage_end_io_hook, .writepage_start_hook = btrfs_writepage_start_hook, .set_bit_hook = btrfs_set_bit_hook, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 21e51b0ba188..dabfc7ac48a6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -395,7 +395,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) q = bdev_get_queue(device->bdev); if (blk_queue_discard(q)) { num_devices++; - minlen = min((u64)q->limits.discard_granularity, + minlen = min_t(u64, q->limits.discard_granularity, minlen); } } @@ -434,7 +434,7 @@ int btrfs_is_empty_uuid(u8 *uuid) static noinline int create_subvol(struct inode *dir, struct dentry *dentry, - char *name, int namelen, + const char *name, int namelen, u64 *async_transid, struct btrfs_qgroup_inherit *inherit) { @@ -487,8 +487,7 @@ static noinline int create_subvol(struct inode *dir, trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); - btrfs_subvolume_release_metadata(fs_info, &block_rsv, - qgroup_reserved); + btrfs_subvolume_release_metadata(fs_info, &block_rsv); goto fail_free; } trans->block_rsv = &block_rsv; @@ -581,27 +580,27 @@ static noinline int create_subvol(struct inode *dir, /* * insert the directory item */ - ret = btrfs_set_inode_index(dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(dir), &index); if (ret) { 
btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir, &key, + name, namelen, BTRFS_I(dir), &key, BTRFS_FT_DIR, index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } - btrfs_i_size_write(dir, dir->i_size + namelen * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); ret = btrfs_add_root_ref(trans, fs_info, objectid, root->root_key.objectid, - btrfs_ino(dir), index, name, namelen); + btrfs_ino(BTRFS_I(dir)), index, name, namelen); BUG_ON(ret); ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid, @@ -613,7 +612,7 @@ fail: kfree(root_item); trans->block_rsv = NULL; trans->bytes_reserved = 0; - btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved); + btrfs_subvolume_release_metadata(fs_info, &block_rsv); if (async_transid) { *async_transid = trans->transid; @@ -657,7 +656,7 @@ static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root) } static int create_snapshot(struct btrfs_root *root, struct inode *dir, - struct dentry *dentry, char *name, int namelen, + struct dentry *dentry, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { @@ -670,12 +669,12 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) return -EINVAL; - pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); + pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL); if (!pending_snapshot) return -ENOMEM; pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item), - GFP_NOFS); + GFP_KERNEL); pending_snapshot->path = btrfs_alloc_path(); if (!pending_snapshot->root_item || !pending_snapshot->path) { ret = -ENOMEM; @@ -753,9 +752,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, d_instantiate(dentry, inode); ret = 0; fail: - btrfs_subvolume_release_metadata(fs_info, - &pending_snapshot->block_rsv, - pending_snapshot->qgroup_reserved); + btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); dec_and_free: if (atomic_dec_and_test(&root->will_be_snapshoted)) wake_up_atomic_t(&root->will_be_snapshoted); @@ -835,7 +832,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) * inside this filesystem so it's quite a bit simpler. 
*/ static noinline int btrfs_mksubvol(const struct path *parent, - char *name, int namelen, + const char *name, int namelen, struct btrfs_root *snap_src, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) @@ -874,7 +871,7 @@ static noinline int btrfs_mksubvol(const struct path *parent, goto out_up_read; if (snap_src) { - error = create_snapshot(snap_src, dir, dentry, name, namelen, + error = create_snapshot(snap_src, dir, dentry, async_transid, readonly, inherit); } else { error = create_subvol(dir, dentry, name, namelen, @@ -941,7 +938,7 @@ static int find_new_extents(struct btrfs_root *root, struct btrfs_file_extent_item *extent; int type; int ret; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) @@ -1012,7 +1009,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) /* get the big lock and read metadata off disk */ lock_extent_bits(io_tree, start, end, &cached); - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS); if (IS_ERR(em)) @@ -1628,7 +1625,7 @@ out: } static noinline int btrfs_ioctl_snap_create_transid(struct file *file, - char *name, unsigned long fd, int subvol, + const char *name, unsigned long fd, int subvol, u64 *transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { @@ -1780,7 +1777,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file, int ret = 0; u64 flags = 0; - if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&fs_info->subvol_sem); @@ -1812,7 +1809,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (ret) goto out; - if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out_drop_write; } @@ -2446,7 +2443,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, if (err) goto out_dput; - if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } @@ -2497,7 +2494,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; - btrfs_record_snapshot_destroy(trans, dir); + btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); ret = btrfs_unlink_subvol(trans, root, dir, dest->root_key.objectid, @@ -2555,7 +2552,7 @@ out_end_trans: err = ret; inode->i_flags |= S_DEAD; out_release: - btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved); + btrfs_subvolume_release_metadata(fs_info, &block_rsv); out_up_write: up_write(&fs_info->subvol_sem); if (err) { @@ -2613,9 +2610,6 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) goto out; } ret = btrfs_defrag_root(root); - if (ret) - goto out; - ret = btrfs_defrag_root(root->fs_info->extent_root); break; case S_IFREG: if (!(file->f_mode & FMODE_WRITE)) { @@ -3047,11 +3041,21 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff, cmp->src_pages = src_pgarr; cmp->dst_pages = dst_pgarr; - ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff); + /* + * If deduping ranges in the same inode, locking rules make it mandatory + * to always lock pages in ascending order to avoid deadlocks with + * concurrent tasks (such as starting writeback/delalloc). 
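The same-inode dedup locking rule described in the comment above (and implemented with swap() right after it) boils down to ordering the two offsets before any pages are gathered. A standalone illustration only, not btrfs API:

static void order_same_file_ranges(u64 *off_a, u64 *off_b)
{
	/*
	 * Gather and lock the lower-offset range first so that two tasks
	 * deduping ranges of the same file can never take page locks in
	 * opposite orders and deadlock; swap() here is the kernel macro.
	 */
	if (*off_b < *off_a)
		swap(*off_a, *off_b);
}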
+ */ + if (src == dst && dst_loff < loff) { + swap(src_pgarr, dst_pgarr); + swap(loff, dst_loff); + } + + ret = gather_extent_pages(src, src_pgarr, cmp->num_pages, loff); if (ret) goto out; - ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff); + ret = gather_extent_pages(dst, dst_pgarr, cmp->num_pages, dst_loff); out: if (ret) @@ -3059,8 +3063,7 @@ out: return 0; } -static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, - u64 dst_loff, u64 len, struct cmp_pages *cmp) +static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp) { int ret = 0; int i; @@ -3128,26 +3131,27 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, int ret; u64 len = olen; struct cmp_pages cmp; - int same_inode = 0; + bool same_inode = (src == dst); u64 same_lock_start = 0; u64 same_lock_len = 0; - if (src == dst) - same_inode = 1; - if (len == 0) return 0; - if (same_inode) { + if (same_inode) inode_lock(src); + else + btrfs_double_inode_lock(src, dst); - ret = extent_same_check_offsets(src, loff, &len, olen); - if (ret) - goto out_unlock; - ret = extent_same_check_offsets(src, dst_loff, &len, olen); - if (ret) - goto out_unlock; + ret = extent_same_check_offsets(src, loff, &len, olen); + if (ret) + goto out_unlock; + + ret = extent_same_check_offsets(dst, dst_loff, &len, olen); + if (ret) + goto out_unlock; + if (same_inode) { /* * Single inode case wants the same checks, except we * don't want our length pushed out past i_size as @@ -3175,16 +3179,6 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, same_lock_start = min_t(u64, loff, dst_loff); same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; - } else { - btrfs_double_inode_lock(src, dst); - - ret = extent_same_check_offsets(src, loff, &len, olen); - if (ret) - goto out_unlock; - - ret = extent_same_check_offsets(dst, dst_loff, &len, olen); - if (ret) - goto out_unlock; } /* don't make the dst file partly checksummed */ @@ -3236,7 +3230,7 @@ again: } /* pass original length for comparison so we stay within i_size */ - ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); + ret = btrfs_cmp_data(olen, &cmp); if (ret == 0) ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); @@ -3304,7 +3298,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, if (endoff > destoff + olen) endoff = destoff + olen; if (endoff > inode->i_size) - btrfs_i_size_write(inode, endoff); + btrfs_i_size_write(BTRFS_I(inode), endoff); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -3317,20 +3311,19 @@ out: return ret; } -static void clone_update_extent_map(struct inode *inode, +static void clone_update_extent_map(struct btrfs_inode *inode, const struct btrfs_trans_handle *trans, const struct btrfs_path *path, const u64 hole_offset, const u64 hole_len) { - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; em = alloc_extent_map(); if (!em) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); return; } @@ -3344,7 +3337,7 @@ static void clone_update_extent_map(struct inode *inode, if (btrfs_file_extent_type(path->nodes[0], fi) == BTRFS_FILE_EXTENT_INLINE) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } else { em->start = hole_offset; em->len = hole_len; @@ -3370,8 +3363,7 @@ static void clone_update_extent_map(struct inode 
*inode, } if (ret) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } /* @@ -3399,8 +3391,7 @@ static void clone_update_extent_map(struct inode *inode, * data into the destination inode's inline extent if the later is greater then * the former. */ -static int clone_copy_inline_extent(struct inode *src, - struct inode *dst, +static int clone_copy_inline_extent(struct inode *dst, struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_key *new_key, @@ -3420,7 +3411,7 @@ static int clone_copy_inline_extent(struct inode *src, if (new_key->offset > 0) return -EOPNOTSUPP; - key.objectid = btrfs_ino(dst); + key.objectid = btrfs_ino(BTRFS_I(dst)); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -3435,7 +3426,7 @@ static int clone_copy_inline_extent(struct inode *src, goto copy_inline_extent; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid == btrfs_ino(dst) && + if (key.objectid == btrfs_ino(BTRFS_I(dst)) && key.type == BTRFS_EXTENT_DATA_KEY) { ASSERT(key.offset > 0); return -EOPNOTSUPP; @@ -3469,7 +3460,7 @@ static int clone_copy_inline_extent(struct inode *src, } else if (ret == 0) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid == btrfs_ino(dst) && + if (key.objectid == btrfs_ino(BTRFS_I(dst)) && key.type == BTRFS_EXTENT_DATA_KEY) return -EOPNOTSUPP; } @@ -3563,7 +3554,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode, path->reada = READA_FORWARD; /* clone data */ - key.objectid = btrfs_ino(src); + key.objectid = btrfs_ino(BTRFS_I(src)); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = off; @@ -3606,7 +3597,7 @@ process_slot: btrfs_item_key_to_cpu(leaf, &key, slot); if (key.type > BTRFS_EXTENT_DATA_KEY || - key.objectid != btrfs_ino(src)) + key.objectid != btrfs_ino(BTRFS_I(src))) break; if (key.type == BTRFS_EXTENT_DATA_KEY) { @@ -3659,7 +3650,7 @@ process_slot: path->leave_spinning = 0; memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = btrfs_ino(inode); + new_key.objectid = btrfs_ino(BTRFS_I(inode)); if (off <= key.offset) new_key.offset = key.offset + destoff - off; else @@ -3749,7 +3740,7 @@ process_slot: fs_info, disko, diskl, 0, root->root_key.objectid, - btrfs_ino(inode), + btrfs_ino(BTRFS_I(inode)), new_key.offset - datao); if (ret) { btrfs_abort_transaction(trans, @@ -3779,7 +3770,7 @@ process_slot: size -= skip + trim; datal -= skip + trim; - ret = clone_copy_inline_extent(src, inode, + ret = clone_copy_inline_extent(inode, trans, path, &new_key, drop_start, @@ -3798,11 +3789,12 @@ process_slot: /* If we have an implicit hole (NO_HOLES feature). 
*/ if (drop_start < new_key.offset) - clone_update_extent_map(inode, trans, + clone_update_extent_map(BTRFS_I(inode), trans, NULL, drop_start, new_key.offset - drop_start); - clone_update_extent_map(inode, trans, path, 0, 0); + clone_update_extent_map(BTRFS_I(inode), trans, + path, 0, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -3852,8 +3844,9 @@ process_slot: btrfs_end_transaction(trans); goto out; } - clone_update_extent_map(inode, trans, NULL, last_dest_end, - destoff + len - last_dest_end); + clone_update_extent_map(BTRFS_I(inode), trans, NULL, + last_dest_end, + destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen, no_time_update); } @@ -5129,7 +5122,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file, down_write(&fs_info->subvol_sem); - if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out; } diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 45d26980caf9..f48c8c14dc14 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -76,7 +76,7 @@ static inline void write_compress_length(char *buf, size_t len) memcpy(buf, &dlen, LZO_LEN); } -static inline size_t read_compress_length(char *buf) +static inline size_t read_compress_length(const char *buf) { __le32 dlen; @@ -86,13 +86,11 @@ static inline size_t read_compress_length(char *buf) static int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -102,7 +100,9 @@ static int lzo_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; - + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; size_t in_len; size_t out_len; char *buf; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 041c3326d109..9a46878ba60f 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -432,7 +432,7 @@ out: } /* Needs to either be called under a log transaction or the log_mutex */ -void btrfs_get_logged_extents(struct inode *inode, +void btrfs_get_logged_extents(struct btrfs_inode *inode, struct list_head *logged_list, const loff_t start, const loff_t end) @@ -442,7 +442,7 @@ void btrfs_get_logged_extents(struct inode *inode, struct rb_node *n; struct rb_node *prev; - tree = &BTRFS_I(inode)->ordered_tree; + tree = &inode->ordered_tree; spin_lock_irq(&tree->lock); n = __tree_search(&tree->tree, end, &prev); if (!n) @@ -879,15 +879,14 @@ out: /* Since the DIO code tries to lock a wide area we need to look for any ordered * extents that exist in the range, rather than just the start of the range. 
*/ -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len) +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, u64 file_offset, u64 len) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; - tree = &BTRFS_I(inode)->ordered_tree; + tree = &inode->ordered_tree; spin_lock_irq(&tree->lock); node = tree_search(tree, file_offset); if (!node) { @@ -923,7 +922,7 @@ bool btrfs_have_ordered_extents_in_range(struct inode *inode, { struct btrfs_ordered_extent *oe; - oe = btrfs_lookup_ordered_range(inode, file_offset, len); + oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len); if (oe) { btrfs_put_ordered_extent(oe); return true; @@ -984,8 +983,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, } disk_i_size = BTRFS_I(inode)->disk_i_size; - /* truncate file */ - if (disk_i_size > i_size) { + /* + * truncate file. + * If ordered is not NULL, then this is called from endio and + * disk_i_size will be updated by either truncate itself or any + * in-flight IOs which are inside the disk_i_size. + * + * Because btrfs_setsize() may set i_size with disk_i_size if truncate + * fails somehow, we need to make sure we have a precise disk_i_size by + * updating it as usual. + * + */ + if (!ordered && disk_i_size > i_size) { BTRFS_I(inode)->disk_i_size = orig_offset; ret = 0; goto out; @@ -1032,25 +1041,22 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, /* We treat this entry as if it doesn't exist */ if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags)) continue; - if (test->file_offset + test->len <= disk_i_size) + + if (entry_end(test) <= disk_i_size) break; if (test->file_offset >= i_size) break; - if (entry_end(test) > disk_i_size) { - /* - * we don't update disk_i_size now, so record this - * undealt i_size. Or we will not know the real - * i_size. - */ - if (test->outstanding_isize < offset) - test->outstanding_isize = offset; - if (ordered && - ordered->outstanding_isize > - test->outstanding_isize) - test->outstanding_isize = - ordered->outstanding_isize; - goto out; - } + + /* + * We don't update disk_i_size now, so record this undealt + * i_size. Or we will not know the real i_size. + */ + if (test->outstanding_isize < offset) + test->outstanding_isize = offset; + if (ordered && + ordered->outstanding_isize > test->outstanding_isize) + test->outstanding_isize = ordered->outstanding_isize; + goto out; } new_i_size = min_t(u64, offset, i_size); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 5f2b0ca28705..195c93b67fe0 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -75,6 +75,8 @@ struct btrfs_ordered_sum { * in the logging code. */ #define BTRFS_ORDERED_PENDING 11 /* We are waiting for this ordered extent to * complete in the current transaction. 
*/ +#define BTRFS_ORDERED_REGULAR 12 /* Regular IO for COW */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -187,9 +189,10 @@ void btrfs_start_ordered_extent(struct inode *inode, int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len); +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, + u64 file_offset, + u64 len); bool btrfs_have_ordered_extents_in_range(struct inode *inode, u64 file_offset, u64 len); @@ -201,7 +204,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, const u64 range_start, const u64 range_len); int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, const u64 range_start, const u64 range_len); -void btrfs_get_logged_extents(struct inode *inode, +void btrfs_get_logged_extents(struct btrfs_inode *inode, struct list_head *logged_list, const loff_t start, const loff_t end); diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index f2621e330954..d6cb155ef7a1 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -279,7 +279,7 @@ static void inode_prop_iterator(void *ctx, if (unlikely(ret)) btrfs_warn(root->fs_info, "error applying prop %s to ino %llu (root %llu): %d", - handler->xattr_name, btrfs_ino(inode), + handler->xattr_name, btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); else set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags); @@ -288,7 +288,7 @@ static void inode_prop_iterator(void *ctx, int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); int ret; ret = iterate_object_props(root, path, ino, inode_prop_iterator, inode); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 662821f1252c..a5da750c1087 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -319,7 +319,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) return 0; - fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS); + fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); if (!fs_info->qgroup_ulist) { ret = -ENOMEM; goto out; @@ -876,7 +876,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans, goto out; } - fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS); + fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); if (!fs_info->qgroup_ulist) { ret = -ENOMEM; goto out; @@ -1019,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, list_del("a_root->dirty_list); btrfs_tree_lock(quota_root->node); - clean_tree_block(trans, fs_info, quota_root->node); + clean_tree_block(fs_info, quota_root->node); btrfs_tree_unlock(quota_root->node); btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1); @@ -1038,6 +1038,15 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info, list_add(&qgroup->dirty, &fs_info->dirty_qgroups); } +static void report_reserved_underflow(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup *qgroup, + u64 num_bytes) +{ + btrfs_warn(fs_info, + "qgroup %llu reserved space underflow, have: %llu, to free: %llu", + qgroup->qgroupid, qgroup->reserved, num_bytes); + qgroup->reserved = 0; +} /* * The easy accounting, if we are adding/removing the only ref for an extent * then this qgroup and all of the parent qgroups get their 
reference and @@ -1065,8 +1074,12 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, WARN_ON(sign < 0 && qgroup->excl < num_bytes); qgroup->excl += sign * num_bytes; qgroup->excl_cmpr += sign * num_bytes; - if (sign > 0) - qgroup->reserved -= num_bytes; + if (sign > 0) { + if (WARN_ON(qgroup->reserved < num_bytes)) + report_reserved_underflow(fs_info, qgroup, num_bytes); + else + qgroup->reserved -= num_bytes; + } qgroup_dirty(fs_info, qgroup); @@ -1086,8 +1099,13 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, qgroup->rfer_cmpr += sign * num_bytes; WARN_ON(sign < 0 && qgroup->excl < num_bytes); qgroup->excl += sign * num_bytes; - if (sign > 0) - qgroup->reserved -= num_bytes; + if (sign > 0) { + if (WARN_ON(qgroup->reserved < num_bytes)) + report_reserved_underflow(fs_info, qgroup, + num_bytes); + else + qgroup->reserved -= num_bytes; + } qgroup->excl_cmpr += sign * num_bytes; qgroup_dirty(fs_info, qgroup); @@ -1156,7 +1174,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) return -EINVAL; - tmp = ulist_alloc(GFP_NOFS); + tmp = ulist_alloc(GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -1205,7 +1223,7 @@ out: return ret; } -int __del_qgroup_relation(struct btrfs_trans_handle *trans, +static int __del_qgroup_relation(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 src, u64 dst) { struct btrfs_root *quota_root; @@ -1216,7 +1234,7 @@ int __del_qgroup_relation(struct btrfs_trans_handle *trans, int ret = 0; int err; - tmp = ulist_alloc(GFP_NOFS); + tmp = ulist_alloc(GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -1446,8 +1464,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans, while (node) { record = rb_entry(node, struct btrfs_qgroup_extent_record, node); - ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0, - &record->old_roots); + if (WARN_ON(!record->old_roots)) + ret = btrfs_find_all_roots(NULL, fs_info, + record->bytenr, 0, &record->old_roots); if (ret < 0) break; if (qgroup_to_skip) @@ -1486,6 +1505,28 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, return 0; } +int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup_extent_record *qrecord) +{ + struct ulist *old_root; + u64 bytenr = qrecord->bytenr; + int ret; + + ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root); + if (ret < 0) + return ret; + + /* + * Here we don't need to get the lock of + * trans->transaction->delayed_refs, since inserted qrecord won't + * be deleted, only qrecord->node may be modified (new qrecord insert) + * + * So modifying qrecord->old_roots is safe here + */ + qrecord->old_roots = old_root; + return 0; +} + int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, gfp_t gfp_flag) @@ -1511,9 +1552,11 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, spin_lock(&delayed_refs->lock); ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); spin_unlock(&delayed_refs->lock); - if (ret > 0) + if (ret > 0) { kfree(record); - return 0; + return 0; + } + return btrfs_qgroup_trace_extent_post(fs_info, record); } int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, @@ -1571,8 +1614,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, * If we increment the root nodes slot counter past the number of * elements, 1 is returned to signal completion of the search. 
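The btrfs_qgroup_trace_extent() hunk above also shows the intended calling pattern for the new btrfs_qgroup_trace_extent_post() helper: insert the record while holding the delayed-refs spinlock, then do the sleeping backref walk only after dropping it. Condensed from the code in that hunk (a fragment, not a complete function):

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		/* an identical record already existed, ours is not needed */
		kfree(record);
		return 0;
	}
	/* may sleep: walks backrefs to fill record->old_roots */
	return btrfs_qgroup_trace_extent_post(fs_info, record);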
*/ -static int adjust_slots_upwards(struct btrfs_root *root, - struct btrfs_path *path, int root_level) +static int adjust_slots_upwards(struct btrfs_path *path, int root_level) { int level = 0; int nr, slot; @@ -1713,7 +1755,7 @@ walk_down: goto out; /* Nonzero return here means we completed our search */ - ret = adjust_slots_upwards(root, path, root_level); + ret = adjust_slots_upwards(path, root_level); if (ret) break; @@ -1927,13 +1969,14 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 nr_old_roots = 0; int ret = 0; + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + return 0; + if (new_roots) nr_new_roots = new_roots->nnodes; if (old_roots) nr_old_roots = old_roots->nnodes; - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) - goto out_free; BUG_ON(!fs_info->quota_root); trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes, @@ -2170,9 +2213,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, goto out; } - rcu_read_lock(); level_size = fs_info->nodesize; - rcu_read_unlock(); } /* @@ -2306,7 +2347,20 @@ out: return ret; } -static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes) +static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) +{ + if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && + qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer) + return false; + + if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && + qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl) + return false; + + return true; +} + +static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce) { struct btrfs_root *quota_root; struct btrfs_qgroup *qgroup; @@ -2347,16 +2401,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes) qg = unode_aux_to_qgroup(unode); - if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && - qg->reserved + (s64)qg->rfer + num_bytes > - qg->max_rfer) { - ret = -EDQUOT; - goto out; - } - - if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && - qg->reserved + (s64)qg->excl + num_bytes > - qg->max_excl) { + if (enforce && !qgroup_check_limits(qg, num_bytes)) { ret = -EDQUOT; goto out; } @@ -2424,7 +2469,10 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, qg = unode_aux_to_qgroup(unode); - qg->reserved -= num_bytes; + if (WARN_ON(qg->reserved < num_bytes)) + report_reserved_underflow(fs_info, qg, num_bytes); + else + qg->reserved -= num_bytes; list_for_each_entry(glist, &qg->groups, next_group) { ret = ulist_add(fs_info->qgroup_ulist, @@ -2439,11 +2487,6 @@ out: spin_unlock(&fs_info->qgroup_lock); } -static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes) -{ - return btrfs_qgroup_free_refroot(root->fs_info, root->objectid, - num_bytes); -} void assert_qgroups_uptodate(struct btrfs_trans_handle *trans) { if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq) @@ -2803,7 +2846,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len) return 0; changeset.bytes_changed = 0; - changeset.range_changed = ulist_alloc(GFP_NOFS); + ulist_init(&changeset.range_changed); ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len -1, EXTENT_QGROUP_RESERVED, &changeset); trace_btrfs_qgroup_reserve_data(inode, start, len, @@ -2811,21 +2854,21 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len) QGROUP_RESERVE); if (ret < 0) goto cleanup; - ret = qgroup_reserve(root, changeset.bytes_changed); + ret = qgroup_reserve(root, changeset.bytes_changed, true); if (ret < 0) goto cleanup; - 
ulist_free(changeset.range_changed); + ulist_release(&changeset.range_changed); return ret; cleanup: /* cleanup already reserved ranges */ ULIST_ITER_INIT(&uiter); - while ((unode = ulist_next(changeset.range_changed, &uiter))) + while ((unode = ulist_next(&changeset.range_changed, &uiter))) clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL, GFP_NOFS); - ulist_free(changeset.range_changed); + ulist_release(&changeset.range_changed); return ret; } @@ -2837,23 +2880,22 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len, int ret; changeset.bytes_changed = 0; - changeset.range_changed = ulist_alloc(GFP_NOFS); - if (!changeset.range_changed) - return -ENOMEM; - + ulist_init(&changeset.range_changed); ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len -1, EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) goto out; if (free) { - qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed); + btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, + BTRFS_I(inode)->root->objectid, + changeset.bytes_changed); trace_op = QGROUP_FREE; } trace_btrfs_qgroup_release_data(inode, start, len, changeset.bytes_changed, trace_op); out: - ulist_free(changeset.range_changed); + ulist_release(&changeset.range_changed); return ret; } @@ -2892,7 +2934,8 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) return __btrfs_qgroup_release_data(inode, start, len, 0); } -int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes) +int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, + bool enforce) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; @@ -2902,7 +2945,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes) return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - ret = qgroup_reserve(root, num_bytes); + ret = qgroup_reserve(root, num_bytes, enforce); if (ret < 0) return ret; atomic_add(num_bytes, &root->qgroup_meta_rsv); @@ -2921,7 +2964,7 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root) reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); if (reserved == 0) return; - qgroup_free(root, reserved); + btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); } void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) @@ -2935,7 +2978,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); atomic_sub(num_bytes, &root->qgroup_meta_rsv); - qgroup_free(root, num_bytes); + btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); } /* @@ -2950,22 +2993,22 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) int ret; changeset.bytes_changed = 0; - changeset.range_changed = ulist_alloc(GFP_NOFS); - if (WARN_ON(!changeset.range_changed)) - return; - + ulist_init(&changeset.range_changed); ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { ULIST_ITER_INIT(&iter); - while ((unode = ulist_next(changeset.range_changed, &iter))) { + while ((unode = ulist_next(&changeset.range_changed, &iter))) { btrfs_warn(BTRFS_I(inode)->root->fs_info, "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", inode->i_ino, unode->val, unode->aux); } - qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed); + 
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, + BTRFS_I(inode)->root->objectid, + changeset.bytes_changed); + } - ulist_free(changeset.range_changed); + ulist_release(&changeset.range_changed); } diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 416ae8e1d23c..26932a8a1993 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -94,9 +94,10 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); /* * Inform qgroup to trace one dirty extent, its info is recorded in @record. - * So qgroup can account it at commit trans time. + * So qgroup can account it at transaction committing time. * - * No lock version, caller must acquire delayed ref lock and allocate memory. + * No lock version, the caller must acquire the delayed ref lock and allocate memory, + * then call btrfs_qgroup_trace_extent_post() after exiting the lock context. * * Return 0 for success insert * Return >0 for existing record, caller can free @record safely. @@ -108,11 +109,37 @@ int btrfs_qgroup_trace_extent_nolock( struct btrfs_qgroup_extent_record *record); /* + * Post handler after qgroup_trace_extent_nolock(). + * + * NOTE: Currently qgroup does the expensive backref walk at transaction + * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming + * new transactions. + * This is designed to allow btrfs_find_all_roots() to get a correct new_roots + * result. + * + * However, for old_roots there is no need to do the backref walk at that time, + * since we search commit roots for the walk and the result will always be + * correct. + * + * Because the no-lock version cannot do the backref walk there, + * we must call btrfs_qgroup_trace_extent_post() after exiting the + * spinlock context. + * + * TODO: If we can fix and prove that btrfs_find_all_roots() gets a correct result + * using the current root, then we can move the expensive backref walk out of + * transaction commit; but not yet, as qgroup accounting would be wrong again. + */ +int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, + struct btrfs_qgroup_extent_record *qrecord); + +/* * Inform qgroup to trace one dirty extent, specified by @bytenr and * @num_bytes. * So qgroup can account it at commit trans time. * - * Better encapsulated version. + * Better encapsulated version, with memory allocation and backref walk for + * commit roots. + * So this can sleep. * * Return 0 if the operation is done.
* Return <0 for error, like memory allocation failure or invalid parameter @@ -181,7 +208,8 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len); int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len); int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len); -int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes); +int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, + bool enforce); void btrfs_qgroup_free_meta_all(struct btrfs_root *root); void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes); void btrfs_qgroup_check_reserved_leak(struct inode *inode); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index d2a9a1ee5361..1571bf26dc07 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -677,11 +677,9 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) struct btrfs_raid_bio *freeit = NULL; struct btrfs_raid_bio *cache_drop = NULL; int ret = 0; - int walk = 0; spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { - walk++; if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { spin_lock(&cur->bio_list_lock); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 379711048fb0..d60df51959f7 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1548,9 +1548,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < btrfs_ino(&entry->vfs_inode)) + if (objectid < btrfs_ino(entry)) node = node->rb_left; - else if (objectid > btrfs_ino(&entry->vfs_inode)) + else if (objectid > btrfs_ino(entry)) node = node->rb_right; else break; @@ -1558,7 +1558,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= btrfs_ino(&entry->vfs_inode)) { + if (objectid <= btrfs_ino(entry)) { node = prev; break; } @@ -1573,7 +1573,7 @@ again: return inode; } - objectid = btrfs_ino(&entry->vfs_inode) + 1; + objectid = btrfs_ino(entry) + 1; if (cond_resched_lock(&root->inode_lock)) goto again; @@ -1609,8 +1609,8 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, return -ENOMEM; bytenr -= BTRFS_I(reloc_inode)->index_cnt; - ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), - bytenr, 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0); if (ret < 0) goto out; if (ret > 0) { @@ -1698,11 +1698,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (first) { inode = find_next_inode(root, key.objectid); first = 0; - } else if (inode && btrfs_ino(inode) < key.objectid) { + } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) { btrfs_add_delayed_iput(inode); inode = find_next_inode(root, key.objectid); } - if (inode && btrfs_ino(inode) == key.objectid) { + if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) { end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); WARN_ON(!IS_ALIGNED(key.offset, @@ -1714,8 +1714,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (!ret) continue; - btrfs_drop_extent_cache(inode, key.offset, end, - 1); + btrfs_drop_extent_cache(BTRFS_I(inode), + key.offset, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, end); } @@ -2088,7 +2088,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, inode = find_next_inode(root, objectid); if (!inode) break; - ino = btrfs_ino(inode); + ino = btrfs_ino(BTRFS_I(inode)); if (ino > max_key->objectid) { iput(inode); @@ -2130,7 +2130,7 @@ static int 
invalidate_extent_cache(struct btrfs_root *root, /* the lock_extent waits for readpage to complete */ lock_extent(&BTRFS_I(inode)->io_tree, start, end); - btrfs_drop_extent_cache(inode, start, end, 1); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, start, end); } return 0; @@ -3161,7 +3161,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, free_extent_map(em); break; } - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); } unlock_extent(&BTRFS_I(inode)->io_tree, start, end); return ret; @@ -3203,7 +3203,8 @@ static int relocate_file_extent_cluster(struct inode *inode, index = (cluster->start - offset) >> PAGE_SHIFT; last_index = (cluster->end - offset) >> PAGE_SHIFT; while (index <= last_index) { - ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + PAGE_SIZE); if (ret) goto out; @@ -3215,7 +3216,7 @@ static int relocate_file_extent_cluster(struct inode *inode, page = find_or_create_page(inode->i_mapping, index, mask); if (!page) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -ENOMEM; goto out; @@ -3234,7 +3235,7 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!PageUptodate(page)) { unlock_page(page); put_page(page); - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -EIO; goto out; @@ -3543,7 +3544,7 @@ truncate: goto out; } - ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode); + ret = btrfs_truncate_free_space_cache(trans, block_group, inode); btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); @@ -4245,7 +4246,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); BTRFS_I(inode)->index_cnt = group->key.objectid; - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); out: btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); @@ -4334,7 +4335,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) rc->block_group = btrfs_lookup_block_group(fs_info, group_start); BUG_ON(!rc->block_group); - ret = btrfs_inc_block_group_ro(extent_root, rc->block_group); + ret = btrfs_inc_block_group_ro(fs_info, rc->block_group); if (ret) { err = ret; goto out; @@ -4347,8 +4348,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) goto out; } - inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, - path); + inode = lookup_free_space_inode(fs_info, rc->block_group, path); btrfs_free_path(path); if (!IS_ERR(inode)) diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 4c6735491ee0..a08224eab8b4 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -74,7 +74,7 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot, * * If we find something return 0, otherwise > 0, < 0 on error. 
*/ -int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, +int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key, struct btrfs_path *path, struct btrfs_root_item *root_item, struct btrfs_key *root_key) { @@ -207,7 +207,7 @@ out: } int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_key *key, struct btrfs_root_item *item) + const struct btrfs_key *key, struct btrfs_root_item *item) { /* * Make sure generation v1 and v2 match. See update_root for details. @@ -337,7 +337,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) /* drop the root item for 'key' from 'root' */ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_key *key) + const struct btrfs_key *key) { struct btrfs_path *path; int ret; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9a94670536a6..b0251eb1239f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -282,9 +282,7 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 *extent_physical, struct btrfs_device **extent_dev, int *extent_mirror_num); -static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, - struct scrub_wr_ctx *wr_ctx, - struct btrfs_fs_info *fs_info, +static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx, struct btrfs_device *dev, int is_dev_replace); static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx); @@ -501,7 +499,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) spin_lock_init(&sctx->stat_lock); init_waitqueue_head(&sctx->list_wait); - ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info, + ret = scrub_setup_wr_ctx(&sctx->wr_ctx, fs_info->dev_replace.tgtdev, is_dev_replace); if (ret) { scrub_free_ctx(sctx); @@ -733,7 +731,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) ret = -EIO; goto out; } - ret = repair_io_failure(inode, offset, PAGE_SIZE, + ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE, fixup->logical, page, offset - page_offset(page), fixup->mirror_num); @@ -3584,7 +3582,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); - ret = btrfs_inc_block_group_ro(root, cache); + ret = btrfs_inc_block_group_ro(fs_info, cache); if (!ret && is_dev_replace) { /* * If we are doing a device replace wait for any tasks @@ -4084,9 +4082,7 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info, btrfs_put_bbio(bbio); } -static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, - struct scrub_wr_ctx *wr_ctx, - struct btrfs_fs_info *fs_info, +static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx, struct btrfs_device *dev, int is_dev_replace) { @@ -4240,7 +4236,7 @@ out: scrub_pending_trans_workers_dec(sctx); } -static int check_extent_to_block(struct inode *inode, u64 start, u64 len, +static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len, u64 logical) { struct extent_state *cached_state = NULL; @@ -4250,7 +4246,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len, u64 lockstart = start, lockend = start + len - 1; int ret = 0; - io_tree = &BTRFS_I(inode)->io_tree; + io_tree = &inode->io_tree; lock_extent_bits(io_tree, lockstart, lockend, &cached_state); ordered = btrfs_lookup_ordered_range(inode, lockstart, len); @@ -4329,7 +4325,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, io_tree = &BTRFS_I(inode)->io_tree; nocow_ctx_logical = nocow_ctx->logical; - ret = 
check_extent_to_block(inode, offset, len, nocow_ctx_logical); + ret = check_extent_to_block(BTRFS_I(inode), offset, len, + nocow_ctx_logical); if (ret) { ret = ret > 0 ? 0 : ret; goto out; @@ -4376,7 +4373,7 @@ again: } } - ret = check_extent_to_block(inode, offset, len, + ret = check_extent_to_block(BTRFS_I(inode), offset, len, nocow_ctx_logical); if (ret) { ret = ret > 0 ? 0 : ret; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index d145ce804620..456c8901489b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1681,6 +1681,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) { int ret; + if (ino == BTRFS_FIRST_FREE_OBJECTID) + return 1; + ret = get_cur_inode_state(sctx, ino, gen); if (ret < 0) goto out; @@ -1866,7 +1869,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, * not deleted and then re-created, if it was then we have no overwrite * and we can just unlink this entry. */ - if (sctx->parent_root) { + if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) { ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, NULL, NULL); if (ret < 0 && ret != -ENOENT) @@ -1934,6 +1937,19 @@ static int did_overwrite_ref(struct send_ctx *sctx, if (ret <= 0) goto out; + if (dir != BTRFS_FIRST_FREE_OBJECTID) { + ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, + NULL, NULL, NULL); + if (ret < 0 && ret != -ENOENT) + goto out; + if (ret) { + ret = 0; + goto out; + } + if (gen != dir_gen) + goto out; + } + /* check if the ref was overwritten by another ref */ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, &ow_inode, &other_type); @@ -3556,6 +3572,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, { int ret = 0; u64 ino = parent_ref->dir; + u64 ino_gen = parent_ref->dir_gen; u64 parent_ino_before, parent_ino_after; struct fs_path *path_before = NULL; struct fs_path *path_after = NULL; @@ -3576,6 +3593,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, * at get_cur_path()). 
*/ while (ino > BTRFS_FIRST_FREE_OBJECTID) { + u64 parent_ino_after_gen; + if (is_waiting_for_move(sctx, ino)) { /* * If the current inode is an ancestor of ino in the @@ -3598,7 +3617,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, fs_path_reset(path_after); ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, - NULL, path_after); + &parent_ino_after_gen, path_after); if (ret < 0) goto out; ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, @@ -3615,10 +3634,20 @@ static int wait_for_parent_move(struct send_ctx *sctx, if (ino > sctx->cur_ino && (parent_ino_before != parent_ino_after || len1 != len2 || memcmp(path_before->start, path_after->start, len1))) { - ret = 1; - break; + u64 parent_ino_gen; + + ret = get_inode_info(sctx->parent_root, ino, NULL, + &parent_ino_gen, NULL, NULL, NULL, + NULL); + if (ret < 0) + goto out; + if (ino_gen == parent_ino_gen) { + ret = 1; + break; + } } ino = parent_ino_after; + ino_gen = parent_ino_after_gen; } out: @@ -5277,6 +5306,81 @@ out: return ret; } +static int range_is_hole_in_parent(struct send_ctx *sctx, + const u64 start, + const u64 end) +{ + struct btrfs_path *path; + struct btrfs_key key; + struct btrfs_root *root = sctx->parent_root; + u64 search_start = start; + int ret; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + key.objectid = sctx->cur_ino; + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = search_start; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + if (ret > 0 && path->slots[0] > 0) + path->slots[0]--; + + while (search_start < end) { + struct extent_buffer *leaf = path->nodes[0]; + int slot = path->slots[0]; + struct btrfs_file_extent_item *fi; + u64 extent_end; + + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid < sctx->cur_ino || + key.type < BTRFS_EXTENT_DATA_KEY) + goto next; + if (key.objectid > sctx->cur_ino || + key.type > BTRFS_EXTENT_DATA_KEY || + key.offset >= end) + break; + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + if (btrfs_file_extent_type(leaf, fi) == + BTRFS_FILE_EXTENT_INLINE) { + u64 size = btrfs_file_extent_inline_len(leaf, slot, fi); + + extent_end = ALIGN(key.offset + size, + root->fs_info->sectorsize); + } else { + extent_end = key.offset + + btrfs_file_extent_num_bytes(leaf, fi); + } + if (extent_end <= start) + goto next; + if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { + search_start = extent_end; + goto next; + } + ret = 0; + goto out; +next: + path->slots[0]++; + } + ret = 1; +out: + btrfs_free_path(path); + return ret; +} + static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { @@ -5321,8 +5425,17 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, return ret; } - if (sctx->cur_inode_last_extent < key->offset) - ret = send_hole(sctx, key->offset); + if (sctx->cur_inode_last_extent < key->offset) { + ret = range_is_hole_in_parent(sctx, + sctx->cur_inode_last_extent, + key->offset); + if (ret < 0) + return ret; + else if (ret == 0) + ret = send_hole(sctx, key->offset); + else + ret = 0; + } sctx->cur_inode_last_extent = extent_end; return ret; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b5ae7d3d1896..da687dc79cce 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct 
btrfs_trans_handle *trans, function, line, errstr); return; } - ACCESS_ONCE(trans->transaction->aborted) = errno; + WRITE_ONCE(trans->transaction->aborted, errno); /* Wake up anybody who may be waiting on this transaction */ wake_up(&fs_info->transaction_wait); wake_up(&fs_info->transaction_blocked_wait); @@ -1114,7 +1114,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec static int btrfs_fill_super(struct super_block *sb, struct btrfs_fs_devices *fs_devices, - void *data, int silent) + void *data) { struct inode *inode; struct btrfs_fs_info *fs_info = btrfs_sb(sb); @@ -1611,8 +1611,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } else { snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev); btrfs_sb(s)->bdev_holder = fs_type; - error = btrfs_fill_super(s, fs_devices, data, - flags & MS_SILENT ? 1 : 0); + error = btrfs_fill_super(s, fs_devices, data); } if (error) { deactivate_locked_super(s); diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 4d0f038e14f1..8c91d03cc82d 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -278,7 +278,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) /* First with no extents */ BTRFS_I(inode)->root = root; - em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize, 0); if (IS_ERR(em)) { em = NULL; test_msg("Got an error when we shouldn't have\n"); @@ -293,7 +293,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } free_extent_map(em); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); /* * All of the magic numbers are based on the mapping setup in @@ -302,7 +302,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) */ setup_file_extents(root, sectorsize); - em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -323,7 +323,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -350,7 +350,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -372,7 +372,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Regular extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -399,7 +399,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are split extents */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't 
have\n"); goto out; @@ -428,7 +428,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -450,7 +450,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -484,7 +484,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -513,7 +513,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are a half written prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -543,7 +543,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -576,7 +576,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -611,7 +611,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Now for the compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -645,7 +645,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Split compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -680,7 +680,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -707,7 +707,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if 
(IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -742,7 +742,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* A hole between regular extents but no hole extent */ - em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, + sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -769,7 +770,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, 4096 * 1024, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -802,7 +803,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -885,7 +886,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) insert_inode_item_key(root); insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); - em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -907,7 +908,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) } free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, + 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 0e0508f488b2..61b807de3e16 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -474,7 +474,8 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root) static struct btrfs_trans_handle * start_transaction(struct btrfs_root *root, unsigned int num_items, - unsigned int type, enum btrfs_reserve_flush_enum flush) + unsigned int type, enum btrfs_reserve_flush_enum flush, + bool enforce_qgroups) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -505,9 +506,10 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, * Do the reservation before we join the transaction so we can do all * the appropriate flushing if need be. 
*/ - if (num_items > 0 && root != fs_info->chunk_root) { + if (num_items && root != fs_info->chunk_root) { qgroup_reserved = num_items * fs_info->nodesize; - ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved); + ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved, + enforce_qgroups); if (ret) return ERR_PTR(ret); @@ -613,8 +615,9 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items) { return start_transaction(root, num_items, TRANS_START, - BTRFS_RESERVE_FLUSH_ALL); + BTRFS_RESERVE_FLUSH_ALL, true); } + struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, unsigned int num_items, @@ -625,7 +628,14 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( u64 num_bytes; int ret; - trans = btrfs_start_transaction(root, num_items); + /* + * We have two callers: unlink and block group removal. The + * former should succeed even if we will temporarily exceed + * quota and the latter operates on the extent root so + * qgroup enforcement is ignored anyway. + */ + trans = start_transaction(root, num_items, TRANS_START, + BTRFS_RESERVE_FLUSH_ALL, false); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; @@ -654,25 +664,25 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush( unsigned int num_items) { return start_transaction(root, num_items, TRANS_START, - BTRFS_RESERVE_FLUSH_LIMIT); + BTRFS_RESERVE_FLUSH_LIMIT, true); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) { - return start_transaction(root, 0, TRANS_JOIN, - BTRFS_RESERVE_NO_FLUSH); + return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH, + true); } struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK, - BTRFS_RESERVE_NO_FLUSH); + BTRFS_RESERVE_NO_FLUSH, true); } struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_USERSPACE, - BTRFS_RESERVE_NO_FLUSH); + BTRFS_RESERVE_NO_FLUSH, true); } /* @@ -691,7 +701,7 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_ATTACH, - BTRFS_RESERVE_NO_FLUSH); + BTRFS_RESERVE_NO_FLUSH, true); } /* @@ -707,7 +717,7 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root) struct btrfs_trans_handle *trans; trans = start_transaction(root, 0, TRANS_ATTACH, - BTRFS_RESERVE_NO_FLUSH); + BTRFS_RESERVE_NO_FLUSH, true); if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT) btrfs_wait_for_commit(root->fs_info, 0); @@ -866,14 +876,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, if (lock && !atomic_read(&info->open_ioctl_trans) && should_end_transaction(trans) && - ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { + READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { spin_lock(&info->trans_lock); if (cur_trans->state == TRANS_STATE_RUNNING) cur_trans->state = TRANS_STATE_BLOCKED; spin_unlock(&info->trans_lock); } - if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { + if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { if (throttle) return btrfs_commit_transaction(trans); else @@ -1354,12 +1364,8 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, * enabled. If this check races with the ioctl, rescan will * kick in anyway. 
*/ - mutex_lock(&fs_info->qgroup_ioctl_lock); - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { - mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) return 0; - } - mutex_unlock(&fs_info->qgroup_ioctl_lock); /* * We are going to commit transaction, see btrfs_commit_transaction() @@ -1499,12 +1505,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, /* * insert the directory item */ - ret = btrfs_set_inode_index(parent_inode, &index); + ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index); BUG_ON(ret); /* -ENOMEM */ /* check if there is a file/dir which has the same name. */ dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, - btrfs_ino(parent_inode), + btrfs_ino(BTRFS_I(parent_inode)), dentry->d_name.name, dentry->d_name.len, 0); if (dir_item != NULL && !IS_ERR(dir_item)) { @@ -1598,7 +1604,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, */ ret = btrfs_add_root_ref(trans, fs_info, objectid, parent_root->root_key.objectid, - btrfs_ino(parent_inode), index, + btrfs_ino(BTRFS_I(parent_inode)), index, dentry->d_name.name, dentry->d_name.len); if (ret) { btrfs_abort_transaction(trans, ret); @@ -1638,7 +1644,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode, &key, + BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, index); /* We have check then name at the beginning, so it is impossible. */ BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); @@ -1647,7 +1653,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + dentry->d_name.len * 2); parent_inode->i_mtime = parent_inode->i_ctime = current_time(parent_inode); @@ -1940,7 +1946,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) int ret; /* Stop the commit early if ->aborted is set */ - if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { + if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; btrfs_end_transaction(trans); return ret; @@ -2080,7 +2086,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) atomic_read(&cur_trans->num_writers) == 1); /* ->aborted might be set after the previous check, so check it */ - if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { + if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; goto scrub_continue; } @@ -2194,14 +2200,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * The tasks which save the space cache and inode cache may also * update ->aborted, check it. 
*/ - if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { + if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; mutex_unlock(&fs_info->tree_log_mutex); mutex_unlock(&fs_info->reloc_mutex); goto scrub_continue; } - btrfs_prepare_extent_commit(trans, fs_info); + btrfs_prepare_extent_commit(fs_info); cur_trans = fs_info->running_transaction; @@ -2251,7 +2257,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) goto scrub_continue; } - ret = write_ctree_super(trans, fs_info, 0); + ret = write_all_supers(fs_info, 0); if (ret) { mutex_unlock(&fs_info->tree_log_mutex); goto scrub_continue; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index eeffff84f280..a59674c3e69e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -97,7 +97,7 @@ #define LOG_WALK_REPLAY_ALL 3 static int btrfs_log_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, int inode_only, const loff_t start, const loff_t end, @@ -631,8 +631,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. */ - ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), - start, 0); + ret = btrfs_lookup_file_extent(trans, root, path, + btrfs_ino(BTRFS_I(inode)), start, 0); if (ret == 0 && (found_type == BTRFS_FILE_EXTENT_REG || @@ -673,6 +673,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, unsigned long dest_offset; struct btrfs_key ins; + if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && + btrfs_fs_incompat(fs_info, NO_HOLES)) + goto update_inode; + ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); if (ret) @@ -825,6 +829,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } inode_add_bytes(inode, nbytes); +update_inode: ret = btrfs_update_inode(trans, root, inode); out: if (inode) @@ -843,7 +848,7 @@ out: static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - struct inode *dir, + struct btrfs_inode *dir, struct btrfs_dir_item *di) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -875,7 +880,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len); + ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name, + name_len); if (ret) goto out; else @@ -991,8 +997,8 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_root *log_root, - struct inode *dir, struct inode *inode, - struct extent_buffer *eb, + struct btrfs_inode *dir, + struct btrfs_inode *inode, u64 inode_objectid, u64 parent_objectid, u64 ref_index, char *name, int namelen, int *search_done) @@ -1047,12 +1053,11 @@ again: parent_objectid, victim_name, victim_name_len)) { - inc_nlink(inode); + inc_nlink(&inode->vfs_inode); btrfs_release_path(path); - ret = btrfs_unlink_inode(trans, root, dir, - inode, victim_name, - victim_name_len); + ret = btrfs_unlink_inode(trans, root, dir, inode, + victim_name, victim_name_len); kfree(victim_name); if (ret) return ret; @@ -1115,16 +1120,16 @@ again: victim_name_len)) { ret = -ENOENT; victim_parent = read_one_inode(root, - parent_objectid); + parent_objectid); if (victim_parent) { - inc_nlink(inode); + inc_nlink(&inode->vfs_inode); 
btrfs_release_path(path); ret = btrfs_unlink_inode(trans, root, - victim_parent, - inode, - victim_name, - victim_name_len); + BTRFS_I(victim_parent), + inode, + victim_name, + victim_name_len); if (!ret) ret = btrfs_run_delayed_items( trans, @@ -1295,8 +1300,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, goto out; /* if we already have a perfect match, we're done */ - if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), - ref_index, name, namelen)) { + if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), + btrfs_ino(BTRFS_I(inode)), ref_index, + name, namelen)) { /* * look for a conflicting back reference in the * metadata. if we find one we have to unlink that name @@ -1307,7 +1313,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, if (!search_done) { ret = __add_inode_ref(trans, root, path, log, - dir, inode, eb, + BTRFS_I(dir), + BTRFS_I(inode), inode_objectid, parent_objectid, ref_index, name, namelen, @@ -1320,8 +1327,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } /* insert our name */ - ret = btrfs_add_link(trans, dir, inode, name, namelen, - 0, ref_index); + ret = btrfs_add_link(trans, BTRFS_I(dir), + BTRFS_I(inode), + name, namelen, 0, ref_index); if (ret) goto out; @@ -1360,7 +1368,7 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans, } static int count_inode_extrefs(struct btrfs_root *root, - struct inode *inode, struct btrfs_path *path) + struct btrfs_inode *inode, struct btrfs_path *path) { int ret = 0; int name_len; @@ -1404,7 +1412,7 @@ static int count_inode_extrefs(struct btrfs_root *root, } static int count_inode_refs(struct btrfs_root *root, - struct inode *inode, struct btrfs_path *path) + struct btrfs_inode *inode, struct btrfs_path *path) { int ret; struct btrfs_key key; @@ -1477,19 +1485,19 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, struct btrfs_path *path; int ret; u64 nlink = 0; - u64 ino = btrfs_ino(inode); + u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - ret = count_inode_refs(root, inode, path); + ret = count_inode_refs(root, BTRFS_I(inode), path); if (ret < 0) goto out; nlink = ret; - ret = count_inode_extrefs(root, inode, path); + ret = count_inode_extrefs(root, BTRFS_I(inode), path); if (ret < 0) goto out; @@ -1639,7 +1647,8 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, return -EIO; } - ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, + name_len, 1, index); /* FIXME, put inode into FIXUP list */ @@ -1769,7 +1778,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, if (!exists) goto out; - ret = drop_one_dir_item(trans, root, path, dir, dst_di); + ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di); if (ret) goto out; @@ -1778,7 +1787,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); if (!ret && update_size) { - btrfs_i_size_write(dir, dir->i_size + name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); ret = btrfs_update_inode(trans, root, dir); } kfree(name); @@ -2052,8 +2061,8 @@ again: } inc_nlink(inode); - ret = btrfs_unlink_inode(trans, root, dir, inode, - name, name_len); + ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), + BTRFS_I(inode), name, name_len); if (!ret) ret = btrfs_run_delayed_items(trans, fs_info); kfree(name); @@ -2469,7 +2478,7 
@@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, if (trans) { btrfs_tree_lock(next); btrfs_set_lock_blocking(next); - clean_tree_block(trans, fs_info, next); + clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); } @@ -2549,7 +2558,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, if (trans) { btrfs_tree_lock(next); btrfs_set_lock_blocking(next); - clean_tree_block(trans, fs_info, next); + clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); } @@ -2627,7 +2636,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, if (trans) { btrfs_tree_lock(next); btrfs_set_lock_blocking(next); - clean_tree_block(trans, fs_info, next); + clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); } @@ -2958,7 +2967,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, * the running transaction open, so a full commit can't hop * in and cause problems either. */ - ret = write_ctree_super(trans, fs_info, 1); + ret = write_all_supers(fs_info, 1); if (ret) { btrfs_set_log_full_commit(fs_info, trans); btrfs_abort_transaction(trans, ret); @@ -3084,7 +3093,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *dir, u64 index) + struct btrfs_inode *dir, u64 index) { struct btrfs_root *log; struct btrfs_dir_item *di; @@ -3094,14 +3103,14 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, int bytes_del = 0; u64 dir_ino = btrfs_ino(dir); - if (BTRFS_I(dir)->logged_trans < trans->transid) + if (dir->logged_trans < trans->transid) return 0; ret = join_running_log_trans(root); if (ret) return 0; - mutex_lock(&BTRFS_I(dir)->log_mutex); + mutex_lock(&dir->log_mutex); log = root->log_root; path = btrfs_alloc_path(); @@ -3176,7 +3185,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, fail: btrfs_free_path(path); out_unlock: - mutex_unlock(&BTRFS_I(dir)->log_mutex); + mutex_unlock(&dir->log_mutex); if (ret == -ENOSPC) { btrfs_set_log_full_commit(root->fs_info, trans); ret = 0; @@ -3192,25 +3201,25 @@ out_unlock: int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *inode, u64 dirid) + struct btrfs_inode *inode, u64 dirid) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *log; u64 index; int ret; - if (BTRFS_I(inode)->logged_trans < trans->transid) + if (inode->logged_trans < trans->transid) return 0; ret = join_running_log_trans(root); if (ret) return 0; log = root->log_root; - mutex_lock(&BTRFS_I(inode)->log_mutex); + mutex_lock(&inode->log_mutex); ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), dirid, &index); - mutex_unlock(&BTRFS_I(inode)->log_mutex); + mutex_unlock(&inode->log_mutex); if (ret == -ENOSPC) { btrfs_set_log_full_commit(fs_info, trans); ret = 0; @@ -3260,7 +3269,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, * to replay anything deleted before the fsync */ static noinline int log_dir_items(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, int key_type, struct btrfs_log_ctx *ctx, @@ -3450,7 +3459,7 @@ done: * key logged by this transaction. 
*/ static noinline int log_directory_changes(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, struct btrfs_log_ctx *ctx) @@ -3464,9 +3473,8 @@ again: min_key = 0; max_key = 0; while (1) { - ret = log_dir_items(trans, root, inode, path, - dst_path, key_type, ctx, min_key, - &max_key); + ret = log_dir_items(trans, root, inode, path, dst_path, key_type, + ctx, min_key, &max_key); if (ret) return ret; if (max_key == (u64)-1) @@ -3595,34 +3603,34 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, static int log_inode_item(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, - struct inode *inode) + struct btrfs_inode *inode) { struct btrfs_inode_item *inode_item; int ret; ret = btrfs_insert_empty_item(trans, log, path, - &BTRFS_I(inode)->location, - sizeof(*inode_item)); + &inode->location, sizeof(*inode_item)); if (ret && ret != -EEXIST) return ret; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); - fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); + fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, + 0, 0); btrfs_release_path(path); return 0; } static noinline int copy_items(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_path *dst_path, struct btrfs_path *src_path, u64 *last_extent, int start_slot, int nr, int inode_only, u64 logged_isize) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); unsigned long src_offset; unsigned long dst_offset; - struct btrfs_root *log = BTRFS_I(inode)->root->log_root; + struct btrfs_root *log = inode->root->log_root; struct btrfs_file_extent_item *extent; struct btrfs_inode_item *inode_item; struct extent_buffer *src = src_path->nodes[0]; @@ -3633,7 +3641,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, char *ins_data; int i; struct list_head ordered_sums; - int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; + int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; bool has_extents = false; bool need_find_last_extent = true; bool done = false; @@ -3675,7 +3683,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, dst_path->slots[0], struct btrfs_inode_item); fill_inode_item(trans, dst_path->nodes[0], inode_item, - inode, inode_only == LOG_INODE_EXISTS, + &inode->vfs_inode, + inode_only == LOG_INODE_EXISTS, logged_isize); } else { copy_extent_buffer(dst_path->nodes[0], src, dst_offset, @@ -3783,7 +3792,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, if (need_find_last_extent) { u64 len; - ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path); + ret = btrfs_prev_leaf(inode->root, src_path); if (ret < 0) return ret; if (ret) @@ -3825,8 +3834,8 @@ fill_holes: if (need_find_last_extent) { /* btrfs_prev_leaf could return 1 without releasing the path */ btrfs_release_path(src_path); - ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key, - src_path, 0, 0); + ret = btrfs_search_slot(NULL, inode->root, &first_key, + src_path, 0, 0); if (ret < 0) return ret; ASSERT(ret == 0); @@ -3846,7 +3855,7 @@ fill_holes: u64 extent_end; if (i >= btrfs_header_nritems(src_path->nodes[0])) { - ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path); + ret = btrfs_next_leaf(inode->root, src_path); if (ret < 0) return ret; ASSERT(ret == 
0); @@ -3881,8 +3890,7 @@ fill_holes: offset = *last_extent; len = key.offset - *last_extent; ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), - offset, 0, 0, len, 0, len, 0, - 0, 0); + offset, 0, 0, len, 0, len, 0, 0, 0); if (ret) break; *last_extent = extent_end; @@ -4055,7 +4063,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans, } static int log_one_extent(struct btrfs_trans_handle *trans, - struct inode *inode, struct btrfs_root *root, + struct btrfs_inode *inode, struct btrfs_root *root, const struct extent_map *em, struct btrfs_path *path, const struct list_head *logged_list, @@ -4072,8 +4080,8 @@ static int log_one_extent(struct btrfs_trans_handle *trans, int extent_inserted = 0; bool ordered_io_err = false; - ret = wait_ordered_extents(trans, inode, root, em, logged_list, - &ordered_io_err); + ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em, + logged_list, &ordered_io_err); if (ret) return ret; @@ -4084,7 +4092,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, btrfs_init_map_token(&token); - ret = __btrfs_drop_extents(trans, log, inode, path, em->start, + ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, em->start + em->len, NULL, 0, 1, sizeof(*fi), &extent_inserted); if (ret) @@ -4150,7 +4158,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_path *path, struct list_head *logged_list, struct btrfs_log_ctx *ctx, @@ -4159,14 +4167,14 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, { struct extent_map *em, *n; struct list_head extents; - struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *tree = &inode->extent_tree; u64 test_gen; int ret = 0; int num = 0; INIT_LIST_HEAD(&extents); - down_write(&BTRFS_I(inode)->dio_sem); + down_write(&inode->dio_sem); write_lock(&tree->lock); test_gen = root->fs_info->last_trans_committed; @@ -4206,7 +4214,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, * without writing to the log tree and the fsync must report the * file data write error and not commit the current transaction. 
*/ - ret = filemap_check_errors(inode->i_mapping); + ret = filemap_check_errors(inode->vfs_inode.i_mapping); if (ret) ctx->io_err = ret; process: @@ -4235,13 +4243,13 @@ process: } WARN_ON(!list_empty(&extents)); write_unlock(&tree->lock); - up_write(&BTRFS_I(inode)->dio_sem); + up_write(&inode->dio_sem); btrfs_release_path(path); return ret; } -static int logged_inode_size(struct btrfs_root *log, struct inode *inode, +static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, struct btrfs_path *path, u64 *size_ret) { struct btrfs_key key; @@ -4279,7 +4287,7 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode, */ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path) { @@ -4374,7 +4382,7 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, */ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -4385,7 +4393,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; struct btrfs_root *log = root->log_root; const u64 ino = btrfs_ino(inode); - const u64 i_size = i_size_read(inode); + const u64 i_size = i_size_read(&inode->vfs_inode); if (!btrfs_fs_incompat(fs_info, NO_HOLES)) return 0; @@ -4495,7 +4503,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, static int btrfs_check_ref_name_override(struct extent_buffer *eb, const int slot, const struct btrfs_key *key, - struct inode *inode, + struct btrfs_inode *inode, u64 *other_ino) { int ret; @@ -4551,9 +4559,8 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb, } read_extent_buffer(eb, name, name_ptr, this_name_len); - di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root, - search_path, parent, - name, this_name_len, 0); + di = btrfs_lookup_dir_item(NULL, inode->root, search_path, + parent, name, this_name_len, 0); if (di && !IS_ERR(di)) { struct btrfs_key di_key; @@ -4596,7 +4603,7 @@ out: * This handles both files and directories. */ static int btrfs_log_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_inode *inode, int inode_only, const loff_t start, const loff_t end, @@ -4618,7 +4625,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, int ins_nr; bool fast_search = false; u64 ino = btrfs_ino(inode); - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; u64 logged_isize = 0; bool need_log_inode_item = true; @@ -4639,9 +4646,9 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, /* today the code can only do partial logging of directories */ - if (S_ISDIR(inode->i_mode) || + if (S_ISDIR(inode->vfs_inode.i_mode) || (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags) && + &inode->runtime_flags) && inode_only >= LOG_INODE_EXISTS)) max_key.type = BTRFS_XATTR_ITEM_KEY; else @@ -4654,8 +4661,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, * order for the log replay code to mark inodes for link count * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 
*/ - if (S_ISDIR(inode->i_mode) || - BTRFS_I(inode)->generation > fs_info->last_trans_committed) + if (S_ISDIR(inode->vfs_inode.i_mode) || + inode->generation > fs_info->last_trans_committed) ret = btrfs_commit_inode_delayed_items(trans, inode); else ret = btrfs_commit_inode_delayed_inode(inode); @@ -4668,17 +4675,16 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (inode_only == LOG_OTHER_INODE) { inode_only = LOG_INODE_EXISTS; - mutex_lock_nested(&BTRFS_I(inode)->log_mutex, - SINGLE_DEPTH_NESTING); + mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); } else { - mutex_lock(&BTRFS_I(inode)->log_mutex); + mutex_lock(&inode->log_mutex); } /* * a brute force approach to making sure we get the most uptodate * copies of everything. */ - if (S_ISDIR(inode->i_mode)) { + if (S_ISDIR(inode->vfs_inode.i_mode)) { int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; if (inode_only == LOG_INODE_EXISTS) @@ -4699,31 +4705,30 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, * (zeroes), as if an expanding truncate happened, * instead of getting a file of 4Kb only. */ - err = logged_inode_size(log, inode, path, - &logged_isize); + err = logged_inode_size(log, inode, path, &logged_isize); if (err) goto out_unlock; } if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { if (inode_only == LOG_INODE_EXISTS) { max_key.type = BTRFS_XATTR_ITEM_KEY; ret = drop_objectid_items(trans, log, path, ino, max_key.type); } else { clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); while(1) { ret = btrfs_truncate_inode_items(trans, - log, inode, 0, 0); + log, &inode->vfs_inode, 0, 0); if (ret != -EAGAIN) break; } } } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags) || + &inode->runtime_flags) || inode_only == LOG_INODE_EXISTS) { if (inode_only == LOG_INODE_ALL) fast_search = true; @@ -4764,18 +4769,17 @@ again: if ((min_key.type == BTRFS_INODE_REF_KEY || min_key.type == BTRFS_INODE_EXTREF_KEY) && - BTRFS_I(inode)->generation == trans->transid) { + inode->generation == trans->transid) { u64 other_ino = 0; ret = btrfs_check_ref_name_override(path->nodes[0], - path->slots[0], - &min_key, inode, - &other_ino); + path->slots[0], &min_key, inode, + &other_ino); if (ret < 0) { err = ret; goto out_unlock; } else if (ret > 0 && ctx && - other_ino != btrfs_ino(ctx->inode)) { + other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { struct btrfs_key inode_key; struct inode *other_inode; @@ -4823,9 +4827,10 @@ again: * update the log with the new name before we * unpin it. 
*/ - err = btrfs_log_inode(trans, root, other_inode, - LOG_OTHER_INODE, - 0, LLONG_MAX, ctx); + err = btrfs_log_inode(trans, root, + BTRFS_I(other_inode), + LOG_OTHER_INODE, 0, LLONG_MAX, + ctx); iput(other_inode); if (err) goto out_unlock; @@ -4979,25 +4984,25 @@ log_extents: write_unlock(&em_tree->lock); } - if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { + if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { ret = log_directory_changes(trans, root, inode, path, dst_path, - ctx); + ctx); if (ret) { err = ret; goto out_unlock; } } - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->logged_trans = trans->transid; - BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + inode->logged_trans = trans->transid; + inode->last_log_commit = inode->last_sub_trans; + spin_unlock(&inode->lock); out_unlock: if (unlikely(err)) btrfs_put_logged_extents(&logged_list); else btrfs_submit_logged_extents(&logged_list, log); - mutex_unlock(&BTRFS_I(inode)->log_mutex); + mutex_unlock(&inode->log_mutex); btrfs_free_path(path); btrfs_free_path(dst_path); @@ -5021,13 +5026,13 @@ out_unlock: * we logged the inode or it might have also done the unlink). */ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; bool ret = false; - mutex_lock(&BTRFS_I(inode)->log_mutex); - if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) { + mutex_lock(&inode->log_mutex); + if (inode->last_unlink_trans > fs_info->last_trans_committed) { /* * Make sure any commits to the log are forced to be full * commits. @@ -5035,7 +5040,7 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, btrfs_set_log_full_commit(fs_info, trans); ret = true; } - mutex_unlock(&BTRFS_I(inode)->log_mutex); + mutex_unlock(&inode->log_mutex); return ret; } @@ -5047,14 +5052,14 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, * a full commit is required. */ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct dentry *parent, struct super_block *sb, u64 last_committed) { int ret = 0; struct dentry *old_parent = NULL; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; /* * for regular files, if its inode is already on disk, we don't @@ -5062,15 +5067,15 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. */ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) - goto out; + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) + goto out; - if (!S_ISDIR(inode->i_mode)) { + if (!S_ISDIR(inode->vfs_inode.i_mode)) { if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) goto out; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } while (1) { @@ -5081,7 +5086,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * think this inode has already been logged. 
*/ if (inode != orig_inode) - BTRFS_I(inode)->logged_trans = trans->transid; + inode->logged_trans = trans->transid; smp_mb(); if (btrfs_must_commit_transaction(trans, inode)) { @@ -5093,7 +5098,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, break; if (IS_ROOT(parent)) { - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); if (btrfs_must_commit_transaction(trans, inode)) ret = 1; break; @@ -5102,7 +5107,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, parent = dget_parent(parent); dput(old_parent); old_parent = parent; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } dput(old_parent); @@ -5159,7 +5164,7 @@ struct btrfs_dir_list { */ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *start_inode, + struct btrfs_inode *start_inode, struct btrfs_log_ctx *ctx) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -5237,7 +5242,7 @@ process_leaf: goto next_dir_inode; } - if (btrfs_inode_in_log(di_inode, trans->transid)) { + if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { iput(di_inode); break; } @@ -5245,10 +5250,10 @@ process_leaf: ctx->log_new_dentries = false; if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) log_mode = LOG_INODE_ALL; - ret = btrfs_log_inode(trans, root, di_inode, + ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), log_mode, 0, LLONG_MAX, ctx); if (!ret && - btrfs_must_commit_transaction(trans, di_inode)) + btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) ret = 1; iput(di_inode); if (ret) @@ -5289,14 +5294,14 @@ next_dir_inode: } static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; struct btrfs_path *path; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; const u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); @@ -5365,14 +5370,14 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, if (ctx) ctx->log_new_dentries = false; - ret = btrfs_log_inode(trans, root, dir_inode, + ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), LOG_INODE_ALL, 0, LLONG_MAX, ctx); if (!ret && - btrfs_must_commit_transaction(trans, dir_inode)) + btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode))) ret = 1; if (!ret && ctx && ctx->log_new_dentries) ret = log_new_dir_dentries(trans, root, - dir_inode, ctx); + BTRFS_I(dir_inode), ctx); iput(dir_inode); if (ret) goto out; @@ -5392,7 +5397,8 @@ out: * the last committed transaction */ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, + struct btrfs_inode *inode, struct dentry *parent, const loff_t start, const loff_t end, @@ -5406,9 +5412,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, int ret = 0; u64 last_committed = fs_info->last_trans_committed; bool log_dentries = false; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; - sb = inode->i_sb; + sb = inode->vfs_inode.i_sb; if (btrfs_test_opt(fs_info, NOTREELOG)) { ret = 1; @@ -5425,14 +5431,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_no_trans; } - if (root != BTRFS_I(inode)->root || - btrfs_root_refs(&root->root_item) == 0) { 
+ if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) { ret = 1; goto end_no_trans; } - ret = check_parent_dirs_for_sync(trans, inode, parent, - sb, last_committed); + ret = check_parent_dirs_for_sync(trans, inode, parent, sb, + last_committed); if (ret) goto end_no_trans; @@ -5455,14 +5460,14 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. */ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) { + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) { ret = 0; goto end_trans; } - if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries) + if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) log_dentries = true; /* @@ -5506,7 +5511,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * but the file inode does not have a matching BTRFS_INODE_REF_KEY item * and has a link count of 2. */ - if (BTRFS_I(inode)->last_unlink_trans > last_committed) { + if (inode->last_unlink_trans > last_committed) { ret = btrfs_log_all_parents(trans, orig_inode, ctx); if (ret) goto end_trans; @@ -5516,14 +5521,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; - inode = d_inode(parent); - if (root != BTRFS_I(inode)->root) + inode = BTRFS_I(d_inode(parent)); + if (root != inode->root) break; - if (BTRFS_I(inode)->generation > last_committed) { + if (inode->generation > last_committed) { ret = btrfs_log_inode(trans, root, inode, - LOG_INODE_EXISTS, - 0, LLONG_MAX, ctx); + LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); if (ret) goto end_trans; } @@ -5567,8 +5571,8 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct dentry *parent = dget_parent(dentry); int ret; - ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, - start, end, 0, ctx); + ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)), + parent, start, end, 0, ctx); dput(parent); return ret; @@ -5730,7 +5734,7 @@ error: * inodes, etc) are done. */ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, - struct inode *dir, struct inode *inode, + struct btrfs_inode *dir, struct btrfs_inode *inode, int for_rename) { /* @@ -5743,23 +5747,23 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, * into the file. When the file is logged we check it and * don't log the parents if the file is fully on disk. 
*/ - mutex_lock(&BTRFS_I(inode)->log_mutex); - BTRFS_I(inode)->last_unlink_trans = trans->transid; - mutex_unlock(&BTRFS_I(inode)->log_mutex); + mutex_lock(&inode->log_mutex); + inode->last_unlink_trans = trans->transid; + mutex_unlock(&inode->log_mutex); /* * if this directory was already logged any new * names for this file/dir will get recorded */ smp_mb(); - if (BTRFS_I(dir)->logged_trans == trans->transid) + if (dir->logged_trans == trans->transid) return; /* * if the inode we're about to unlink was logged, * the log will be properly updated for any new names */ - if (BTRFS_I(inode)->logged_trans == trans->transid) + if (inode->logged_trans == trans->transid) return; /* @@ -5776,9 +5780,9 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, return; record: - mutex_lock(&BTRFS_I(dir)->log_mutex); - BTRFS_I(dir)->last_unlink_trans = trans->transid; - mutex_unlock(&BTRFS_I(dir)->log_mutex); + mutex_lock(&dir->log_mutex); + dir->last_unlink_trans = trans->transid; + mutex_unlock(&dir->log_mutex); } /* @@ -5794,11 +5798,11 @@ record: * parent root and tree of tree roots trees, etc) are done. */ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, - struct inode *dir) + struct btrfs_inode *dir) { - mutex_lock(&BTRFS_I(dir)->log_mutex); - BTRFS_I(dir)->last_unlink_trans = trans->transid; - mutex_unlock(&BTRFS_I(dir)->log_mutex); + mutex_lock(&dir->log_mutex); + dir->last_unlink_trans = trans->transid; + mutex_unlock(&dir->log_mutex); } /* @@ -5809,27 +5813,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, * full transaction commit is required. */ int btrfs_log_new_name(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *old_dir, + struct btrfs_inode *inode, struct btrfs_inode *old_dir, struct dentry *parent) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root * root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; /* * this will force the logging code to walk the dentry chain * up for the file */ - if (S_ISREG(inode->i_mode)) - BTRFS_I(inode)->last_unlink_trans = trans->transid; + if (S_ISREG(inode->vfs_inode.i_mode)) + inode->last_unlink_trans = trans->transid; /* * if this inode hasn't been logged and directory we're renaming it * from hasn't been logged, we don't need to log it */ - if (BTRFS_I(inode)->logged_trans <= - fs_info->last_trans_committed && - (!old_dir || BTRFS_I(old_dir)->logged_trans <= - fs_info->last_trans_committed)) + if (inode->logged_trans <= fs_info->last_trans_committed && + (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) return 0; return btrfs_log_inode_parent(trans, root, inode, parent, 0, diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index ab858e31ccbc..483027f9a7f4 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -48,13 +48,13 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans) { - ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid; + WRITE_ONCE(fs_info->last_trans_log_full_commit, trans->transid); } static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans) { - return ACCESS_ONCE(fs_info->last_trans_log_full_commit) == + return READ_ONCE(fs_info->last_trans_log_full_commit) == trans->transid; } @@ -72,19 +72,19 @@ int btrfs_log_dentry_safe(struct 
btrfs_trans_handle *trans, int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *dir, u64 index); + struct btrfs_inode *dir, u64 index); int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *inode, u64 dirid); + struct btrfs_inode *inode, u64 dirid); void btrfs_end_log_trans(struct btrfs_root *root); int btrfs_pin_log_trans(struct btrfs_root *root); void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, - struct inode *dir, struct inode *inode, + struct btrfs_inode *dir, struct btrfs_inode *inode, int for_rename); void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, - struct inode *dir); + struct btrfs_inode *dir); int btrfs_log_new_name(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *old_dir, + struct btrfs_inode *inode, struct btrfs_inode *old_dir, struct dentry *parent); #endif diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c index b1434bb57e36..d8edf164f81c 100644 --- a/fs/btrfs/ulist.c +++ b/fs/btrfs/ulist.c @@ -52,13 +52,13 @@ void ulist_init(struct ulist *ulist) } /** - * ulist_fini - free up additionally allocated memory for the ulist + * ulist_release - free up additionally allocated memory for the ulist * @ulist: the ulist from which to free the additional memory * * This is useful in cases where the base 'struct ulist' has been statically * allocated. */ -static void ulist_fini(struct ulist *ulist) +void ulist_release(struct ulist *ulist) { struct ulist_node *node; struct ulist_node *next; @@ -79,7 +79,7 @@ static void ulist_fini(struct ulist *ulist) */ void ulist_reinit(struct ulist *ulist) { - ulist_fini(ulist); + ulist_release(ulist); ulist_init(ulist); } @@ -105,13 +105,13 @@ struct ulist *ulist_alloc(gfp_t gfp_mask) * ulist_free - free dynamically allocated ulist * @ulist: ulist to free * - * It is not necessary to call ulist_fini before. + * It is not necessary to call ulist_release before. 
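
A minimal usage sketch, not part of the patch: with ulist_release() now exported, a caller can keep the base struct ulist on the stack (the statically allocated case the kernel-doc above mentions) and free only the node allocations. The wrapper name below is invented for illustration and assumes only the declarations from fs/btrfs/ulist.h quoted in this diff.

static void example_stack_ulist(void)
{
	struct ulist ul;

	ulist_init(&ul);
	/* ... ulist_add()/iteration of the set would go here ... */
	ulist_release(&ul);	/* frees the node allocations; 'ul' itself lives on the stack */
}
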
*/ void ulist_free(struct ulist *ulist) { if (!ulist) return; - ulist_fini(ulist); + ulist_release(ulist); kfree(ulist); } diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h index a01a2c45825f..53c913632733 100644 --- a/fs/btrfs/ulist.h +++ b/fs/btrfs/ulist.h @@ -19,9 +19,6 @@ * */ struct ulist_iterator { -#ifdef CONFIG_BTRFS_DEBUG - int i; -#endif struct list_head *cur_list; /* hint to start search */ }; @@ -32,10 +29,6 @@ struct ulist_node { u64 val; /* value to store */ u64 aux; /* auxiliary value saved along with the val */ -#ifdef CONFIG_BTRFS_DEBUG - int seqnum; /* sequence number this node is added */ -#endif - struct list_head list; /* used to link node */ struct rb_node rb_node; /* used to speed up search */ }; @@ -51,6 +44,7 @@ struct ulist { }; void ulist_init(struct ulist *ulist); +void ulist_release(struct ulist *ulist); void ulist_reinit(struct ulist *ulist); struct ulist *ulist_alloc(gfp_t gfp_mask); void ulist_free(struct ulist *ulist); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b2e70073a10d..73d56eef5e60 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -134,8 +134,7 @@ const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = { }; static int init_first_rw_device(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_device *device); + struct btrfs_fs_info *fs_info); static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); static void __btrfs_reset_dev_stats(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); @@ -1726,7 +1725,7 @@ out: * Function to update ctime/mtime for a given device path. * Mainly used for ctime/mtime based probe like libblkid. */ -static void update_dev_time(char *path_name) +static void update_dev_time(const char *path_name) { struct file *filp; @@ -1852,7 +1851,8 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, fs_info->fs_devices->latest_bdev = next_device->bdev; } -int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid) +int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, + u64 devid) { struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; @@ -2092,7 +2092,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, } static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { int ret = 0; @@ -2119,7 +2119,7 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, } int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { *device = NULL; @@ -2152,7 +2152,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, * Lookup a device given by device id, or the path if the id is 0. 
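
An illustrative call site, not part of the patch, showing what the const-qualified device-path parameters above permit: a read-only string can be handed straight through without a cast. The path "/dev/sdz1" and the wrapper name are hypothetical.

static int example_lookup_missing_or_by_path(struct btrfs_fs_info *fs_info,
					     struct btrfs_device **device)
{
	/* no (char *) cast is needed with the constified prototype */
	return btrfs_find_device_missing_or_by_path(fs_info, "/dev/sdz1", device);
}
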
*/ int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, struct btrfs_device **device) + const char *devpath, + struct btrfs_device **device) { int ret; @@ -2308,7 +2309,7 @@ error: return ret; } -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path) +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) { struct btrfs_root *root = fs_info->dev_root; struct request_queue *q; @@ -2440,7 +2441,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path) if (seeding_dev) { mutex_lock(&fs_info->chunk_mutex); - ret = init_first_rw_device(trans, fs_info, device); + ret = init_first_rw_device(trans, fs_info); mutex_unlock(&fs_info->chunk_mutex); if (ret) { btrfs_abort_transaction(trans, ret); @@ -2516,7 +2517,7 @@ error: } int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out) { @@ -4584,8 +4585,7 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) / sizeof(struct btrfs_stripe) + 1) static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 start, - u64 type) + u64 start, u64 type) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_fs_devices *fs_devices = info->fs_devices; @@ -5009,12 +5009,11 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, ASSERT(mutex_is_locked(&fs_info->chunk_mutex)); chunk_offset = find_next_chunk(fs_info); - return __btrfs_alloc_chunk(trans, fs_info, chunk_offset, type); + return __btrfs_alloc_chunk(trans, chunk_offset, type); } static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_device *device) + struct btrfs_fs_info *fs_info) { struct btrfs_root *extent_root = fs_info->extent_root; u64 chunk_offset; @@ -5024,14 +5023,13 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, chunk_offset = find_next_chunk(fs_info); alloc_profile = btrfs_get_alloc_profile(extent_root, 0); - ret = __btrfs_alloc_chunk(trans, fs_info, chunk_offset, alloc_profile); + ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile); if (ret) return ret; sys_chunk_offset = find_next_chunk(fs_info); alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0); - ret = __btrfs_alloc_chunk(trans, fs_info, sys_chunk_offset, - alloc_profile); + ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile); return ret; } @@ -6958,7 +6956,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, key.offset = device->devid; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { btrfs_warn_in_rcu(fs_info, @@ -7106,7 +7105,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, return 0; } -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path) +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) { struct buffer_head *bh; struct btrfs_super_block *disk_super; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 24ba6bc3ec34..59be81206dd7 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -422,16 +422,16 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step); void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, struct btrfs_device *device, struct btrfs_device *this_dev); int 
btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device); int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, + const char *devpath, struct btrfs_device **device); struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid); int btrfs_rm_device(struct btrfs_fs_info *fs_info, - char *device_path, u64 devid); + const char *device_path, u64 devid); void btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, @@ -439,9 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, u8 *uuid, u8 *fsid); int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path); +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path); int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out); int btrfs_balance(struct btrfs_balance_control *bctl, @@ -474,7 +474,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path); +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path); int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len, int mirror_num); unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 9621c7f2503e..b3cbf80c5acf 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -47,8 +47,8 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name, return -ENOMEM; /* lookup the xattr by name */ - di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, - strlen(name), 0); + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(BTRFS_I(inode)), + name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; @@ -108,8 +108,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans, path->skip_release_on_error = 1; if (!value) { - di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), - name, name_len, -1); + di = btrfs_lookup_xattr(trans, root, path, + btrfs_ino(BTRFS_I(inode)), name, name_len, -1); if (!di && (flags & XATTR_REPLACE)) ret = -ENODATA; else if (IS_ERR(di)) @@ -128,8 +128,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans, */ if (flags & XATTR_REPLACE) { ASSERT(inode_is_locked(inode)); - di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), - name, name_len, 0); + di = btrfs_lookup_xattr(NULL, root, path, + btrfs_ino(BTRFS_I(inode)), name, name_len, 0); if (!di) ret = -ENODATA; else if (IS_ERR(di)) @@ -140,7 +140,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, di = NULL; } - ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(BTRFS_I(inode)), name, name_len, value, size); if (ret == -EOVERFLOW) { /* @@ -278,7 +278,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) * NOTE: we set key.offset = 0; because we want to start with the 
* first xattr that we find and walk forward */ - key.objectid = btrfs_ino(inode); + key.objectid = btrfs_ino(BTRFS_I(inode)); key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index da497f184ff4..135b10823c6d 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -73,13 +73,11 @@ fail: static int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; @@ -89,6 +87,9 @@ static int zlib_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; *out_pages = 0; *total_out = 0; diff --git a/fs/buffer.c b/fs/buffer.c index 0e87401cf335..9196f2a270da 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -19,6 +19,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/iomap.h> @@ -2395,7 +2396,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, loff_t pos, loff_t *bytes) { struct inode *inode = mapping->host; - unsigned blocksize = 1 << inode->i_blkbits; + unsigned int blocksize = i_blocksize(inode); struct page *page; void *fsdata; pgoff_t index, curidx; @@ -2475,8 +2476,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping, get_block_t *get_block, loff_t *bytes) { struct inode *inode = mapping->host; - unsigned blocksize = 1 << inode->i_blkbits; - unsigned zerofrom; + unsigned int blocksize = i_blocksize(inode); + unsigned int zerofrom; int err; err = cont_expand_zero(file, mapping, pos, bytes); @@ -2838,7 +2839,7 @@ int nobh_truncate_page(struct address_space *mapping, struct buffer_head map_bh; int err; - blocksize = 1 << inode->i_blkbits; + blocksize = i_blocksize(inode); length = offset & (blocksize - 1); /* Block boundary? Nothing to do */ @@ -2916,7 +2917,7 @@ int block_truncate_page(struct address_space *mapping, struct buffer_head *bh; int err; - blocksize = 1 << inode->i_blkbits; + blocksize = i_blocksize(inode); length = offset & (blocksize - 1); /* Block boundary? 
Nothing to do */ @@ -3028,7 +3029,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, struct inode *inode = mapping->host; tmp.b_state = 0; tmp.b_blocknr = 0; - tmp.b_size = 1 << inode->i_blkbits; + tmp.b_size = i_blocksize(inode); get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index cd1effee8a49..9bf90bcc56ac 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -19,6 +19,7 @@ #include <linux/fscache-cache.h> #include <linux/timer.h> #include <linux/wait.h> +#include <linux/cred.h> #include <linux/workqueue.h> #include <linux/security.h> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 09860c0ec7c1..1a3e1b40799a 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/pagevec.h> #include <linux/task_io_accounting_ops.h> +#include <linux/signal.h> #include "super.h" #include "mds_client.h" @@ -391,6 +392,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) nr_pages = i; if (nr_pages > 0) { len = nr_pages << PAGE_SHIFT; + osd_req_op_extent_update(req, 0, len); break; } goto out_pages; @@ -751,7 +753,7 @@ static int ceph_writepages_start(struct address_space *mapping, struct pagevec pvec; int done = 0; int rc = 0; - unsigned wsize = 1 << inode->i_blkbits; + unsigned int wsize = i_blocksize(inode); struct ceph_osd_request *req = NULL; int do_sync = 0; loff_t snap_size, i_size; @@ -771,7 +773,7 @@ static int ceph_writepages_start(struct address_space *mapping, wbc->sync_mode == WB_SYNC_NONE ? "NONE" : (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); - if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { + if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { if (ci->i_wrbuffer_ref > 0) { pr_warn_ratelimited( "writepage_start %p %lld forced umount\n", @@ -1017,8 +1019,7 @@ new_request: &ci->i_layout, vino, offset, &len, 0, num_ops, CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, + CEPH_OSD_FLAG_WRITE, snapc, truncate_seq, truncate_size, false); if (IS_ERR(req)) { @@ -1028,8 +1029,7 @@ new_request: min(num_ops, CEPH_OSD_SLAB_OPS), CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, + CEPH_OSD_FLAG_WRITE, snapc, truncate_seq, truncate_size, true); BUG_ON(IS_ERR(req)); @@ -1194,7 +1194,7 @@ static int ceph_update_writeable_page(struct file *file, int r; struct ceph_snap_context *snapc, *oldest; - if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { + if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { dout(" page %p forced umount\n", page); unlock_page(page); return -EIO; @@ -1681,8 +1681,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, ceph_vino(inode), 0, &len, 0, 1, - CEPH_OSD_OP_CREATE, - CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, + CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, NULL, 0, 0, false); if (IS_ERR(req)) { err = PTR_ERR(req); @@ -1699,8 +1698,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, ceph_vino(inode), 0, &len, 1, 3, - CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, + CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, NULL, ci->i_truncate_seq, ci->i_truncate_size, false); if (IS_ERR(req)) { @@ -1873,7 +1871,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, goto out_unlock; } - wr_req->r_flags = CEPH_OSD_FLAG_WRITE | 
CEPH_OSD_FLAG_ACK; + wr_req->r_flags = CEPH_OSD_FLAG_WRITE; osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index 5bc5d37b1217..4e7421caf380 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -234,7 +234,7 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable, inode); if (fscache_cookie_enabled(ci->fscache)) { - dout("fscache_file_set_cookie %p %p enabing cache\n", + dout("fscache_file_set_cookie %p %p enabling cache\n", inode, filp); } } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 94fd76d04683..68c78be19d5b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2,7 +2,7 @@ #include <linux/fs.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/wait.h> @@ -867,7 +867,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) /* * Return caps we have registered with the MDS(s) as 'wanted'. */ -int __ceph_caps_mds_wanted(struct ceph_inode_info *ci) +int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check) { struct ceph_cap *cap; struct rb_node *p; @@ -875,7 +875,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci) for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { cap = rb_entry(p, struct ceph_cap, ci_node); - if (!__cap_is_valid(cap)) + if (check && !__cap_is_valid(cap)) continue; if (cap == ci->i_auth_cap) mds_wanted |= cap->mds_wanted; @@ -1184,6 +1184,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, delayed = 1; } ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH); + if (want & ~cap->mds_wanted) { + /* user space may open/close single file frequently. + * This avoids droping mds_wanted immediately after + * requesting new mds_wanted. + */ + __cap_set_timeouts(mdsc, ci); + } cap->issued &= retain; /* drop bits we don't want */ if (cap->implemented & ~cap->issued) { @@ -2084,8 +2091,6 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) dout("fsync %p%s\n", inode, datasync ? " datasync" : ""); - ceph_sync_write_wait(inode); - ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret < 0) goto out; @@ -2477,23 +2482,22 @@ again: if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) { int mds_wanted; - if (ACCESS_ONCE(mdsc->fsc->mount_state) == + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { dout("get_cap_refs %p forced umount\n", inode); *err = -EIO; ret = 1; goto out_unlock; } - mds_wanted = __ceph_caps_mds_wanted(ci); - if ((mds_wanted & need) != need) { + mds_wanted = __ceph_caps_mds_wanted(ci, false); + if (need & ~(mds_wanted & need)) { dout("get_cap_refs %p caps were dropped" " (session killed?)\n", inode); *err = -ESTALE; ret = 1; goto out_unlock; } - if ((mds_wanted & file_wanted) == - (file_wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR))) + if (!(file_wanted & ~mds_wanted)) ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED; } @@ -3404,6 +3408,7 @@ retry: tcap->implemented |= issued; if (cap == ci->i_auth_cap) ci->i_auth_cap = tcap; + if (!list_empty(&ci->i_cap_flush_list) && ci->i_auth_cap == tcap) { spin_lock(&mdsc->cap_dirty_lock); @@ -3417,9 +3422,18 @@ retry: } else if (tsession) { /* add placeholder for the export tagert */ int flag = (cap == ci->i_auth_cap) ? 
CEPH_CAP_FLAG_AUTH : 0; + tcap = new_cap; ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0, t_seq - 1, t_mseq, (u64)-1, flag, &new_cap); + if (!list_empty(&ci->i_cap_flush_list) && + ci->i_auth_cap == tcap) { + spin_lock(&mdsc->cap_dirty_lock); + list_move_tail(&ci->i_flushing_item, + &tcap->session->s_cap_flushing); + spin_unlock(&mdsc->cap_dirty_lock); + } + __ceph_remove_cap(cap, false); goto out_unlock; } @@ -3924,9 +3938,10 @@ int ceph_encode_inode_release(void **p, struct inode *inode, } int ceph_encode_dentry_release(void **p, struct dentry *dentry, + struct inode *dir, int mds, int drop, int unless) { - struct inode *dir = d_inode(dentry->d_parent); + struct dentry *parent = NULL; struct ceph_mds_request_release *rel = *p; struct ceph_dentry_info *di = ceph_dentry(dentry); int force = 0; @@ -3941,9 +3956,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry, spin_lock(&dentry->d_lock); if (di->lease_session && di->lease_session->s_mds == mds) force = 1; + if (!dir) { + parent = dget(dentry->d_parent); + dir = d_inode(parent); + } spin_unlock(&dentry->d_lock); ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force); + dput(parent); spin_lock(&dentry->d_lock); if (ret && di->lease_session && di->lease_session->s_mds == mds) { diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 39ff678e567f..f2ae393e2c31 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -70,7 +70,7 @@ static int mdsc_show(struct seq_file *s, void *p) seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); - if (req->r_got_unsafe) + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) seq_puts(s, "\t(unsafe)"); else seq_puts(s, "\t"); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 8ab1fdf0bd49..3e9ad501addf 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -371,7 +371,7 @@ more: /* hints to request -> mds selection code */ req->r_direct_mode = USE_AUTH_MDS; req->r_direct_hash = ceph_frag_value(frag); - req->r_direct_is_hash = true; + __set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); if (fi->last_name) { req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL); if (!req->r_path2) { @@ -417,7 +417,7 @@ more: fi->frag = frag; fi->last_readdir = req; - if (req->r_did_prepopulate) { + if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) { fi->readdir_cache_idx = req->r_readdir_cache_idx; if (fi->readdir_cache_idx < 0) { /* preclude from marking dir ordered */ @@ -752,7 +752,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, mask |= CEPH_CAP_XATTR_SHARED; req->r_args.getattr.mask = cpu_to_le32(mask); - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); err = ceph_mdsc_do_request(mdsc, NULL, req); err = ceph_handle_snapdir(req, dentry, err); dentry = ceph_finish_lookup(req, dentry, err); @@ -813,7 +814,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry, } req->r_dentry = dget(dentry); req->r_num_caps = 2; - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_args.mknod.mode = cpu_to_le32(mode); req->r_args.mknod.rdev = cpu_to_le32(rdev); req->r_dentry_drop = CEPH_CAP_FILE_SHARED; @@ -864,7 +866,8 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, ceph_mdsc_put_request(req); goto out; } - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_dentry = dget(dentry); req->r_num_caps = 2; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; @@ -913,7 +916,8 @@ 
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) req->r_dentry = dget(dentry); req->r_num_caps = 2; - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_args.mkdir.mode = cpu_to_le32(mode); req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; @@ -957,7 +961,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir, req->r_dentry = dget(dentry); req->r_num_caps = 2; req->r_old_dentry = dget(old_dentry); - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; /* release LINK_SHARED on source inode (mds will lock it) */ @@ -1023,7 +1028,8 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) } req->r_dentry = dget(dentry); req->r_num_caps = 2; - req->r_locked_dir = dir; + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; req->r_inode_drop = drop_caps_for_unlink(inode); @@ -1066,7 +1072,8 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, req->r_num_caps = 2; req->r_old_dentry = dget(old_dentry); req->r_old_dentry_dir = old_dir; - req->r_locked_dir = new_dir; + req->r_parent = new_dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; @@ -1194,7 +1201,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) struct inode *dir; if (flags & LOOKUP_RCU) { - parent = ACCESS_ONCE(dentry->d_parent); + parent = READ_ONCE(dentry->d_parent); dir = d_inode_rcu(parent); if (!dir) return -ECHILD; @@ -1237,11 +1244,12 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) return -ECHILD; op = ceph_snap(dir) == CEPH_SNAPDIR ? - CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_GETATTR; + CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); if (!IS_ERR(req)) { req->r_dentry = dget(dentry); - req->r_num_caps = op == CEPH_MDS_OP_GETATTR ? 
1 : 2; + req->r_num_caps = 2; + req->r_parent = dir; mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; if (ceph_security_xattr_wanted(dir)) diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 180bbef760f2..e8f11fa565c5 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -207,7 +207,8 @@ static int ceph_get_name(struct dentry *parent, char *name, req->r_inode = d_inode(child); ihold(d_inode(child)); req->r_ino2 = ceph_vino(d_inode(parent)); - req->r_locked_dir = d_inode(parent); + req->r_parent = d_inode(parent); + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); req->r_num_caps = 2; err = ceph_mdsc_do_request(mdsc, NULL, req); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 045d30d26624..26cc95421cca 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -283,7 +283,7 @@ int ceph_open(struct inode *inode, struct file *file) spin_lock(&ci->i_ceph_lock); if (__ceph_is_any_real_caps(ci) && (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { - int mds_wanted = __ceph_caps_mds_wanted(ci); + int mds_wanted = __ceph_caps_mds_wanted(ci, true); int issued = __ceph_caps_issued(ci, NULL); dout("open %p fmode %d want %s issued %s using existing\n", @@ -379,7 +379,8 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, mask |= CEPH_CAP_XATTR_SHARED; req->r_args.open.mask = cpu_to_le32(mask); - req->r_locked_dir = dir; /* caller holds dir->i_mutex */ + req->r_parent = dir; + set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, req); @@ -758,9 +759,7 @@ static void ceph_aio_retry_work(struct work_struct *work) goto out; } - req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | - CEPH_OSD_FLAG_ONDISK | - CEPH_OSD_FLAG_WRITE; + req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); @@ -794,89 +793,6 @@ out: kfree(aio_work); } -/* - * Write commit request unsafe callback, called to tell us when a - * request is unsafe (that is, in flight--has been handed to the - * messenger to send to its target osd). It is called again when - * we've received a response message indicating the request is - * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request - * is completed early (and unsuccessfully) due to a timeout or - * interrupt. - * - * This is used if we requested both an ACK and ONDISK commit reply - * from the OSD. - */ -static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) -{ - struct ceph_inode_info *ci = ceph_inode(req->r_inode); - - dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, - unsafe ? "un" : ""); - if (unsafe) { - ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); - spin_lock(&ci->i_unsafe_lock); - list_add_tail(&req->r_unsafe_item, - &ci->i_unsafe_writes); - spin_unlock(&ci->i_unsafe_lock); - - complete_all(&req->r_completion); - } else { - spin_lock(&ci->i_unsafe_lock); - list_del_init(&req->r_unsafe_item); - spin_unlock(&ci->i_unsafe_lock); - ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); - } -} - -/* - * Wait on any unsafe replies for the given inode. First wait on the - * newest request, and make that the upper bound. Then, if there are - * more requests, keep waiting on the oldest as long as it is still older - * than the original request. 
- */ -void ceph_sync_write_wait(struct inode *inode) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - struct list_head *head = &ci->i_unsafe_writes; - struct ceph_osd_request *req; - u64 last_tid; - - if (!S_ISREG(inode->i_mode)) - return; - - spin_lock(&ci->i_unsafe_lock); - if (list_empty(head)) - goto out; - - /* set upper bound as _last_ entry in chain */ - - req = list_last_entry(head, struct ceph_osd_request, - r_unsafe_item); - last_tid = req->r_tid; - - do { - ceph_osdc_get_request(req); - spin_unlock(&ci->i_unsafe_lock); - - dout("sync_write_wait on tid %llu (until %llu)\n", - req->r_tid, last_tid); - wait_for_completion(&req->r_done_completion); - ceph_osdc_put_request(req); - - spin_lock(&ci->i_unsafe_lock); - /* - * from here on look at first entry in chain, since we - * only want to wait for anything older than last_tid - */ - if (list_empty(head)) - break; - req = list_first_entry(head, struct ceph_osd_request, - r_unsafe_item); - } while (req->r_tid < last_tid); -out: - spin_unlock(&ci->i_unsafe_lock); -} - static ssize_t ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, struct ceph_snap_context *snapc, @@ -915,9 +831,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, if (ret2 < 0) dout("invalidate_inode_pages2_range returned %d\n", ret2); - flags = CEPH_OSD_FLAG_ORDERSNAP | - CEPH_OSD_FLAG_ONDISK | - CEPH_OSD_FLAG_WRITE; + flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; } else { flags = CEPH_OSD_FLAG_READ; } @@ -1116,10 +1030,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); - flags = CEPH_OSD_FLAG_ORDERSNAP | - CEPH_OSD_FLAG_ONDISK | - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ACK; + flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; while ((len = iov_iter_count(from)) > 0) { size_t left; @@ -1165,8 +1076,6 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, goto out; } - /* get a second commit callback */ - req->r_unsafe_callback = ceph_sync_write_unsafe; req->r_inode = inode; osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, @@ -1616,8 +1525,7 @@ static int ceph_zero_partial_object(struct inode *inode, ceph_vino(inode), offset, length, 0, 1, op, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, + CEPH_OSD_FLAG_WRITE, NULL, 0, 0, false); if (IS_ERR(req)) { ret = PTR_ERR(req); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 5e659d054b40..d449e1c03cbd 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -499,7 +499,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_rdcache_gen = 0; ci->i_rdcache_revoking = 0; - INIT_LIST_HEAD(&ci->i_unsafe_writes); INIT_LIST_HEAD(&ci->i_unsafe_dirops); INIT_LIST_HEAD(&ci->i_unsafe_iops); spin_lock_init(&ci->i_unsafe_lock); @@ -583,14 +582,6 @@ int ceph_drop_inode(struct inode *inode) return 1; } -void ceph_evict_inode(struct inode *inode) -{ - /* wait unsafe sync writes */ - ceph_sync_write_wait(inode); - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); -} - static inline blkcnt_t calc_inode_blocks(u64 size) { return (size + (1<<9) - 1) >> 9; @@ -1016,7 +1007,9 @@ out: static void update_dentry_lease(struct dentry *dentry, struct ceph_mds_reply_lease *lease, struct ceph_mds_session *session, - unsigned long from_time) + unsigned long from_time, + struct ceph_vino *tgt_vino, + struct ceph_vino *dir_vino) { struct ceph_dentry_info *di = ceph_dentry(dentry); long unsigned duration = le32_to_cpu(lease->duration_ms); @@ -1024,13 +1017,27 
@@ static void update_dentry_lease(struct dentry *dentry, long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; struct inode *dir; + /* + * Make sure dentry's inode matches tgt_vino. NULL tgt_vino means that + * we expect a negative dentry. + */ + if (!tgt_vino && d_really_is_positive(dentry)) + return; + + if (tgt_vino && (d_really_is_negative(dentry) || + !ceph_ino_compare(d_inode(dentry), tgt_vino))) + return; + spin_lock(&dentry->d_lock); dout("update_dentry_lease %p duration %lu ms ttl %lu\n", dentry, duration, ttl); - /* make lease_rdcache_gen match directory */ dir = d_inode(dentry->d_parent); + /* make sure parent matches dir_vino */ + if (!ceph_ino_compare(dir, dir_vino)) + goto out_unlock; + /* only track leases on regular dentries */ if (ceph_snap(dir) != CEPH_NOSNAP) goto out_unlock; @@ -1108,61 +1115,27 @@ out: * * Called with snap_rwsem (read). */ -int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, - struct ceph_mds_session *session) +int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) { + struct ceph_mds_session *session = req->r_session; struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; struct inode *in = NULL; - struct ceph_vino vino; + struct ceph_vino tvino, dvino; struct ceph_fs_client *fsc = ceph_sb_to_client(sb); int err = 0; dout("fill_trace %p is_dentry %d is_target %d\n", req, rinfo->head->is_dentry, rinfo->head->is_target); -#if 0 - /* - * Debugging hook: - * - * If we resend completed ops to a recovering mds, we get no - * trace. Since that is very rare, pretend this is the case - * to ensure the 'no trace' handlers in the callers behave. - * - * Fill in inodes unconditionally to avoid breaking cap - * invariants. - */ - if (rinfo->head->op & CEPH_MDS_OP_WRITE) { - pr_info("fill_trace faking empty trace on %lld %s\n", - req->r_tid, ceph_mds_op_name(rinfo->head->op)); - if (rinfo->head->is_dentry) { - rinfo->head->is_dentry = 0; - err = fill_inode(req->r_locked_dir, - &rinfo->diri, rinfo->dirfrag, - session, req->r_request_started, -1); - } - if (rinfo->head->is_target) { - rinfo->head->is_target = 0; - ininfo = rinfo->targeti.in; - vino.ino = le64_to_cpu(ininfo->ino); - vino.snap = le64_to_cpu(ininfo->snapid); - in = ceph_get_inode(sb, vino); - err = fill_inode(in, &rinfo->targeti, NULL, - session, req->r_request_started, - req->r_fmode); - iput(in); - } - } -#endif - if (!rinfo->head->is_target && !rinfo->head->is_dentry) { dout("fill_trace reply is empty!\n"); - if (rinfo->head->result == 0 && req->r_locked_dir) + if (rinfo->head->result == 0 && req->r_parent) ceph_invalidate_dir_request(req); return 0; } if (rinfo->head->is_dentry) { - struct inode *dir = req->r_locked_dir; + struct inode *dir = req->r_parent; if (dir) { err = fill_inode(dir, NULL, @@ -1188,8 +1161,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, dname.name = rinfo->dname; dname.len = rinfo->dname_len; dname.hash = full_name_hash(parent, dname.name, dname.len); - vino.ino = le64_to_cpu(rinfo->targeti.in->ino); - vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + tvino.ino = le64_to_cpu(rinfo->targeti.in->ino); + tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); retry_lookup: dn = d_lookup(parent, &dname); dout("d_lookup on parent=%p name=%.*s got %p\n", @@ -1206,8 +1179,8 @@ retry_lookup: } err = 0; } else if (d_really_is_positive(dn) && - (ceph_ino(d_inode(dn)) != vino.ino || - ceph_snap(d_inode(dn)) != vino.snap)) { + (ceph_ino(d_inode(dn)) != tvino.ino || + ceph_snap(d_inode(dn)) != 
tvino.snap)) { dout(" dn %p points to wrong inode %p\n", dn, d_inode(dn)); d_delete(dn); @@ -1221,10 +1194,10 @@ retry_lookup: } if (rinfo->head->is_target) { - vino.ino = le64_to_cpu(rinfo->targeti.in->ino); - vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + tvino.ino = le64_to_cpu(rinfo->targeti.in->ino); + tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); - in = ceph_get_inode(sb, vino); + in = ceph_get_inode(sb, tvino); if (IS_ERR(in)) { err = PTR_ERR(in); goto done; @@ -1233,8 +1206,8 @@ retry_lookup: err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL, session, req->r_request_started, - (!req->r_aborted && rinfo->head->result == 0) ? - req->r_fmode : -1, + (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) && + rinfo->head->result == 0) ? req->r_fmode : -1, &req->r_caps_reservation); if (err < 0) { pr_err("fill_inode badness %p %llx.%llx\n", @@ -1247,8 +1220,9 @@ retry_lookup: * ignore null lease/binding on snapdir ENOENT, or else we * will have trouble splicing in the virtual snapdir later */ - if (rinfo->head->is_dentry && !req->r_aborted && - req->r_locked_dir && + if (rinfo->head->is_dentry && + !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) && + test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) && (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, fsc->mount_options->snapdir_name, req->r_dentry->d_name.len))) { @@ -1257,17 +1231,19 @@ retry_lookup: * mknod symlink mkdir : null -> new inode * unlink : linked -> null */ - struct inode *dir = req->r_locked_dir; + struct inode *dir = req->r_parent; struct dentry *dn = req->r_dentry; bool have_dir_cap, have_lease; BUG_ON(!dn); BUG_ON(!dir); BUG_ON(d_inode(dn->d_parent) != dir); - BUG_ON(ceph_ino(dir) != - le64_to_cpu(rinfo->diri.in->ino)); - BUG_ON(ceph_snap(dir) != - le64_to_cpu(rinfo->diri.in->snapid)); + + dvino.ino = le64_to_cpu(rinfo->diri.in->ino); + dvino.snap = le64_to_cpu(rinfo->diri.in->snapid); + + BUG_ON(ceph_ino(dir) != dvino.ino); + BUG_ON(ceph_snap(dir) != dvino.snap); /* do we have a lease on the whole dir? 
*/ have_dir_cap = @@ -1319,12 +1295,13 @@ retry_lookup: ceph_dir_clear_ordered(dir); dout("d_delete %p\n", dn); d_delete(dn); - } else { - if (have_lease && d_unhashed(dn)) + } else if (have_lease) { + if (d_unhashed(dn)) d_add(dn, NULL); update_dentry_lease(dn, rinfo->dlease, session, - req->r_request_started); + req->r_request_started, + NULL, &dvino); } goto done; } @@ -1347,15 +1324,19 @@ retry_lookup: have_lease = false; } - if (have_lease) + if (have_lease) { + tvino.ino = le64_to_cpu(rinfo->targeti.in->ino); + tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); update_dentry_lease(dn, rinfo->dlease, session, - req->r_request_started); + req->r_request_started, + &tvino, &dvino); + } dout(" final dn %p\n", dn); - } else if (!req->r_aborted && - (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || - req->r_op == CEPH_MDS_OP_MKSNAP)) { + } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || + req->r_op == CEPH_MDS_OP_MKSNAP) && + !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { struct dentry *dn = req->r_dentry; - struct inode *dir = req->r_locked_dir; + struct inode *dir = req->r_parent; /* fill out a snapdir LOOKUPSNAP dentry */ BUG_ON(!dn); @@ -1370,6 +1351,26 @@ retry_lookup: goto done; } req->r_dentry = dn; /* may have spliced */ + } else if (rinfo->head->is_dentry) { + struct ceph_vino *ptvino = NULL; + + if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) || + le32_to_cpu(rinfo->dlease->duration_ms)) { + dvino.ino = le64_to_cpu(rinfo->diri.in->ino); + dvino.snap = le64_to_cpu(rinfo->diri.in->snapid); + + if (rinfo->head->is_target) { + tvino.ino = le64_to_cpu(rinfo->targeti.in->ino); + tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + ptvino = &tvino; + } + + update_dentry_lease(req->r_dentry, rinfo->dlease, + session, req->r_request_started, ptvino, + &dvino); + } else { + dout("%s: no dentry lease or dir cap\n", __func__); + } } done: dout("fill_trace done err=%d\n", err); @@ -1478,7 +1479,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, u32 fpos_offset; struct ceph_readdir_cache_control cache_ctl = {}; - if (req->r_aborted) + if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) return readdir_prepopulate_inodes_only(req, session); if (rinfo->hash_order && req->r_path2) { @@ -1523,14 +1524,14 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, /* FIXME: release caps/leases if error occurs */ for (i = 0; i < rinfo->dir_nr; i++) { struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; - struct ceph_vino vino; + struct ceph_vino tvino, dvino; dname.name = rde->name; dname.len = rde->name_len; dname.hash = full_name_hash(parent, dname.name, dname.len); - vino.ino = le64_to_cpu(rde->inode.in->ino); - vino.snap = le64_to_cpu(rde->inode.in->snapid); + tvino.ino = le64_to_cpu(rde->inode.in->ino); + tvino.snap = le64_to_cpu(rde->inode.in->snapid); if (rinfo->hash_order) { u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, @@ -1559,8 +1560,8 @@ retry_lookup: goto out; } } else if (d_really_is_positive(dn) && - (ceph_ino(d_inode(dn)) != vino.ino || - ceph_snap(d_inode(dn)) != vino.snap)) { + (ceph_ino(d_inode(dn)) != tvino.ino || + ceph_snap(d_inode(dn)) != tvino.snap)) { dout(" dn %p points to wrong inode %p\n", dn, d_inode(dn)); d_delete(dn); @@ -1572,7 +1573,7 @@ retry_lookup: if (d_really_is_positive(dn)) { in = d_inode(dn); } else { - in = ceph_get_inode(parent->d_sb, vino); + in = ceph_get_inode(parent->d_sb, tvino); if (IS_ERR(in)) { dout("new_inode badness\n"); d_drop(dn); @@ -1617,8 +1618,9 @@ retry_lookup: ceph_dentry(dn)->offset = 
rde->offset; + dvino = ceph_vino(d_inode(parent)); update_dentry_lease(dn, rde->lease, req->r_session, - req->r_request_started); + req->r_request_started, &tvino, &dvino); if (err == 0 && skipped == 0 && cache_ctl.index >= 0) { ret = fill_readdir_cache(d_inode(parent), dn, @@ -1632,7 +1634,7 @@ next_item: } out: if (err == 0 && skipped == 0) { - req->r_did_prepopulate = true; + set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags); req->r_readdir_cache_idx = cache_ctl.index; } ceph_readdir_cache_release(&cache_ctl); @@ -1720,7 +1722,7 @@ static void ceph_invalidate_work(struct work_struct *work) mutex_lock(&ci->i_truncate_mutex); - if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { + if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n", inode, ceph_ino(inode)); mapping_set_error(inode->i_mapping, -EIO); @@ -2185,10 +2187,10 @@ int ceph_permission(struct inode *inode, int mask) * Get all attributes. Hopefully somedata we'll have a statlite() * and can limit the fields we require to be accurate. */ -int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ceph_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ceph_inode_info *ci = ceph_inode(inode); int err; diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 7d752d53353a..4c9c72f26eb9 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -25,7 +25,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg) l.stripe_count = ci->i_layout.stripe_count; l.object_size = ci->i_layout.object_size; l.data_pool = ci->i_layout.pool_id; - l.preferred_osd = (s32)-1; + l.preferred_osd = -1; if (copy_to_user(arg, &l, sizeof(l))) return -EFAULT; } @@ -97,7 +97,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg) nl.data_pool = ci->i_layout.pool_id; /* this is obsolete, and always -1 */ - nl.preferred_osd = le64_to_cpu(-1); + nl.preferred_osd = -1; err = __validate_layout(mdsc, &nl); if (err) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c9d2e553a6c4..c681762d76e6 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -547,8 +547,8 @@ void ceph_mdsc_release_request(struct kref *kref) ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); iput(req->r_inode); } - if (req->r_locked_dir) - ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); + if (req->r_parent) + ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); iput(req->r_target_inode); if (req->r_dentry) dput(req->r_dentry); @@ -628,6 +628,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc, { dout("__unregister_request %p tid %lld\n", req, req->r_tid); + /* Never leave an unregistered request on an unsafe list! 
*/ + list_del_init(&req->r_unsafe_item); + if (req->r_tid == mdsc->oldest_tid) { struct rb_node *p = rb_next(&req->r_node); mdsc->oldest_tid = 0; @@ -644,13 +647,15 @@ static void __unregister_request(struct ceph_mds_client *mdsc, erase_request(&mdsc->request_tree, req); - if (req->r_unsafe_dir && req->r_got_unsafe) { + if (req->r_unsafe_dir && + test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); spin_lock(&ci->i_unsafe_lock); list_del_init(&req->r_unsafe_dir_item); spin_unlock(&ci->i_unsafe_lock); } - if (req->r_target_inode && req->r_got_unsafe) { + if (req->r_target_inode && + test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); spin_lock(&ci->i_unsafe_lock); list_del_init(&req->r_unsafe_target_item); @@ -668,6 +673,28 @@ static void __unregister_request(struct ceph_mds_client *mdsc, } /* + * Walk back up the dentry tree until we hit a dentry representing a + * non-snapshot inode. We do this using the rcu_read_lock (which must be held + * when calling this) to ensure that the objects won't disappear while we're + * working with them. Once we hit a candidate dentry, we attempt to take a + * reference to it, and return that as the result. + */ +static struct inode *get_nonsnap_parent(struct dentry *dentry) +{ + struct inode *inode = NULL; + + while (dentry && !IS_ROOT(dentry)) { + inode = d_inode_rcu(dentry); + if (!inode || ceph_snap(inode) == CEPH_NOSNAP) + break; + dentry = dentry->d_parent; + } + if (inode) + inode = igrab(inode); + return inode; +} + +/* * Choose mds to send request to next. If there is a hint set in the * request (e.g., due to a prior forward hint from the mds), use that. * Otherwise, consult frag tree and/or caps to identify the @@ -675,19 +702,6 @@ static void __unregister_request(struct ceph_mds_client *mdsc, * * Called under mdsc->mutex. */ -static struct dentry *get_nonsnap_parent(struct dentry *dentry) -{ - /* - * we don't need to worry about protecting the d_parent access - * here because we never renaming inside the snapped namespace - * except to resplice to another snapdir, and either the old or new - * result is a valid result. - */ - while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) - dentry = dentry->d_parent; - return dentry; -} - static int __choose_mds(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { @@ -697,7 +711,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, int mode = req->r_direct_mode; int mds = -1; u32 hash = req->r_direct_hash; - bool is_hash = req->r_direct_is_hash; + bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); /* * is there a specific mds we should try? ignore hint if we have @@ -717,30 +731,39 @@ static int __choose_mds(struct ceph_mds_client *mdsc, inode = NULL; if (req->r_inode) { inode = req->r_inode; + ihold(inode); } else if (req->r_dentry) { /* ignore race with rename; old or new d_parent is okay */ - struct dentry *parent = req->r_dentry->d_parent; - struct inode *dir = d_inode(parent); + struct dentry *parent; + struct inode *dir; + + rcu_read_lock(); + parent = req->r_dentry->d_parent; + dir = req->r_parent ? : d_inode_rcu(parent); - if (dir->i_sb != mdsc->fsc->sb) { - /* not this fs! 
*/ + if (!dir || dir->i_sb != mdsc->fsc->sb) { + /* not this fs or parent went negative */ inode = d_inode(req->r_dentry); + if (inode) + ihold(inode); } else if (ceph_snap(dir) != CEPH_NOSNAP) { /* direct snapped/virtual snapdir requests * based on parent dir inode */ - struct dentry *dn = get_nonsnap_parent(parent); - inode = d_inode(dn); + inode = get_nonsnap_parent(parent); dout("__choose_mds using nonsnap parent %p\n", inode); } else { /* dentry target */ inode = d_inode(req->r_dentry); if (!inode || mode == USE_AUTH_MDS) { /* dir + name */ - inode = dir; + inode = igrab(dir); hash = ceph_dentry_hash(dir, req->r_dentry); is_hash = true; + } else { + ihold(inode); } } + rcu_read_unlock(); } dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, @@ -769,7 +792,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, (int)r, frag.ndist); if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= CEPH_MDS_STATE_ACTIVE) - return mds; + goto out; } /* since this file/dir wasn't known to be @@ -784,7 +807,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, inode, ceph_vinop(inode), frag.frag, mds); if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= CEPH_MDS_STATE_ACTIVE) - return mds; + goto out; } } } @@ -797,6 +820,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); if (!cap) { spin_unlock(&ci->i_ceph_lock); + iput(inode); goto random; } mds = cap->session->s_mds; @@ -804,6 +828,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc, inode, ceph_vinop(inode), mds, cap == ci->i_auth_cap ? "auth " : "", cap); spin_unlock(&ci->i_ceph_lock); +out: + iput(inode); return mds; random: @@ -1036,7 +1062,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc, while (!list_empty(&session->s_unsafe)) { req = list_first_entry(&session->s_unsafe, struct ceph_mds_request, r_unsafe_item); - list_del_init(&req->r_unsafe_item); pr_warn_ratelimited(" dropping unsafe request %llu\n", req->r_tid); __unregister_request(mdsc, req); @@ -1146,7 +1171,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; if (ci->i_wrbuffer_ref > 0 && - ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) + READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) invalidate = true; while (!list_empty(&ci->i_cap_flush_list)) { @@ -1775,18 +1800,23 @@ retry: return path; } -static int build_dentry_path(struct dentry *dentry, +static int build_dentry_path(struct dentry *dentry, struct inode *dir, const char **ppath, int *ppathlen, u64 *pino, int *pfreepath) { char *path; - if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) { - *pino = ceph_ino(d_inode(dentry->d_parent)); + rcu_read_lock(); + if (!dir) + dir = d_inode_rcu(dentry->d_parent); + if (dir && ceph_snap(dir) == CEPH_NOSNAP) { + *pino = ceph_ino(dir); + rcu_read_unlock(); *ppath = dentry->d_name.name; *ppathlen = dentry->d_name.len; return 0; } + rcu_read_unlock(); path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); if (IS_ERR(path)) return PTR_ERR(path); @@ -1822,8 +1852,8 @@ static int build_inode_path(struct inode *inode, * an explicit ino+path. 
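
An illustrative sketch, not taken from the patch, of the RCU pattern these hunks adopt for dereferencing ->d_parent without holding the parent's lock; it mirrors get_nonsnap_parent() and build_dentry_path() above. The helper name is hypothetical, and a caller would be expected to iput() a non-NULL result.

static struct inode *example_parent_inode(struct dentry *dentry)
{
	struct inode *dir;

	rcu_read_lock();
	dir = d_inode_rcu(dentry->d_parent);	/* parent may change under rename */
	if (dir)
		dir = igrab(dir);		/* NULL if the inode is being torn down */
	rcu_read_unlock();
	return dir;
}
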
*/ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, - const char *rpath, u64 rino, - const char **ppath, int *pathlen, + struct inode *rdiri, const char *rpath, + u64 rino, const char **ppath, int *pathlen, u64 *ino, int *freepath) { int r = 0; @@ -1833,7 +1863,8 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), ceph_snap(rinode)); } else if (rdentry) { - r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); + r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, + freepath); dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath); } else if (rpath || rino) { @@ -1866,7 +1897,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, int ret; ret = set_request_path_attr(req->r_inode, req->r_dentry, - req->r_path1, req->r_ino1.ino, + req->r_parent, req->r_path1, req->r_ino1.ino, &path1, &pathlen1, &ino1, &freepath1); if (ret < 0) { msg = ERR_PTR(ret); @@ -1874,6 +1905,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, } ret = set_request_path_attr(NULL, req->r_old_dentry, + req->r_old_dentry_dir, req->r_path2, req->r_ino2.ino, &path2, &pathlen2, &ino2, &freepath2); if (ret < 0) { @@ -1927,10 +1959,13 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, mds, req->r_inode_drop, req->r_inode_unless, 0); if (req->r_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_dentry, - mds, req->r_dentry_drop, req->r_dentry_unless); + req->r_parent, mds, req->r_dentry_drop, + req->r_dentry_unless); if (req->r_old_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_old_dentry, - mds, req->r_old_dentry_drop, req->r_old_dentry_unless); + req->r_old_dentry_dir, mds, + req->r_old_dentry_drop, + req->r_old_dentry_unless); if (req->r_old_inode_drop) releases += ceph_encode_inode_release(&p, d_inode(req->r_old_dentry), @@ -2012,7 +2047,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); - if (req->r_got_unsafe) { + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { void *p; /* * Replay. 
Do not regenerate message (and rebuild @@ -2061,16 +2096,16 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, rhead = msg->front.iov_base; rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); - if (req->r_got_unsafe) + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) flags |= CEPH_MDS_FLAG_REPLAY; - if (req->r_locked_dir) + if (req->r_parent) flags |= CEPH_MDS_FLAG_WANT_DENTRY; rhead->flags = cpu_to_le32(flags); rhead->num_fwd = req->r_num_fwd; rhead->num_retry = req->r_attempts - 1; rhead->ino = 0; - dout(" r_locked_dir = %p\n", req->r_locked_dir); + dout(" r_parent = %p\n", req->r_parent); return 0; } @@ -2084,8 +2119,8 @@ static int __do_request(struct ceph_mds_client *mdsc, int mds = -1; int err = 0; - if (req->r_err || req->r_got_result) { - if (req->r_aborted) + if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { + if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) __unregister_request(mdsc, req); goto out; } @@ -2096,12 +2131,12 @@ static int __do_request(struct ceph_mds_client *mdsc, err = -EIO; goto finish; } - if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { dout("do_request forced umount\n"); err = -EIO; goto finish; } - if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { if (mdsc->mdsmap_err) { err = mdsc->mdsmap_err; dout("do_request mdsmap err %d\n", err); @@ -2215,7 +2250,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) while (p) { req = rb_entry(p, struct ceph_mds_request, r_node); p = rb_next(p); - if (req->r_got_unsafe) + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) continue; if (req->r_attempts > 0) continue; /* only new requests */ @@ -2250,11 +2285,11 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, dout("do_request on %p\n", req); - /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ + /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ if (req->r_inode) ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); - if (req->r_locked_dir) - ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); + if (req->r_parent) + ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); if (req->r_old_dentry_dir) ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), CEPH_CAP_PIN); @@ -2289,7 +2324,7 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, mutex_lock(&mdsc->mutex); /* only abort if we didn't race with a real reply */ - if (req->r_got_result) { + if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { err = le32_to_cpu(req->r_reply_info.head->result); } else if (err < 0) { dout("aborted request %lld with %d\n", req->r_tid, err); @@ -2301,10 +2336,10 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, */ mutex_lock(&req->r_fill_mutex); req->r_err = err; - req->r_aborted = true; + set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); mutex_unlock(&req->r_fill_mutex); - if (req->r_locked_dir && + if (req->r_parent && (req->r_op & CEPH_MDS_OP_WRITE)) ceph_invalidate_dir_request(req); } else { @@ -2323,7 +2358,7 @@ out: */ void ceph_invalidate_dir_request(struct ceph_mds_request *req) { - struct inode *inode = req->r_locked_dir; + struct inode *inode = req->r_parent; dout("invalidate_dir_request %p (complete, lease(s))\n", inode); @@ -2379,14 +2414,14 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) } /* dup? 
*/ - if ((req->r_got_unsafe && !head->safe) || - (req->r_got_safe && head->safe)) { + if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || + (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { pr_warn("got a dup %s reply on %llu from mds%d\n", head->safe ? "safe" : "unsafe", tid, mds); mutex_unlock(&mdsc->mutex); goto out; } - if (req->r_got_safe) { + if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { pr_warn("got unsafe after safe on %llu from mds%d\n", tid, mds); mutex_unlock(&mdsc->mutex); @@ -2425,10 +2460,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) if (head->safe) { - req->r_got_safe = true; + set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); __unregister_request(mdsc, req); - if (req->r_got_unsafe) { + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { /* * We already handled the unsafe response, now do the * cleanup. No need to examine the response; the MDS @@ -2437,7 +2472,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) * useful we could do with a revised return value. */ dout("got safe reply %llu, mds%d\n", tid, mds); - list_del_init(&req->r_unsafe_item); /* last unsafe request during umount? */ if (mdsc->stopping && !__get_oldest_req(mdsc)) @@ -2446,7 +2480,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) goto out; } } else { - req->r_got_unsafe = true; + set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); if (req->r_unsafe_dir) { struct ceph_inode_info *ci = @@ -2486,7 +2520,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) /* insert trace into our cache */ mutex_lock(&req->r_fill_mutex); current->journal_info = req; - err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); + err = ceph_fill_trace(mdsc->fsc->sb, req); if (err == 0) { if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || req->r_op == CEPH_MDS_OP_LSSNAP)) @@ -2500,7 +2534,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) if (realm) ceph_put_snap_realm(mdsc, realm); - if (err == 0 && req->r_got_unsafe && req->r_target_inode) { + if (err == 0 && req->r_target_inode && + test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); spin_lock(&ci->i_unsafe_lock); list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); @@ -2508,12 +2543,12 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) } out_err: mutex_lock(&mdsc->mutex); - if (!req->r_aborted) { + if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { if (err) { req->r_err = err; } else { req->r_reply = ceph_msg_get(msg); - req->r_got_result = true; + set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); } } else { dout("reply arrived after request %lld was aborted\n", tid); @@ -2557,7 +2592,7 @@ static void handle_forward(struct ceph_mds_client *mdsc, goto out; /* dup reply? */ } - if (req->r_aborted) { + if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { dout("forward tid %llu aborted, unregistering\n", tid); __unregister_request(mdsc, req); } else if (fwd_seq <= req->r_num_fwd) { @@ -2567,7 +2602,7 @@ static void handle_forward(struct ceph_mds_client *mdsc, /* resend. 
forward race not possible; mds would drop */ dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); BUG_ON(req->r_err); - BUG_ON(req->r_got_result); + BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); req->r_attempts = 0; req->r_num_fwd = fwd_seq; req->r_resend_mds = next_mds; @@ -2732,7 +2767,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, while (p) { req = rb_entry(p, struct ceph_mds_request, r_node); p = rb_next(p); - if (req->r_got_unsafe) + if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) continue; if (req->r_attempts == 0) continue; /* only old requests */ @@ -3556,7 +3591,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) { u64 want_tid, want_flush; - if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) return; dout("sync\n"); @@ -3587,7 +3622,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) */ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) { - if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) return true; return atomic_read(&mdsc->num_sessions) <= skipped; } diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 3c6f77b7bb02..ac0475a2daa7 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -202,9 +202,18 @@ struct ceph_mds_request { char *r_path1, *r_path2; struct ceph_vino r_ino1, r_ino2; - struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */ + struct inode *r_parent; /* parent dir inode */ struct inode *r_target_inode; /* resulting inode */ +#define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */ +#define CEPH_MDS_R_ABORTED (2) /* call was aborted */ +#define CEPH_MDS_R_GOT_UNSAFE (3) /* got an unsafe reply */ +#define CEPH_MDS_R_GOT_SAFE (4) /* got a safe reply */ +#define CEPH_MDS_R_GOT_RESULT (5) /* got a result */ +#define CEPH_MDS_R_DID_PREPOPULATE (6) /* prepopulated readdir */ +#define CEPH_MDS_R_PARENT_LOCKED (7) /* is r_parent->i_rwsem wlocked? */ + unsigned long r_req_flags; + struct mutex r_fill_mutex; union ceph_mds_request_args r_args; @@ -216,7 +225,6 @@ struct ceph_mds_request { /* for choosing which mds to send this request to */ int r_direct_mode; u32 r_direct_hash; /* choose dir frag based on this dentry hash */ - bool r_direct_is_hash; /* true if r_direct_hash is valid */ /* data payload is used for xattr ops */ struct ceph_pagelist *r_pagelist; @@ -234,7 +242,6 @@ struct ceph_mds_request { struct ceph_mds_reply_info_parsed r_reply_info; struct page *r_locked_page; int r_err; - bool r_aborted; unsigned long r_timeout; /* optional. 
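The mds_client.h hunk above folds the request booleans (r_aborted, r_got_unsafe, r_got_safe, r_got_result, r_did_prepopulate, r_direct_is_hash) into a single unsigned long r_req_flags bitmap keyed by the CEPH_MDS_R_* bit numbers, which is why the earlier hunks switch call sites to set_bit()/test_bit(). A small self-contained sketch of that conversion pattern, using invented names rather than the ceph structures:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative bit numbers; the real ones are the CEPH_MDS_R_* defines. */
#define DEMO_R_GOT_UNSAFE	1
#define DEMO_R_ABORTED		2

struct demo_request {
	unsigned long r_flags;	/* replaces several scattered bool fields */
};

static void demo_mark_unsafe(struct demo_request *req)
{
	set_bit(DEMO_R_GOT_UNSAFE, &req->r_flags);	/* atomic set of one bit */
}

static bool demo_aborted(const struct demo_request *req)
{
	return test_bit(DEMO_R_ABORTED, &req->r_flags);	/* atomic test */
}

Because set_bit()/test_bit() are atomic bitops on a single word, individual flags can be updated without holding a lock, which is the usual motivation for this kind of conversion.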
jiffies, 0 is "wait forever" */ unsigned long r_started; /* start time to measure timeout against */ @@ -262,9 +269,7 @@ struct ceph_mds_request { ceph_mds_request_callback_t r_callback; ceph_mds_request_wait_callback_t r_wait_for_completion; struct list_head r_unsafe_item; /* per-session unsafe list item */ - bool r_got_unsafe, r_got_safe, r_got_result; - bool r_did_prepopulate; long long r_dir_release_cnt; long long r_dir_ordered_cnt; int r_readdir_cache_idx; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 6bd20d707bfd..0ec8d0114e57 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -757,7 +757,6 @@ static const struct super_operations ceph_super_ops = { .destroy_inode = ceph_destroy_inode, .write_inode = ceph_write_inode, .drop_inode = ceph_drop_inode, - .evict_inode = ceph_evict_inode, .sync_fs = ceph_sync_fs, .put_super = ceph_put_super, .show_options = ceph_show_options, @@ -952,6 +951,14 @@ static int ceph_register_bdi(struct super_block *sb, fsc->backing_dev_info.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE; + if (fsc->mount_options->rsize > fsc->mount_options->rasize && + fsc->mount_options->rsize >= PAGE_SIZE) + fsc->backing_dev_info.io_pages = + (fsc->mount_options->rsize + PAGE_SIZE - 1) + >> PAGE_SHIFT; + else if (fsc->mount_options->rsize == 0) + fsc->backing_dev_info.io_pages = ULONG_MAX; + err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", atomic_long_inc_return(&bdi_seq)); if (!err) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3373b61faefd..fe6b9cfc4013 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -45,8 +45,8 @@ #define ceph_test_mount_opt(fsc, opt) \ (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt)) -#define CEPH_RSIZE_DEFAULT 0 /* max read size */ -#define CEPH_RASIZE_DEFAULT (8192*1024) /* readahead */ +#define CEPH_RSIZE_DEFAULT (64*1024*1024) /* max read size */ +#define CEPH_RASIZE_DEFAULT (8192*1024) /* max readahead */ #define CEPH_MAX_READDIR_DEFAULT 1024 #define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024) #define CEPH_SNAPDIRNAME_DEFAULT ".snap" @@ -343,7 +343,6 @@ struct ceph_inode_info { u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. 
*/ u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ - struct list_head i_unsafe_writes; /* uncommitted sync writes */ struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */ struct list_head i_unsafe_iops; /* uncommitted mds inode ops */ spinlock_t i_unsafe_lock; @@ -602,7 +601,7 @@ static inline int __ceph_caps_wanted(struct ceph_inode_info *ci) } /* what the mds thinks we want */ -extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci); +extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check); extern void ceph_caps_init(struct ceph_mds_client *mdsc); extern void ceph_caps_finalize(struct ceph_mds_client *mdsc); @@ -753,7 +752,6 @@ extern const struct inode_operations ceph_file_iops; extern struct inode *ceph_alloc_inode(struct super_block *sb); extern void ceph_destroy_inode(struct inode *inode); extern int ceph_drop_inode(struct inode *inode); -extern void ceph_evict_inode(struct inode *inode); extern struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino); @@ -764,8 +762,7 @@ extern void ceph_fill_file_time(struct inode *inode, int issued, u64 time_warp_seq, struct timespec *ctime, struct timespec *mtime, struct timespec *atime); extern int ceph_fill_trace(struct super_block *sb, - struct ceph_mds_request *req, - struct ceph_mds_session *session); + struct ceph_mds_request *req); extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct ceph_mds_session *session); @@ -787,8 +784,8 @@ static inline int ceph_do_getattr(struct inode *inode, int mask, bool force) extern int ceph_permission(struct inode *inode, int mask); extern int __ceph_setattr(struct inode *inode, struct iattr *attr); extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); -extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int ceph_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); /* xattr.c */ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); @@ -904,6 +901,7 @@ extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); extern int ceph_encode_inode_release(void **p, struct inode *inode, int mds, int drop, int unless, int force); extern int ceph_encode_dentry_release(void **p, struct dentry *dn, + struct inode *dir, int mds, int drop, int unless); extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, @@ -933,7 +931,7 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, extern int ceph_release(struct inode *inode, struct file *filp); extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, char *data, size_t len); -extern void ceph_sync_write_wait(struct inode *inode); + /* dir.c */ extern const struct file_operations ceph_dir_fops; extern const struct file_operations ceph_snapdir_fops; diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 9156be545b0f..6b61df117fd4 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -303,7 +303,9 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) * gives us the latter, so we must adjust the result. 
*/ mnt = ERR_PTR(-ENOMEM); - full_path = build_path_from_dentry(mntpt); + + /* always use tree name prefix */ + full_path = build_path_from_dentry_optional_prefix(mntpt, true); if (full_path == NULL) goto cdda_exit; diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 479bc0a941f3..3d7298cc0aeb 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -130,10 +130,10 @@ wchar_t cifs_toupper(wchar_t in); * Returns: * Address of the first string */ -static inline wchar_t * -UniStrcat(wchar_t *ucs1, const wchar_t *ucs2) +static inline __le16 * +UniStrcat(__le16 *ucs1, const __le16 *ucs2) { - wchar_t *anchor = ucs1; /* save a pointer to start of ucs1 */ + __le16 *anchor = ucs1; /* save a pointer to start of ucs1 */ while (*ucs1++) ; /* To end of first string */ ucs1--; /* Return to the null */ diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index c9c00a862036..da717fee3026 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -83,7 +83,7 @@ extern int cifs_revalidate_dentry(struct dentry *); extern int cifs_invalidate_mapping(struct inode *inode); extern int cifs_revalidate_mapping(struct inode *inode); extern int cifs_zap_mapping(struct inode *inode); -extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int cifs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int cifs_setattr(struct dentry *, struct iattr *); extern const struct inode_operations cifs_file_inode_ops; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1a90bb3e2986..d42dd3288647 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -443,6 +443,9 @@ struct smb_version_operations { int (*is_transform_hdr)(void *buf); int (*receive_transform)(struct TCP_Server_Info *, struct mid_q_entry **); + enum securityEnum (*select_sectype)(struct TCP_Server_Info *, + enum securityEnum); + }; struct smb_version_values { @@ -822,7 +825,7 @@ struct cifs_ses { int ses_count; /* reference counter */ enum statusEnum status; unsigned overrideSecFlg; /* if non-zero override global sec flags */ - __u16 ipc_tid; /* special tid for connection to IPC share */ + __u32 ipc_tid; /* special tid for connection to IPC share */ char *serverOS; /* name of operating system underlying server */ char *serverNOS; /* name of network operating system of server */ char *serverDomain; /* security realm of server */ diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index f5b87303ce46..1ce733f3582f 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -2086,17 +2086,21 @@ typedef struct dfs_referral_level_3 { /* version 4 is same, + one flag bit */ __u8 ServiceSiteGuid[16]; /* MBZ, ignored */ } __attribute__((packed)) REFERRAL3; -typedef struct smb_com_transaction_get_dfs_refer_rsp { - struct smb_hdr hdr; /* wct = 10 */ - struct trans2_resp t2; - __u16 ByteCount; - __u8 Pad; +struct get_dfs_referral_rsp { __le16 PathConsumed; __le16 NumberOfReferrals; __le32 DFSFlags; REFERRAL3 referrals[1]; /* array of level 3 dfs_referral structures */ /* followed by the strings pointed to by the referral structures */ -} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_RSP; +} __packed; + +typedef struct smb_com_transaction_get_dfs_refer_rsp { + struct smb_hdr hdr; /* wct = 10 */ + struct trans2_resp t2; + __u16 ByteCount; + __u8 Pad; + struct get_dfs_referral_rsp dfs_data; +} __packed TRANSACTION2_GET_DFS_REFER_RSP; /* DFS Flags */ #define DFSREF_REFERRAL_SERVER 0x00000001 /* all targets are DFS roots */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 
406d2c10ba78..97e5d236d265 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -61,6 +61,8 @@ extern void exit_cifs_idmap(void); extern int init_cifs_spnego(void); extern void exit_cifs_spnego(void); extern char *build_path_from_dentry(struct dentry *); +extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry, + bool prefix); extern char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, @@ -284,6 +286,11 @@ extern int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_codepage, unsigned int *num_referrals, struct dfs_info3_param **referrals, int remap); +extern int parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, + unsigned int *num_of_nodes, + struct dfs_info3_param **target_nodes, + const struct nls_table *nls_codepage, int remap, + const char *searchName, bool is_unicode); extern void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb_vol *vol); @@ -526,4 +533,6 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, int __cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, char *signature, struct shash_desc *shash); +enum securityEnum cifs_select_sectype(struct TCP_Server_Info *, + enum securityEnum); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index f5099fb8a22f..066950671929 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4786,117 +4786,6 @@ GetInodeNumOut: return rc; } -/* parses DFS refferal V3 structure - * caller is responsible for freeing target_nodes - * returns: - * on success - 0 - * on failure - errno - */ -static int -parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, - unsigned int *num_of_nodes, - struct dfs_info3_param **target_nodes, - const struct nls_table *nls_codepage, int remap, - const char *searchName) -{ - int i, rc = 0; - char *data_end; - bool is_unicode; - struct dfs_referral_level_3 *ref; - - if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) - is_unicode = true; - else - is_unicode = false; - *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); - - if (*num_of_nodes < 1) { - cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", - *num_of_nodes); - rc = -EINVAL; - goto parse_DFS_referrals_exit; - } - - ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); - if (ref->VersionNumber != cpu_to_le16(3)) { - cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", - le16_to_cpu(ref->VersionNumber)); - rc = -EINVAL; - goto parse_DFS_referrals_exit; - } - - /* get the upper boundary of the resp buffer */ - data_end = (char *)(&(pSMBr->PathConsumed)) + - le16_to_cpu(pSMBr->t2.DataCount); - - cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", - *num_of_nodes, le32_to_cpu(pSMBr->DFSFlags)); - - *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), - GFP_KERNEL); - if (*target_nodes == NULL) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - /* collect necessary data from referrals */ - for (i = 0; i < *num_of_nodes; i++) { - char *temp; - int max_len; - struct dfs_info3_param *node = (*target_nodes)+i; - - node->flags = le32_to_cpu(pSMBr->DFSFlags); - if (is_unicode) { - __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, - GFP_KERNEL); - if (tmp == NULL) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - cifsConvertToUTF16((__le16 *) tmp, searchName, - PATH_MAX, nls_codepage, remap); - node->path_consumed = 
cifs_utf16_bytes(tmp, - le16_to_cpu(pSMBr->PathConsumed), - nls_codepage); - kfree(tmp); - } else - node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); - - node->server_type = le16_to_cpu(ref->ServerType); - node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); - - /* copy DfsPath */ - temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); - max_len = data_end - temp; - node->path_name = cifs_strndup_from_utf16(temp, max_len, - is_unicode, nls_codepage); - if (!node->path_name) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - /* copy link target UNC */ - temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); - max_len = data_end - temp; - node->node_name = cifs_strndup_from_utf16(temp, max_len, - is_unicode, nls_codepage); - if (!node->node_name) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - ref++; - } - -parse_DFS_referrals_exit: - if (rc) { - free_dfs_info_array(*target_nodes, *num_of_nodes); - *target_nodes = NULL; - *num_of_nodes = 0; - } - return rc; -} - int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, const char *search_name, struct dfs_info3_param **target_nodes, @@ -4993,9 +4882,11 @@ getDFSRetry: get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ - rc = parse_DFS_referrals(pSMBr, num_of_nodes, - target_nodes, nls_codepage, remap, - search_name); + rc = parse_dfs_referrals(&pSMBr->dfs_data, + le16_to_cpu(pSMBr->t2.DataCount), + num_of_nodes, target_nodes, nls_codepage, + remap, search_name, + (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) != 0); GetDFSRefExit: cifs_buf_release(pSMB); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 777ad9f4fc3c..9ae695ae3ed7 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -21,6 +21,7 @@ #include <linux/fs.h> #include <linux/net.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/slab.h> @@ -2073,7 +2074,8 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol) * that was specified, or "Unspecified" if that sectype was not * compatible with the given NEGOTIATE request. */ - if (select_sectype(server, vol->sectype) == Unspecified) + if (server->ops->select_sectype(server, vol->sectype) + == Unspecified) return false; /* @@ -2455,7 +2457,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) } down_read(&key->sem); - upayload = user_key_payload(key); + upayload = user_key_payload_locked(key); if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? 
PTR_ERR(upayload) : -EINVAL; goto out_key_put; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 2c227a99f369..56366e984076 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -81,6 +81,17 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, char * build_path_from_dentry(struct dentry *direntry) { + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + bool prefix = tcon->Flags & SMB_SHARE_IS_IN_DFS; + + return build_path_from_dentry_optional_prefix(direntry, + prefix); +} + +char * +build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix) +{ struct dentry *temp; int namelen; int dfsplen; @@ -92,7 +103,7 @@ build_path_from_dentry(struct dentry *direntry) unsigned seq; dirsep = CIFS_DIR_SEP(cifs_sb); - if (tcon->Flags & SMB_SHARE_IS_IN_DFS) + if (prefix) dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); else dfsplen = 0; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7ab5be7944aa..b261db34103c 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -23,6 +23,8 @@ #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/freezer.h> +#include <linux/sched/signal.h> + #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" @@ -1990,9 +1992,10 @@ int cifs_revalidate_dentry(struct dentry *dentry) return cifs_revalidate_mapping(inode); } -int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int cifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct inode *inode = d_inode(dentry); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index c6729156f9a0..d3fb11529ed9 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -640,3 +640,108 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, cifs_add_pending_open_locked(fid, tlink, open); spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } + +/* parses DFS refferal V3 structure + * caller is responsible for freeing target_nodes + * returns: + * - on success - 0 + * - on failure - errno + */ +int +parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, + unsigned int *num_of_nodes, + struct dfs_info3_param **target_nodes, + const struct nls_table *nls_codepage, int remap, + const char *searchName, bool is_unicode) +{ + int i, rc = 0; + char *data_end; + struct dfs_referral_level_3 *ref; + + *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); + + if (*num_of_nodes < 1) { + cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", + *num_of_nodes); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + + ref = (struct dfs_referral_level_3 *) &(rsp->referrals); + if (ref->VersionNumber != cpu_to_le16(3)) { + cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", + le16_to_cpu(ref->VersionNumber)); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + + /* get the upper boundary of the resp buffer */ + data_end = (char *)rsp + rsp_size; + + cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", + *num_of_nodes, le32_to_cpu(rsp->DFSFlags)); + + *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), + GFP_KERNEL); + if (*target_nodes == NULL) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + /* collect necessary data from referrals */ + for (i = 0; i < *num_of_nodes; i++) { + char *temp; + int max_len; + 
struct dfs_info3_param *node = (*target_nodes)+i; + + node->flags = le32_to_cpu(rsp->DFSFlags); + if (is_unicode) { + __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, + GFP_KERNEL); + if (tmp == NULL) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + cifsConvertToUTF16((__le16 *) tmp, searchName, + PATH_MAX, nls_codepage, remap); + node->path_consumed = cifs_utf16_bytes(tmp, + le16_to_cpu(rsp->PathConsumed), + nls_codepage); + kfree(tmp); + } else + node->path_consumed = le16_to_cpu(rsp->PathConsumed); + + node->server_type = le16_to_cpu(ref->ServerType); + node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); + + /* copy DfsPath */ + temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); + max_len = data_end - temp; + node->path_name = cifs_strndup_from_utf16(temp, max_len, + is_unicode, nls_codepage); + if (!node->path_name) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + /* copy link target UNC */ + temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); + max_len = data_end - temp; + node->node_name = cifs_strndup_from_utf16(temp, max_len, + is_unicode, nls_codepage); + if (!node->node_name) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + ref++; + } + +parse_DFS_referrals_exit: + if (rc) { + free_dfs_info_array(*target_nodes, *num_of_nodes); + *target_nodes = NULL; + *num_of_nodes = 0; + } + return rc; +} diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index dcbcc927399a..8b0502cd39af 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -498,7 +498,7 @@ setup_ntlmv2_ret: } enum securityEnum -select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) +cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (server->negflavor) { case CIFS_NEGFLAVOR_EXTENDED: @@ -1391,7 +1391,7 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) { int type; - type = select_sectype(ses->server, ses->sectype); + type = cifs_select_sectype(ses->server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 67a987e4d026..cc93ba4da9b5 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -1087,6 +1087,7 @@ struct smb_version_operations smb1_operations = { .is_read_op = cifs_is_read_op, .wp_retry_size = cifs_wp_retry_size, .dir_needs_close = cifs_dir_needs_close, + .select_sectype = cifs_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = CIFSSMBQAllEAs, .set_EA = CIFSSMBSetEA, diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index b2aff0c6f22c..b4b1f0305f29 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -73,7 +73,8 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, - fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, true, + fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, + true /* is_fsctl */, false /* use_ipc */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index a44b4dbe4aae..0231108d9387 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -282,6 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + false /* use_ipc */, NULL /* no data input */, 0 /* no data 
input */, (char **)&out_buf, &ret_data_len); if (rc != 0) @@ -571,6 +572,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, + false /* use_ipc */, NULL, 0 /* no input */, (char **)&res_key, &ret_data_len); @@ -635,7 +637,8 @@ smb2_clone_range(const unsigned int xid, /* Request server copy to target from src identified by key */ rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, + true /* is_fsctl */, false /* use_ipc */, + (char *)pcchunk, sizeof(struct copychunk_ioctl), (char **)&retbuf, &ret_data_len); if (rc == 0) { @@ -787,7 +790,8 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, &setsparse, 1, NULL, NULL); + true /* is_fctl */, false /* use_ipc */, + &setsparse, 1, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; cifs_dbg(FYI, "set sparse rc = %d\n", rc); @@ -857,7 +861,8 @@ smb2_duplicate_extents(const unsigned int xid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, + true /* is_fsctl */, false /* use_ipc */, + (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), NULL, &ret_data_len); @@ -891,7 +896,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, + true /* is_fsctl */, false /* use_ipc */, + (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), NULL, &ret_data_len); @@ -910,7 +916,8 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, + true /* is_fsctl */, false /* use_ipc */, + NULL, 0 /* no input data */, (char **)&retbuf, &ret_data_len); cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", @@ -1097,6 +1104,103 @@ smb2_new_lease_key(struct cifs_fid *fid) generate_random_uuid(fid->lease_key); } +static int +smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, + const char *search_name, + struct dfs_info3_param **target_nodes, + unsigned int *num_of_nodes, + const struct nls_table *nls_codepage, int remap) +{ + int rc; + __le16 *utf16_path = NULL; + int utf16_path_len = 0; + struct cifs_tcon *tcon; + struct fsctl_get_dfs_referral_req *dfs_req = NULL; + struct get_dfs_referral_rsp *dfs_rsp = NULL; + u32 dfs_req_size = 0, dfs_rsp_size = 0; + + cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name); + + /* + * Use any tcon from the current session. Here, the first one. 
+ */ + spin_lock(&cifs_tcp_ses_lock); + tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon, + tcon_list); + if (tcon) + tcon->tc_count++; + spin_unlock(&cifs_tcp_ses_lock); + + if (!tcon) { + cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", + ses); + rc = -ENOTCONN; + goto out; + } + + utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX, + &utf16_path_len, + nls_codepage, remap); + if (!utf16_path) { + rc = -ENOMEM; + goto out; + } + + dfs_req_size = sizeof(*dfs_req) + utf16_path_len; + dfs_req = kzalloc(dfs_req_size, GFP_KERNEL); + if (!dfs_req) { + rc = -ENOMEM; + goto out; + } + + /* Highest DFS referral version understood */ + dfs_req->MaxReferralLevel = DFS_VERSION; + + /* Path to resolve in an UTF-16 null-terminated string */ + memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); + + do { + /* try first with IPC */ + rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, + FSCTL_DFS_GET_REFERRALS, + true /* is_fsctl */, true /* use_ipc */, + (char *)dfs_req, dfs_req_size, + (char **)&dfs_rsp, &dfs_rsp_size); + if (rc == -ENOTCONN) { + /* try with normal tcon */ + rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, + FSCTL_DFS_GET_REFERRALS, + true /* is_fsctl */, false /*use_ipc*/, + (char *)dfs_req, dfs_req_size, + (char **)&dfs_rsp, &dfs_rsp_size); + } + } while (rc == -EAGAIN); + + if (rc) { + cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); + goto out; + } + + rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size, + num_of_nodes, target_nodes, + nls_codepage, remap, search_name, + true /* is_unicode */); + if (rc) { + cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc); + goto out; + } + + out: + if (tcon) { + spin_lock(&cifs_tcp_ses_lock); + tcon->tc_count--; + spin_unlock(&cifs_tcp_ses_lock); + } + kfree(utf16_path); + kfree(dfs_req); + kfree(dfs_rsp); + return rc; +} #define SMB2_SYMLINK_STRUCT_SIZE \ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) @@ -1220,7 +1324,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + true /* is_fctl */, false /* use_ipc */, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), NULL, NULL); free_xid(xid); return rc; @@ -1254,7 +1359,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + true /* is_fctl */, false /* use_ipc */, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), NULL, NULL); free_xid(xid); return rc; @@ -1609,6 +1715,26 @@ static void cifs_crypt_complete(struct crypto_async_request *req, int err) complete(&res->completion); } +static int +smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) +{ + struct cifs_ses *ses; + u8 *ses_enc_key; + + spin_lock(&cifs_tcp_ses_lock); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + if (ses->Suid != ses_id) + continue; + ses_enc_key = enc ? ses->smb3encryptionkey : + ses->smb3decryptionkey; + memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE); + spin_unlock(&cifs_tcp_ses_lock); + return 0; + } + spin_unlock(&cifs_tcp_ses_lock); + + return 1; +} /* * Encrypt or decrypt @rqst message. 
@rqst has the following format: * iov[0] - transform header (associate data), @@ -1622,10 +1748,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base; unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24; - struct cifs_ses *ses; int rc = 0; struct scatterlist *sg; u8 sign[SMB2_SIGNATURE_SIZE] = {}; + u8 key[SMB3_SIGN_KEY_SIZE]; struct aead_request *req; char *iv; unsigned int iv_len; @@ -1635,9 +1761,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) init_completion(&result.completion); - ses = smb2_find_smb_ses(server, tr_hdr->SessionId); - if (!ses) { - cifs_dbg(VFS, "%s: Could not find session\n", __func__); + rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); + if (rc) { + cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, + enc ? "en" : "de"); return 0; } @@ -1649,8 +1776,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) tfm = enc ? server->secmech.ccmaesencrypt : server->secmech.ccmaesdecrypt; - rc = crypto_aead_setkey(tfm, enc ? ses->smb3encryptionkey : - ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE); + rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE); if (rc) { cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); return rc; @@ -2254,6 +2380,8 @@ struct smb_version_operations smb20_operations = { .clone_range = smb2_clone_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; struct smb_version_operations smb21_operations = { @@ -2335,6 +2463,8 @@ struct smb_version_operations smb21_operations = { .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .enum_snapshots = smb3_enum_snapshots, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; struct smb_version_operations smb30_operations = { @@ -2426,6 +2556,8 @@ struct smb_version_operations smb30_operations = { .free_transform_rq = smb3_free_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; #ifdef CONFIG_CIFS_SMB311 @@ -2518,6 +2650,8 @@ struct smb_version_operations smb311_operations = { .free_transform_rq = smb3_free_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; #endif /* CIFS_SMB311 */ diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index ad83b3db2840..7446496850a3 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -620,6 +620,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + false /* use_ipc */, (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), (char **)&pneg_rsp, &rsplen); @@ -656,6 +657,28 @@ vneg_out: return -EIO; } +enum securityEnum +smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) +{ + switch (requested) { + case Kerberos: + case RawNTLMSSP: + return requested; + case NTLMv2: + return RawNTLMSSP; + case Unspecified: + if (server->sec_ntlmssp && + (global_secflags & CIFSSEC_MAY_NTLMSSP)) + return RawNTLMSSP; + if ((server->sec_kerberos || server->sec_mskerberos) && + 
(global_secflags & CIFSSEC_MAY_KRB5)) + return Kerberos; + /* Fallthrough */ + default: + return Unspecified; + } +} + struct SMB2_sess_data { unsigned int xid; struct cifs_ses *ses; @@ -1008,10 +1031,17 @@ out: static int SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) { - if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) - ses->sectype = RawNTLMSSP; + int type; + + type = smb2_select_sectype(ses->server, ses->sectype); + cifs_dbg(FYI, "sess setup type %d\n", type); + if (type == Unspecified) { + cifs_dbg(VFS, + "Unable to select appropriate authentication method!"); + return -EINVAL; + } - switch (ses->sectype) { + switch (type) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; @@ -1019,7 +1049,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; break; default: - cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype); + cifs_dbg(VFS, "secType %d not supported!\n", type); return -EOPNOTSUPP; } @@ -1167,8 +1197,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; - /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) - req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ + if (ses->server->sign) + req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; } else if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -1527,6 +1557,51 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec, return 0; } +static int +alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, + const char *treename, const __le16 *path) +{ + int treename_len, path_len; + struct nls_table *cp; + const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; + + /* + * skip leading "\\" + */ + treename_len = strlen(treename); + if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) + return -EINVAL; + + treename += 2; + treename_len -= 2; + + path_len = UniStrnlen((wchar_t *)path, PATH_MAX); + + /* + * make room for one path separator between the treename and + * path + */ + *out_len = treename_len + 1 + path_len; + + /* + * final path needs to be null-terminated UTF16 with a + * size aligned to 8 + */ + + *out_size = roundup((*out_len+1)*2, 8); + *out_path = kzalloc(*out_size, GFP_KERNEL); + if (!*out_path) + return -ENOMEM; + + cp = load_nls_default(); + cifs_strtoUTF16(*out_path, treename, treename_len, cp); + UniStrcat(*out_path, sep); + UniStrcat(*out_path, path); + unload_nls(cp); + + return 0; +} + int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, __u8 *oplock, struct smb2_file_all_info *buf, @@ -1575,30 +1650,49 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, req->ShareAccess = FILE_SHARE_ALL_LE; req->CreateDisposition = cpu_to_le32(oparms->disposition); req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); - uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; - /* do not count rfc1001 len field */ - req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; - - /* MUST set path len (NameLength) to 0 opening root of share */ - req->NameLength = cpu_to_le16(uni_path_len - 2); /* -1 since last byte is buf[0] which is sent below (path) */ iov[0].iov_len--; - if (uni_path_len % 8 != 0) { - copy_size = uni_path_len 
/ 8 * 8; - if (copy_size < uni_path_len) - copy_size += 8; - - copy_path = kzalloc(copy_size, GFP_KERNEL); - if (!copy_path) - return -ENOMEM; - memcpy((char *)copy_path, (const char *)path, - uni_path_len); + + req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); + + /* [MS-SMB2] 2.2.13 NameOffset: + * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of + * the SMB2 header, the file name includes a prefix that will + * be processed during DFS name normalization as specified in + * section 3.3.5.9. Otherwise, the file name is relative to + * the share that is identified by the TreeId in the SMB2 + * header. + */ + if (tcon->share_flags & SHI1005_FLAGS_DFS) { + int name_len; + + req->hdr.sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; + rc = alloc_path_with_tree_prefix(©_path, ©_size, + &name_len, + tcon->treeName, path); + if (rc) + return rc; + req->NameLength = cpu_to_le16(name_len * 2); uni_path_len = copy_size; path = copy_path; + } else { + uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; + /* MUST set path len (NameLength) to 0 opening root of share */ + req->NameLength = cpu_to_le16(uni_path_len - 2); + if (uni_path_len % 8 != 0) { + copy_size = roundup(uni_path_len, 8); + copy_path = kzalloc(copy_size, GFP_KERNEL); + if (!copy_path) + return -ENOMEM; + memcpy((char *)copy_path, (const char *)path, + uni_path_len); + uni_path_len = copy_size; + path = copy_path; + } } iov[1].iov_len = uni_path_len; @@ -1683,8 +1777,9 @@ creat_exit: */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data, - u32 indatalen, char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, bool is_fsctl, bool use_ipc, + char *in_data, u32 indatalen, + char **out_data, u32 *plen /* returned data len */) { struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; @@ -1721,6 +1816,16 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, if (rc) return rc; + if (use_ipc) { + if (ses->ipc_tid == 0) { + cifs_small_buf_release(req); + return -ENOTCONN; + } + + cifs_dbg(FYI, "replacing tid 0x%x with IPC tid 0x%x\n", + req->hdr.sync_hdr.TreeId, ses->ipc_tid); + req->hdr.sync_hdr.TreeId = ses->ipc_tid; + } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -1843,6 +1948,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SET_COMPRESSION, true /* is_fsctl */, + false /* use_ipc */, (char *)&fsctl_input /* data input */, 2 /* in data len */, &ret_data /* out data */, NULL); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index c03b252501a1..18700fd25a0b 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -695,6 +695,14 @@ struct fsctl_get_integrity_information_rsp { /* Integrity flags for above */ #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001 +/* See MS-DFSC 2.2.2 */ +struct fsctl_get_dfs_referral_req { + __le16 MaxReferralLevel; + __u8 RequestFileName[]; +} __packed; + +/* DFS response is struct get_dfs_refer_rsp */ + /* See MS-SMB2 2.2.31.3 */ struct network_resiliency_req { __le32 Timeout; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 85fc7a789334..69e35873b1de 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -121,7 +121,8 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, struct smb2_err_rsp **err_buf); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon 
*tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + bool is_fsctl, bool use_ipc, + char *in_data, u32 indatalen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id); @@ -180,4 +181,6 @@ extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, __u8 *lease_key, const __le32 lease_state); extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *); +extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, + enum securityEnum); #endif /* _SMB2PROTO_H */ diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h index 5104d84c4f64..d3c361883c28 100644 --- a/fs/coda/coda_linux.h +++ b/fs/coda/coda_linux.h @@ -47,7 +47,7 @@ int coda_open(struct inode *i, struct file *f); int coda_release(struct inode *i, struct file *f); int coda_permission(struct inode *inode, int mask); int coda_revalidate_inode(struct inode *); -int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); +int coda_getattr(const struct path *, struct kstat *, u32, unsigned int); int coda_setattr(struct dentry *, struct iattr *); /* this file: heloers */ diff --git a/fs/coda/file.c b/fs/coda/file.c index 6e0154eb6fcc..9d956cd6d46f 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -96,7 +96,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) cfi->cfi_mapcount++; spin_unlock(&cii->c_lock); - return host_file->f_op->mmap(host_file, vma); + return call_mmap(host_file, vma); } int coda_open(struct inode *coda_inode, struct file *coda_file) diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 71dbe7e287ce..2dea594da199 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -255,11 +255,12 @@ static void coda_evict_inode(struct inode *inode) coda_cache_clear_inode(inode); } -int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int coda_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - int err = coda_revalidate_inode(d_inode(dentry)); + int err = coda_revalidate_inode(d_inode(path->dentry)); if (!err) - generic_fillattr(d_inode(dentry), stat); + generic_fillattr(d_inode(path->dentry), stat); return err; } diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 822629126e89..f40e3953e7fe 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -22,7 +22,7 @@ #include <linux/kernel.h> #include <linux/major.h> #include <linux/time.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index f6c6c8adbc01..e82357c89979 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -15,7 +15,7 @@ */ #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> diff --git a/fs/compat.c b/fs/compat.c index e50a2114f474..c61b506f5bc9 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -21,6 +21,7 @@ #include <linux/compat.h> #include <linux/errno.h> #include <linux/time.h> +#include <linux/cred.h> #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/namei.h> diff --git a/fs/coredump.c b/fs/coredump.c index ae6b05629ca1..592683711c64 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -16,6 +16,9 @@ #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/coredump.h> +#include 
<linux/sched/coredump.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> #include <linux/utsname.h> #include <linux/pid_namespace.h> #include <linux/module.h> @@ -33,7 +36,6 @@ #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/compat.h> -#include <linux/sched.h> #include <linux/fs.h> #include <linux/path.h> #include <linux/timekeeping.h> diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 02eb6b9e4438..d5d896fa5a71 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -103,7 +103,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info, goto out; } down_read(&keyring_key->sem); - ukp = user_key_payload(keyring_key); + ukp = user_key_payload_locked(keyring_key); if (ukp->datalen != sizeof(struct fscrypt_key)) { res = -EINVAL; up_read(&keyring_key->sem); @@ -27,6 +27,7 @@ #include <linux/pagevec.h> #include <linux/pmem.h> #include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uio.h> #include <linux/vmstat.h> #include <linux/pfn_t.h> @@ -1436,7 +1437,8 @@ out: return result; } #else -static int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops) +static int dax_iomap_pmd_fault(struct vm_fault *vmf, + const struct iomap_ops *ops) { return VM_FAULT_FALLBACK; } diff --git a/fs/direct-io.c b/fs/direct-io.c index c87bae4376b8..a04ebea77de8 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -587,7 +587,7 @@ static int dio_set_defer_completion(struct dio *dio) /* * Call into the fs to map some more disk blocks. We record the current number * of available blocks at sdio->blocks_available. These are in units of the - * fs blocksize, (1 << inode->i_blkbits). + * fs blocksize, i_blocksize(inode). * * The fs is allowed to map lots of blocks at once. If it wants to do that, * it uses the passed inode-relative block number as the file offset, as usual. 
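The fs/direct-io.c comment fix above, like the ext4 hunks further down (mpage_process_page_bufs(), ext4_direct_IO_write(), ext4_wait_for_tail_page_commit(), ext4_mb_init_cache(), mext_page_mkuptodate()), replaces open-coded (1 << inode->i_blkbits) with an i_blocksize() helper. Assuming the helper is nothing more than a wrapper around that shift, a minimal sketch would be:

#include <linux/fs.h>

/* Assumed shape of the i_blocksize() helper: the inode's block size in
 * bytes, derived from its block-size shift. */
static inline unsigned int i_blocksize(const struct inode *inode)
{
	return 1U << inode->i_blkbits;
}

Call sites then read i_blocksize(inode) rather than shifting by hand, which keeps the intent obvious at a glance.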
diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 1ce908c2232c..23488f559cf9 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -17,6 +17,7 @@ #include <linux/dlm.h> #include <linux/dlm_device.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include "dlm_internal.h" #include "lockspace.h" diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 599a29237cfe..95c1c8d34539 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -117,7 +117,7 @@ ecryptfs_get_key_payload_data(struct key *key) auth_tok = ecryptfs_get_encrypted_key_payload_data(key); if (!auth_tok) - return (struct ecryptfs_auth_tok *)user_key_payload(key)->data; + return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data; else return auth_tok; } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index e7413f82d27b..efc2db42d175 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -959,9 +959,10 @@ out: return rc; } -static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; int rc = 0; @@ -983,13 +984,15 @@ static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, return rc; } -static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ecryptfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct kstat lower_stat; int rc; - rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat); + rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat, + request_mask, flags); if (!rc) { fsstack_copy_attr_all(d_inode(dentry), ecryptfs_inode_to_lower(d_inode(dentry))); diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index 866bb18efefe..e00d45af84ea 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c @@ -123,7 +123,7 @@ void ecryptfs_destroy_kthread(void) * @lower_dentry: Lower dentry for file to open * @lower_mnt: Lower vfsmount for file to open * - * This function gets a r/w file opened againt the lower dentry. + * This function gets a r/w file opened against the lower dentry. 
* * Returns zero on success; non-zero otherwise */ diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 158a3a39f82d..039e627194a9 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -22,6 +22,8 @@ #include <linux/fs.h> #include <linux/pagemap.h> +#include <linux/sched/signal.h> + #include "ecryptfs_kernel.h" /** diff --git a/fs/eventfd.c b/fs/eventfd.c index 1231cd1999d8..68b9fffcb2c8 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -9,7 +9,7 @@ #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> diff --git a/fs/eventpoll.c b/fs/eventpoll.c index bcb68fcc8445..341251421ced 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> @@ -1895,7 +1895,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation. * Also, we do not currently supported nested exclusive wakeups. */ - if (epds.events & EPOLLEXCLUSIVE) { + if (ep_op_has_event(op) && (epds.events & EPOLLEXCLUSIVE)) { if (op == EPOLL_CTL_MOD) goto error_tgt_fput; if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) || diff --git a/fs/exec.c b/fs/exec.c index 698a86094f76..65145a3df065 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -32,6 +32,11 @@ #include <linux/swap.h> #include <linux/string.h> #include <linux/init.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/signal.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/perf_event.h> #include <linux/highmem.h> @@ -1088,7 +1093,7 @@ static int de_thread(struct task_struct *tsk) struct task_struct *leader = tsk->group_leader; for (;;) { - threadgroup_change_begin(tsk); + cgroup_threadgroup_change_begin(tsk); write_lock_irq(&tasklist_lock); /* * Do this under tasklist_lock to ensure that @@ -1099,7 +1104,7 @@ static int de_thread(struct task_struct *tsk) break; __set_current_state(TASK_KILLABLE); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); schedule(); if (unlikely(__fatal_signal_pending(tsk))) goto killed; @@ -1157,7 +1162,7 @@ static int de_thread(struct task_struct *tsk) if (unlikely(leader->ptrace)) __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); release_task(leader); } diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index a4b531be9168..329a5d103846 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -15,6 +15,7 @@ #include <linux/mount.h> #include <linux/namei.h> #include <linux/sched.h> +#include <linux/cred.h> #define dprintk(fmt, args...) do{}while(0) @@ -299,7 +300,8 @@ static int get_name(const struct path *path, char *name, struct dentry *child) * filesystem supports 64-bit inode numbers. 
So we need to * actually call ->getattr, not just read i_ino: */ - error = vfs_getattr_nosec(&child_path, &stat); + error = vfs_getattr_nosec(&child_path, &stat, + STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; buffer.ino = stat.ino; diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 4c40c0786e16..d0bdb74f0e15 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -15,6 +15,7 @@ #include <linux/quotaops.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/buffer_head.h> #include <linux/capability.h> diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2fd17e8e4984..f493af666591 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -28,6 +28,7 @@ #include <linux/timer.h> #include <linux/version.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/blockgroup_lock.h> #include <linux/percpu_counter.h> #include <linux/ratelimit.h> @@ -2462,8 +2463,7 @@ extern struct inode *ext4_iget(struct super_block *, unsigned long); extern struct inode *ext4_iget_normal(struct super_block *, unsigned long); extern int ext4_write_inode(struct inode *, struct writeback_control *); extern int ext4_setattr(struct dentry *, struct iattr *); -extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void ext4_evict_inode(struct inode *); extern void ext4_clear_inode(struct inode *); extern int ext4_sync_inode(handle_t *, struct inode *); diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 37e059202cd2..e7f12a204cbc 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -84,7 +84,7 @@ * -- writeout * Writeout looks up whole page cache to see if a buffer is * mapped, If there are not very many delayed buffers, then it is - * time comsuming. + * time consuming. * * With extent status tree implementation, FIEMAP, SEEK_HOLE/DATA, * bigalloc and writeout can figure out if a block or a range of diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index b14bae2598bc..17bc043308f3 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -21,6 +21,8 @@ #include <linux/random.h> #include <linux/bitops.h> #include <linux/blkdev.h> +#include <linux/cred.h> + #include <asm/byteorder.h> #include "ext4.h" diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 41d8e53e5a7f..7385e6a6b6cb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2221,7 +2221,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd, { struct inode *inode = mpd->inode; int err; - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) + ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) >> inode->i_blkbits; do { @@ -3577,7 +3577,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) if (overwrite) get_block_func = ext4_dio_get_block_overwrite; else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || - round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) { + round_down(offset, i_blocksize(inode)) >= inode->i_size) { get_block_func = ext4_dio_get_block; dio_flags = DIO_LOCKING | DIO_SKIP_HOLES; } else if (is_sync_kiocb(iocb)) { @@ -5179,7 +5179,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) * do. 
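The ->getattr conversion running through this part of the series (ecryptfs above, exportfs and ext4 in this hunk) moves to the path / request_mask / query_flags form that backs statx(2): callers state which fields they need and how fresh they need them to be, which is why exportfs can now ask for just STATX_INO with AT_STATX_SYNC_AS_STAT. A user-space sketch of the same contract, assuming a glibc new enough (2.28+) to provide the statx() wrapper; the path is arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct statx stx;

    /* Ask only for the inode number, with stat()-equivalent freshness. */
    if (statx(AT_FDCWD, "/etc/hostname", AT_STATX_SYNC_AS_STAT,
              STATX_INO, &stx) != 0) {
        perror("statx");
        return 1;
    }

    /* stx_mask reports which of the requested fields were actually filled. */
    if (stx.stx_mask & STATX_INO)
        printf("ino: %llu\n", (unsigned long long)stx.stx_ino);
    else
        printf("STATX_INO was not returned\n");

    return 0;
}

The mask is a hint rather than a promise: filesystems may fill in more than was asked for, and only stx_mask says what is valid, which mirrors how the new request_mask argument is treated on the kernel side.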
We do the check mainly to optimize the common PAGE_SIZE == * blocksize case */ - if (offset > PAGE_SIZE - (1 << inode->i_blkbits)) + if (offset > PAGE_SIZE - i_blocksize(inode)) return; while (1) { page = find_lock_page(inode->i_mapping, @@ -5387,13 +5387,13 @@ err_out: return error; } -int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ext4_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct inode *inode; unsigned long long delalloc_blocks; - inode = d_inode(dentry); + inode = d_inode(path->dentry); generic_fillattr(inode, stat); /* diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 10c62de642c6..354dc1a894c2 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -838,7 +838,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) inode = page->mapping->host; sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); - blocksize = 1 << inode->i_blkbits; + blocksize = i_blocksize(inode); blocks_per_page = PAGE_SIZE / blocksize; groups_per_page = blocks_per_page >> 1; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 6fc14def0c70..578f8c33fb44 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) if (PageUptodate(page)) return 0; - blocksize = 1 << inode->i_blkbits; + blocksize = i_blocksize(inode); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index f73ee9534d83..0339daf4ca02 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -249,7 +249,8 @@ static int f2fs_write_meta_page(struct page *page, dec_page_count(sbi, F2FS_DIRTY_META); if (wbc->for_reclaim) - f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE); + f2fs_submit_merged_bio_cond(sbi, page->mapping->host, + 0, page->index, META, WRITE); unlock_page(page); @@ -493,6 +494,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi) #ifdef CONFIG_F2FS_FAULT_INJECTION if (time_to_inject(sbi, FAULT_ORPHAN)) { spin_unlock(&im->ino_lock); + f2fs_show_injection_info(FAULT_ORPHAN); return -ENOSPC; } #endif @@ -681,8 +683,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, return -EINVAL; } - crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block - + crc_offset))); + crc = cur_cp_crc(*cp_block); if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) { f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); return -EINVAL; @@ -891,7 +892,7 @@ retry: F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); return 0; } - fi = list_entry(head->next, struct f2fs_inode_info, dirty_list); + fi = list_first_entry(head, struct f2fs_inode_info, dirty_list); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[type]); if (inode) { @@ -924,7 +925,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) spin_unlock(&sbi->inode_lock[DIRTY_META]); return 0; } - fi = list_entry(head->next, struct f2fs_inode_info, + fi = list_first_entry(head, struct f2fs_inode_info, gdirty_list); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[DIRTY_META]); @@ -998,8 +999,6 @@ out: static void unblock_operations(struct f2fs_sb_info *sbi) { up_write(&sbi->node_write); - - build_free_nids(sbi, false); f2fs_unlock_all(sbi); } @@ -1025,6 +1024,10 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) spin_lock(&sbi->cp_lock); + if (cpc->reason == CP_UMOUNT && ckpt->cp_pack_total_block_count > + 
sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) + disable_nat_bits(sbi, false); + if (cpc->reason == CP_UMOUNT) __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); else @@ -1137,6 +1140,28 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) start_blk = __start_cp_next_addr(sbi); + /* write nat bits */ + if (enabled_nat_bits(sbi, cpc)) { + __u64 cp_ver = cur_cp_version(ckpt); + unsigned int i; + block_t blk; + + cp_ver |= ((__u64)crc32 << 32); + *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); + + blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; + for (i = 0; i < nm_i->nat_bits_blocks; i++) + update_meta_page(sbi, nm_i->nat_bits + + (i << F2FS_BLKSIZE_BITS), blk + i); + + /* Flush all the NAT BITS pages */ + while (get_pages(sbi, F2FS_DIRTY_META)) { + sync_meta_pages(sbi, META, LONG_MAX); + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + } + } + /* need to wait for end_io results */ wait_on_all_pages_writeback(sbi); if (unlikely(f2fs_cp_error(sbi))) @@ -1248,15 +1273,20 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) f2fs_flush_merged_bios(sbi); /* this is the case of multiple fstrims without any changes */ - if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) { - f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt); - f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries); - f2fs_bug_on(sbi, prefree_segments(sbi)); - flush_sit_entries(sbi, cpc); - clear_prefree_segments(sbi, cpc); - f2fs_wait_all_discard_bio(sbi); - unblock_operations(sbi); - goto out; + if (cpc->reason == CP_DISCARD) { + if (!exist_trim_candidates(sbi, cpc)) { + unblock_operations(sbi); + goto out; + } + + if (NM_I(sbi)->dirty_nat_cnt == 0 && + SIT_I(sbi)->dirty_sentries == 0 && + prefree_segments(sbi) == 0) { + flush_sit_entries(sbi, cpc); + clear_prefree_segments(sbi, cpc); + unblock_operations(sbi); + goto out; + } } /* @@ -1268,17 +1298,15 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); /* write cached NAT/SIT entries to NAT/SIT area */ - flush_nat_entries(sbi); + flush_nat_entries(sbi, cpc); flush_sit_entries(sbi, cpc); /* unlock all the fs_lock[] in do_checkpoint() */ err = do_checkpoint(sbi, cpc); - if (err) { + if (err) release_discard_addrs(sbi); - } else { + else clear_prefree_segments(sbi, cpc); - f2fs_wait_all_discard_bio(sbi); - } unblock_operations(sbi); stat_inc_cp_count(sbi->stat_info); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9ac262564fa6..1602b4bccae6 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -22,6 +22,7 @@ #include <linux/mm.h> #include <linux/memcontrol.h> #include <linux/cleancache.h> +#include <linux/sched/signal.h> #include "f2fs.h" #include "node.h" @@ -55,8 +56,10 @@ static void f2fs_read_end_io(struct bio *bio) int i; #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) + if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) { + f2fs_show_injection_info(FAULT_IO); bio->bi_error = -EIO; + } #endif if (f2fs_bio_encrypted(bio)) { @@ -93,6 +96,17 @@ static void f2fs_write_end_io(struct bio *bio) struct page *page = bvec->bv_page; enum count_type type = WB_DATA_TYPE(page); + if (IS_DUMMY_WRITTEN_PAGE(page)) { + set_page_private(page, (unsigned long)NULL); + ClearPagePrivate(page); + unlock_page(page); + mempool_free(page, sbi->write_io_dummy); + + if (unlikely(bio->bi_error)) + f2fs_stop_checkpoint(sbi, true); + continue; + } + fscrypt_pullback_bio_page(&page, true); if 
(unlikely(bio->bi_error)) { @@ -171,10 +185,46 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { if (!is_read_io(bio_op(bio))) { + unsigned int start; + if (f2fs_sb_mounted_blkzoned(sbi->sb) && current->plug && (type == DATA || type == NODE)) blk_finish_plug(current->plug); + + if (type != DATA && type != NODE) + goto submit_io; + + start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; + start %= F2FS_IO_SIZE(sbi); + + if (start == 0) + goto submit_io; + + /* fill dummy pages */ + for (; start < F2FS_IO_SIZE(sbi); start++) { + struct page *page = + mempool_alloc(sbi->write_io_dummy, + GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL); + f2fs_bug_on(sbi, !page); + + SetPagePrivate(page); + set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE); + lock_page(page); + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) + f2fs_bug_on(sbi, 1); + } + /* + * In the NODE case, we lose next block address chain. So, we + * need to do checkpoint in f2fs_sync_file. + */ + if (type == NODE) + set_sbi_flag(sbi, SBI_NEED_CP); } +submit_io: + if (is_read_io(bio_op(bio))) + trace_f2fs_submit_read_bio(sbi->sb, type, bio); + else + trace_f2fs_submit_write_bio(sbi->sb, type, bio); submit_bio(bio); } @@ -185,19 +235,19 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) if (!io->bio) return; + bio_set_op_attrs(io->bio, fio->op, fio->op_flags); + if (is_read_io(fio->op)) - trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); + trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); else - trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); - - bio_set_op_attrs(io->bio, fio->op, fio->op_flags); + trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); __submit_bio(io->sbi, io->bio, fio->type); io->bio = NULL; } -static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, - struct page *page, nid_t ino) +static bool __has_merged_page(struct f2fs_bio_info *io, + struct inode *inode, nid_t ino, pgoff_t idx) { struct bio_vec *bvec; struct page *target; @@ -206,7 +256,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, if (!io->bio) return false; - if (!inode && !page && !ino) + if (!inode && !ino) return true; bio_for_each_segment_all(bvec, io->bio, i) { @@ -216,10 +266,11 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, else target = fscrypt_control_page(bvec->bv_page); + if (idx != target->index) + continue; + if (inode && inode == target->mapping->host) return true; - if (page && page == target) - return true; if (ino && ino == ino_of_node(target)) return true; } @@ -228,22 +279,21 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, } static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode, - struct page *page, nid_t ino, - enum page_type type) + nid_t ino, pgoff_t idx, enum page_type type) { enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = &sbi->write_io[btype]; bool ret; down_read(&io->io_rwsem); - ret = __has_merged_page(io, inode, page, ino); + ret = __has_merged_page(io, inode, ino, idx); up_read(&io->io_rwsem); return ret; } static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, - struct inode *inode, struct page *page, - nid_t ino, enum page_type type, int rw) + struct inode *inode, nid_t ino, pgoff_t idx, + enum page_type type, int rw) { enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io; @@ -252,16 +302,16 @@ static void __f2fs_submit_merged_bio(struct 
f2fs_sb_info *sbi, down_write(&io->io_rwsem); - if (!__has_merged_page(io, inode, page, ino)) + if (!__has_merged_page(io, inode, ino, idx)) goto out; /* change META to META_FLUSH in the checkpoint procedure */ if (type >= META_FLUSH) { io->fio.type = META_FLUSH; io->fio.op = REQ_OP_WRITE; - io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO; + io->fio.op_flags = REQ_META | REQ_PRIO; if (!test_opt(sbi, NOBARRIER)) - io->fio.op_flags |= REQ_FUA; + io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; } __submit_merged_bio(io); out: @@ -271,15 +321,15 @@ out: void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type, int rw) { - __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw); + __f2fs_submit_merged_bio(sbi, NULL, 0, 0, type, rw); } void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi, - struct inode *inode, struct page *page, - nid_t ino, enum page_type type, int rw) + struct inode *inode, nid_t ino, pgoff_t idx, + enum page_type type, int rw) { - if (has_merged_page(sbi, inode, page, ino, type)) - __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw); + if (has_merged_page(sbi, inode, ino, idx, type)) + __f2fs_submit_merged_bio(sbi, inode, ino, idx, type, rw); } void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi) @@ -315,13 +365,14 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) return 0; } -void f2fs_submit_page_mbio(struct f2fs_io_info *fio) +int f2fs_submit_page_mbio(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io; bool is_read = is_read_io(fio->op); struct page *bio_page; + int err = 0; io = is_read ? &sbi->read_io : &sbi->write_io[btype]; @@ -331,6 +382,9 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) bio_page = fio->encrypted_page ? 
fio->encrypted_page : fio->page; + /* set submitted = 1 as a return value */ + fio->submitted = 1; + if (!is_read) inc_page_count(sbi, WB_DATA_TYPE(bio_page)); @@ -342,6 +396,13 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { + if ((fio->type == DATA || fio->type == NODE) && + fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { + err = -EAGAIN; + if (!is_read) + dec_page_count(sbi, WB_DATA_TYPE(bio_page)); + goto out_fail; + } io->bio = __bio_alloc(sbi, fio->new_blkaddr, BIO_MAX_PAGES, is_read); io->fio = *fio; @@ -355,9 +416,10 @@ alloc_new: io->last_block_in_bio = fio->new_blkaddr; f2fs_trace_ios(fio, 0); - +out_fail: up_write(&io->io_rwsem); trace_f2fs_submit_page_mbio(fio->page, fio); + return err; } static void __set_data_blkaddr(struct dnode_of_data *dn) @@ -453,7 +515,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) { - struct extent_info ei; + struct extent_info ei = {0,0,0}; struct inode *inode = dn->inode; if (f2fs_lookup_extent_cache(inode, index, &ei)) { @@ -470,7 +532,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct dnode_of_data dn; struct page *page; - struct extent_info ei; + struct extent_info ei = {0,0,0}; int err; struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), @@ -694,6 +756,9 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) struct f2fs_map_blocks map; int err = 0; + if (is_inode_flag_set(inode, FI_NO_PREALLOC)) + return 0; + map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); if (map.m_len > map.m_lblk) @@ -742,7 +807,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int err = 0, ofs = 1; unsigned int ofs_in_node, last_ofs_in_node; blkcnt_t prealloc; - struct extent_info ei; + struct extent_info ei = {0,0,0}; block_t blkaddr; if (!maxblocks) @@ -806,7 +871,7 @@ next_block: } if (err) goto sync_out; - map->m_flags = F2FS_MAP_NEW; + map->m_flags |= F2FS_MAP_NEW; blkaddr = dn.data_blkaddr; } else { if (flag == F2FS_GET_BLOCK_BMAP) { @@ -906,7 +971,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock, if (!err) { map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; - bh->b_size = map.m_len << inode->i_blkbits; + bh->b_size = (u64)map.m_len << inode->i_blkbits; } return err; } @@ -1088,7 +1153,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping, prefetchw(&page->flags); if (pages) { - page = list_entry(pages->prev, struct page, lru); + page = list_last_entry(pages, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, @@ -1207,7 +1272,7 @@ static int f2fs_read_data_pages(struct file *file, struct list_head *pages, unsigned nr_pages) { struct inode *inode = file->f_mapping->host; - struct page *page = list_entry(pages->prev, struct page, lru); + struct page *page = list_last_entry(pages, struct page, lru); trace_f2fs_readpages(inode, page, nr_pages); @@ -1288,8 +1353,8 @@ out_writepage: return err; } -static int f2fs_write_data_page(struct page *page, - struct writeback_control *wbc) +static int __write_data_page(struct page *page, bool *submitted, + struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -1307,6 +1372,7 @@ static int f2fs_write_data_page(struct 
page *page, .op_flags = wbc_to_write_flags(wbc), .page = page, .encrypted_page = NULL, + .submitted = false, }; trace_f2fs_writepage(page, DATA); @@ -1352,9 +1418,12 @@ write: goto redirty_out; err = -EAGAIN; - f2fs_lock_op(sbi); - if (f2fs_has_inline_data(inode)) + if (f2fs_has_inline_data(inode)) { err = f2fs_write_inline_data(inode, page); + if (!err) + goto out; + } + f2fs_lock_op(sbi); if (err == -EAGAIN) err = do_write_data_page(&fio); if (F2FS_I(inode)->last_disk_size < psize) @@ -1370,15 +1439,22 @@ out: ClearPageUptodate(page); if (wbc->for_reclaim) { - f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE); + f2fs_submit_merged_bio_cond(sbi, inode, 0, page->index, + DATA, WRITE); remove_dirty_inode(inode); + submitted = NULL; } unlock_page(page); f2fs_balance_fs(sbi, need_balance_fs); - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi))) { f2fs_submit_merged_bio(sbi, DATA, WRITE); + submitted = NULL; + } + + if (submitted) + *submitted = fio.submitted; return 0; @@ -1390,6 +1466,12 @@ redirty_out: return err; } +static int f2fs_write_data_page(struct page *page, + struct writeback_control *wbc) +{ + return __write_data_page(page, NULL, wbc); +} + /* * This function was copied from write_cche_pages from mm/page-writeback.c. * The major change is making write step of cold data page separately from @@ -1406,10 +1488,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping, pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; + pgoff_t last_idx = ULONG_MAX; int cycled; int range_whole = 0; int tag; - int nwritten = 0; pagevec_init(&pvec, 0); @@ -1446,6 +1528,7 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + bool submitted = false; if (page->index > end) { done = 1; @@ -1479,7 +1562,7 @@ continue_unlock: if (!clear_page_dirty_for_io(page)) goto continue_unlock; - ret = mapping->a_ops->writepage(page, wbc); + ret = __write_data_page(page, &submitted, wbc); if (unlikely(ret)) { /* * keep nr_to_write, since vfs uses this to @@ -1493,8 +1576,8 @@ continue_unlock: done_index = page->index + 1; done = 1; break; - } else { - nwritten++; + } else if (submitted) { + last_idx = page->index; } if (--wbc->nr_to_write <= 0 && @@ -1516,9 +1599,9 @@ continue_unlock: if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; - if (nwritten) + if (last_idx != ULONG_MAX) f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host, - NULL, 0, DATA, WRITE); + 0, last_idx, DATA, WRITE); return ret; } @@ -1591,14 +1674,15 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, struct dnode_of_data dn; struct page *ipage; bool locked = false; - struct extent_info ei; + struct extent_info ei = {0,0,0}; int err = 0; /* * we already allocated all the blocks, so we don't need to get * the block addresses when there is no need to fill the page. */ - if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE) + if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE && + !is_inode_flag_set(inode, FI_NO_PREALLOC)) return 0; if (f2fs_has_inline_data(inode) || @@ -1682,7 +1766,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, goto fail; } repeat: - page = grab_cache_page_write_begin(mapping, index, flags); + /* + * Do not use grab_cache_page_write_begin() to avoid deadlock due to + * wait_for_stable_page. Will wait that below with our IO control. 
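A little further down in this f2fs_write_begin() hunk, a new shortcut skips the read-modify-write of a page entirely: if the write starts on a page boundary and reaches at least up to the current i_size, no old data in that page is still valid beyond len, so the tail is simply zeroed. A sketch of just that decision; PAGE_SIZE and the sample offsets are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True when a buffered write may skip reading the old page contents:
 * it begins on a page boundary and extends to (or past) the old EOF,
 * so bytes beyond `len` in this page hold nothing worth preserving. */
static bool can_skip_page_read(unsigned long pos, unsigned long len,
                               unsigned long i_size)
{
    return (pos & (PAGE_SIZE - 1)) == 0 && pos + len >= i_size;
}

int main(void)
{
    /* Page-aligned append past EOF: zero the tail, no read needed. */
    printf("%d\n", can_skip_page_read(8192, 100, 6000));      /* 1 */
    /* Unaligned overwrite inside an existing file: must read first. */
    printf("%d\n", can_skip_page_read(8704, 100, 16384));     /* 0 */
    return 0;
}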
+ */ + page = pagecache_get_page(mapping, index, + FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); if (!page) { err = -ENOMEM; goto fail; @@ -1715,6 +1804,11 @@ repeat: if (len == PAGE_SIZE || PageUptodate(page)) return 0; + if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) { + zero_user_segment(page, len, PAGE_SIZE); + return 0; + } + if (blkaddr == NEW_ADDR) { zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); @@ -1768,7 +1862,7 @@ static int f2fs_write_end(struct file *file, * let generic_perform_write() try to copy data again through copied=0. */ if (!PageUptodate(page)) { - if (unlikely(copied != PAGE_SIZE)) + if (unlikely(copied != len)) copied = 0; else SetPageUptodate(page); @@ -1917,7 +2011,7 @@ static int f2fs_set_data_page_dirty(struct page *page) if (!PageUptodate(page)) SetPageUptodate(page); - if (f2fs_is_atomic_file(inode)) { + if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) { if (!IS_ATOMIC_WRITTEN_PAGE(page)) { register_inmem_page(inode, page); return 1; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index fbd5184140d0..a77df377e2e8 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -50,8 +50,16 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); + si->aw_cnt = atomic_read(&sbi->aw_cnt); + si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt); si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA); si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA); + if (SM_I(sbi) && SM_I(sbi)->fcc_info) + si->nr_flush = + atomic_read(&SM_I(sbi)->fcc_info->submit_flush); + if (SM_I(sbi) && SM_I(sbi)->dcc_info) + si->nr_discard = + atomic_read(&SM_I(sbi)->dcc_info->submit_discard); si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; si->rsvd_segs = reserved_segments(sbi); si->overp_segs = overprovision_segments(sbi); @@ -62,6 +70,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->inline_xattr = atomic_read(&sbi->inline_xattr); si->inline_inode = atomic_read(&sbi->inline_inode); si->inline_dir = atomic_read(&sbi->inline_dir); + si->append = sbi->im[APPEND_INO].ino_num; + si->update = sbi->im[UPDATE_INO].ino_num; si->orphans = sbi->im[ORPHAN_INO].ino_num; si->utilization = utilization(sbi); @@ -183,6 +193,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi) /* build nm */ si->base_mem += sizeof(struct f2fs_nm_info); si->base_mem += __bitmap_size(sbi, NAT_BITMAP); + si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); + si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; + si->base_mem += NM_I(sbi)->nat_blocks / 8; get_cache: si->cache_mem = 0; @@ -192,8 +205,10 @@ get_cache: si->cache_mem += sizeof(struct f2fs_gc_kthread); /* build merge flush thread */ - if (SM_I(sbi)->cmd_control_info) + if (SM_I(sbi)->fcc_info) si->cache_mem += sizeof(struct flush_cmd_control); + if (SM_I(sbi)->dcc_info) + si->cache_mem += sizeof(struct discard_cmd_control); /* free nids */ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] + @@ -254,8 +269,8 @@ static int stat_show(struct seq_file *s, void *v) si->inline_inode); seq_printf(s, " - Inline_dentry Inode: %u\n", si->inline_dir); - seq_printf(s, " - Orphan Inode: %u\n", - si->orphans); + seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n", + si->orphans, si->append, si->update); seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", si->main_area_segs, si->main_area_sections, 
si->main_area_zones); @@ -314,8 +329,11 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", si->ext_tree, si->zombie_tree, si->ext_node); seq_puts(s, "\nBalancing F2FS Async:\n"); - seq_printf(s, " - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n", - si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data); + seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: %4d, Discard: %4d)\n", + si->nr_wb_cp_data, si->nr_wb_data, + si->nr_flush, si->nr_discard); + seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d)\n", + si->inmem_pages, si->aw_cnt, si->max_aw_cnt); seq_printf(s, " - nodes: %4d in %4d\n", si->ndirty_node, si->node_pages); seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n", @@ -414,6 +432,9 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) atomic_set(&sbi->inline_dir, 0); atomic_set(&sbi->inplace_count, 0); + atomic_set(&sbi->aw_cnt, 0); + atomic_set(&sbi->max_aw_cnt, 0); + mutex_lock(&f2fs_stat_mutex); list_add_tail(&si->stat_list, &f2fs_stat_list); mutex_unlock(&f2fs_stat_mutex); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 18607fc5240d..4650c9b85de7 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -207,9 +207,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, f2fs_put_page(dentry_page, 0); } - if (!de && room && F2FS_I(dir)->chash != namehash) { - F2FS_I(dir)->chash = namehash; - F2FS_I(dir)->clevel = level; + /* This is to increase the speed of f2fs_create */ + if (!de && room) { + F2FS_I(dir)->task = current; + if (F2FS_I(dir)->chash != namehash) { + F2FS_I(dir)->chash = namehash; + F2FS_I(dir)->clevel = level; + } } return de; @@ -548,8 +552,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, start: #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) + if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) { + f2fs_show_injection_info(FAULT_DIR_DEPTH); return -ENOSPC; + } #endif if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) return -ENOSPC; @@ -646,14 +652,34 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode) { struct fscrypt_name fname; + struct page *page = NULL; + struct f2fs_dir_entry *de = NULL; int err; err = fscrypt_setup_filename(dir, name, 0, &fname); if (err) return err; - err = __f2fs_do_add_link(dir, &fname, inode, ino, mode); - + /* + * An immature stakable filesystem shows a race condition between lookup + * and create. If we have same task when doing lookup and create, it's + * definitely fine as expected by VFS normally. Otherwise, let's just + * verify on-disk dentry one more time, which guarantees filesystem + * consistency more. 
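In the fs/f2fs/extent_cache.c hunks just below, the repeated `tmp_node ? rb_entry(tmp_node, ...) : NULL` ternaries are collapsed into rb_entry_safe(), which folds the NULL test into the container_of step. A self-contained user-space sketch of that pattern (GNU C statement expressions, a simplified container_of, and invented type names):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* NULL in, NULL out; otherwise the address of the containing structure. */
#define entry_safe(ptr, type, member) \
    ({ typeof(ptr) ____ptr = (ptr); \
       ____ptr ? container_of(____ptr, type, member) : NULL; })

struct node {                   /* stand-in for struct rb_node */
    int dummy;
};

struct extent {                 /* stand-in for struct extent_node */
    unsigned int fofs;
    struct node rb;
};

int main(void)
{
    struct extent e = { .fofs = 42 };
    struct node *found = &e.rb;
    struct node *missing = NULL;

    struct extent *a = entry_safe(found, struct extent, rb);
    struct extent *b = entry_safe(missing, struct extent, rb);

    printf("a->fofs = %u, b = %p\n", a->fofs, (void *)b);
    return 0;
}

The gain is purely in readability: the neighbour lookups in __lookup_extent_tree_ret() no longer need a three-line ternary per direction.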
+ */ + if (current != F2FS_I(dir)->task) { + de = __f2fs_find_entry(dir, &fname, &page); + F2FS_I(dir)->task = NULL; + } + if (de) { + f2fs_dentry_kunmap(dir, page); + f2fs_put_page(page, 0); + err = -EEXIST; + } else if (IS_ERR(page)) { + err = PTR_ERR(page); + } else { + err = __f2fs_do_add_link(dir, &fname, inode, ino, mode); + } fscrypt_free_filename(&fname); return err; } diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 4db44da7ef69..c6934f014e0f 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -77,7 +77,7 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode) struct extent_tree *et; nid_t ino = inode->i_ino; - down_write(&sbi->extent_tree_lock); + mutex_lock(&sbi->extent_tree_lock); et = radix_tree_lookup(&sbi->extent_tree_root, ino); if (!et) { et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS); @@ -94,7 +94,7 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode) atomic_dec(&sbi->total_zombie_tree); list_del_init(&et->list); } - up_write(&sbi->extent_tree_lock); + mutex_unlock(&sbi->extent_tree_lock); /* never died until evict_inode */ F2FS_I(inode)->extent_tree = et; @@ -311,28 +311,24 @@ static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et, tmp_node = parent; if (parent && fofs > en->ei.fofs) tmp_node = rb_next(parent); - *next_ex = tmp_node ? - rb_entry(tmp_node, struct extent_node, rb_node) : NULL; + *next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node); tmp_node = parent; if (parent && fofs < en->ei.fofs) tmp_node = rb_prev(parent); - *prev_ex = tmp_node ? - rb_entry(tmp_node, struct extent_node, rb_node) : NULL; + *prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node); return NULL; lookup_neighbors: if (fofs == en->ei.fofs) { /* lookup prev node for merging backward later */ tmp_node = rb_prev(&en->rb_node); - *prev_ex = tmp_node ? - rb_entry(tmp_node, struct extent_node, rb_node) : NULL; + *prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node); } if (fofs == en->ei.fofs + en->ei.len - 1) { /* lookup next node for merging frontward later */ tmp_node = rb_next(&en->rb_node); - *next_ex = tmp_node ? 
- rb_entry(tmp_node, struct extent_node, rb_node) : NULL; + *next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node); } return en; } @@ -352,11 +348,12 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode, } if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) { - if (en) - __release_extent_node(sbi, et, prev_ex); next_ex->ei.fofs = ei->fofs; next_ex->ei.blk = ei->blk; next_ex->ei.len += ei->len; + if (en) + __release_extent_node(sbi, et, prev_ex); + en = next_ex; } @@ -416,7 +413,7 @@ do_insert: return en; } -static unsigned int f2fs_update_extent_tree_range(struct inode *inode, +static void f2fs_update_extent_tree_range(struct inode *inode, pgoff_t fofs, block_t blkaddr, unsigned int len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -429,7 +426,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, unsigned int pos = (unsigned int)fofs; if (!et) - return false; + return; trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len); @@ -437,7 +434,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, if (is_inode_flag_set(inode, FI_NO_EXTENT)) { write_unlock(&et->lock); - return false; + return; } prev = et->largest; @@ -492,9 +489,8 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, if (!next_en) { struct rb_node *node = rb_next(&en->rb_node); - next_en = node ? - rb_entry(node, struct extent_node, rb_node) - : NULL; + next_en = rb_entry_safe(node, struct extent_node, + rb_node); } if (parts) @@ -535,8 +531,6 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, __free_extent_tree(sbi, et); write_unlock(&et->lock); - - return !__is_extent_same(&prev, &et->largest); } unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) @@ -552,7 +546,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) if (!atomic_read(&sbi->total_zombie_tree)) goto free_node; - if (!down_write_trylock(&sbi->extent_tree_lock)) + if (!mutex_trylock(&sbi->extent_tree_lock)) goto out; /* 1. remove unreferenced extent tree */ @@ -574,11 +568,11 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) goto unlock_out; cond_resched(); } - up_write(&sbi->extent_tree_lock); + mutex_unlock(&sbi->extent_tree_lock); free_node: /* 2. 
remove LRU extent entries */ - if (!down_write_trylock(&sbi->extent_tree_lock)) + if (!mutex_trylock(&sbi->extent_tree_lock)) goto out; remained = nr_shrink - (node_cnt + tree_cnt); @@ -608,7 +602,7 @@ free_node: spin_unlock(&sbi->extent_lock); unlock_out: - up_write(&sbi->extent_tree_lock); + mutex_unlock(&sbi->extent_tree_lock); out: trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt); @@ -655,10 +649,10 @@ void f2fs_destroy_extent_tree(struct inode *inode) if (inode->i_nlink && !is_bad_inode(inode) && atomic_read(&et->node_cnt)) { - down_write(&sbi->extent_tree_lock); + mutex_lock(&sbi->extent_tree_lock); list_add_tail(&et->list, &sbi->zombie_list); atomic_inc(&sbi->total_zombie_tree); - up_write(&sbi->extent_tree_lock); + mutex_unlock(&sbi->extent_tree_lock); return; } @@ -666,12 +660,12 @@ void f2fs_destroy_extent_tree(struct inode *inode) node_cnt = f2fs_destroy_extent_node(inode); /* delete extent tree entry in radix tree */ - down_write(&sbi->extent_tree_lock); + mutex_lock(&sbi->extent_tree_lock); f2fs_bug_on(sbi, atomic_read(&et->node_cnt)); radix_tree_delete(&sbi->extent_tree_root, inode->i_ino); kmem_cache_free(extent_tree_slab, et); atomic_dec(&sbi->total_ext_tree); - up_write(&sbi->extent_tree_lock); + mutex_unlock(&sbi->extent_tree_lock); F2FS_I(inode)->extent_tree = NULL; @@ -718,7 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn, void init_extent_cache_info(struct f2fs_sb_info *sbi) { INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO); - init_rwsem(&sbi->extent_tree_lock); + mutex_init(&sbi->extent_tree_lock); INIT_LIST_HEAD(&sbi->extent_list); spin_lock_init(&sbi->extent_lock); atomic_set(&sbi->total_ext_tree, 0); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 069fc7277d8d..e849f83d6114 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -112,9 +112,9 @@ struct f2fs_mount_info { #define F2FS_HAS_FEATURE(sb, mask) \ ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0) #define F2FS_SET_FEATURE(sb, mask) \ - F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask) + (F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask)) #define F2FS_CLEAR_FEATURE(sb, mask) \ - F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask) + (F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)) /* * For checkpoint manager @@ -132,11 +132,14 @@ enum { CP_DISCARD, }; -#define DEF_BATCHED_TRIM_SECTIONS 2 +#define DEF_BATCHED_TRIM_SECTIONS 2048 #define BATCHED_TRIM_SEGMENTS(sbi) \ (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec) #define BATCHED_TRIM_BLOCKS(sbi) \ (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg) +#define MAX_DISCARD_BLOCKS(sbi) \ + ((1 << (sbi)->log_blocks_per_seg) * (sbi)->segs_per_sec) +#define DISCARD_ISSUE_RATE 8 #define DEF_CP_INTERVAL 60 /* 60 secs */ #define DEF_IDLE_INTERVAL 5 /* 5 secs */ @@ -185,11 +188,30 @@ struct discard_entry { int len; /* # of consecutive blocks of the discard */ }; -struct bio_entry { - struct list_head list; - struct bio *bio; - struct completion event; - int error; +enum { + D_PREP, + D_SUBMIT, + D_DONE, +}; + +struct discard_cmd { + struct list_head list; /* command list */ + struct completion wait; /* compleation */ + block_t lstart; /* logical start address */ + block_t len; /* length */ + struct bio *bio; /* bio */ + int state; /* state */ +}; + +struct discard_cmd_control { + struct task_struct *f2fs_issue_discard; /* discard thread */ + struct list_head discard_entry_list; /* 4KB discard entry list */ + int nr_discards; /* # of discards in the list */ + struct list_head discard_cmd_list; /* discard cmd list 
*/ + wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ + struct mutex cmd_lock; + int max_discards; /* max. discards to be issued */ + atomic_t submit_discard; /* # of issued discard */ }; /* for the list of fsync inodes, used only during recovery */ @@ -214,6 +236,7 @@ struct fsync_inode_entry { static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i) { int before = nats_in_cursum(journal); + journal->n_nats = cpu_to_le16(before + i); return before; } @@ -221,6 +244,7 @@ static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i) static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i) { int before = sits_in_cursum(journal); + journal->n_sits = cpu_to_le16(before + i); return before; } @@ -306,12 +330,14 @@ static inline void make_dentry_ptr(struct inode *inode, if (type == 1) { struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src; + d->max = NR_DENTRY_IN_BLOCK; d->bitmap = &t->dentry_bitmap; d->dentry = t->dentry; d->filename = t->filename; } else { struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src; + d->max = NR_INLINE_DENTRY; d->bitmap = &t->dentry_bitmap; d->dentry = t->dentry; @@ -438,8 +464,8 @@ struct f2fs_inode_info { atomic_t dirty_pages; /* # of dirty pages */ f2fs_hash_t chash; /* hash value of given file name */ unsigned int clevel; /* maximum level of given file name */ + struct task_struct *task; /* lookup and create consistency */ nid_t i_xattr_nid; /* node id that contains xattrs */ - unsigned long long xattr_ver; /* cp version of xattr modification */ loff_t last_disk_size; /* lastly written file size */ struct list_head dirty_list; /* dirty list for dirs and files */ @@ -474,13 +500,6 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs, ei->len = len; } -static inline bool __is_extent_same(struct extent_info *ei1, - struct extent_info *ei2) -{ - return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk && - ei1->len == ei2->len); -} - static inline bool __is_extent_mergeable(struct extent_info *back, struct extent_info *front) { @@ -500,7 +519,7 @@ static inline bool __is_front_mergeable(struct extent_info *cur, return __is_extent_mergeable(cur, front); } -extern void f2fs_mark_inode_dirty_sync(struct inode *, bool); +extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); static inline void __try_update_largest_extent(struct inode *inode, struct extent_tree *et, struct extent_node *en) { @@ -532,6 +551,7 @@ struct f2fs_nm_info { struct list_head nat_entries; /* cached nat entry list (clean) */ unsigned int nat_cnt; /* the # of cached nat entries */ unsigned int dirty_nat_cnt; /* total num of nat entries in set */ + unsigned int nat_blocks; /* # of nat blocks */ /* free node ids management */ struct radix_tree_root free_nid_root;/* root of the free_nid cache */ @@ -539,9 +559,19 @@ struct f2fs_nm_info { unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */ spinlock_t nid_list_lock; /* protect nid lists ops */ struct mutex build_lock; /* lock for build free nids */ + unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; + unsigned char *nat_block_bitmap; /* for checkpoint */ char *nat_bitmap; /* NAT bitmap pointer */ + + unsigned int nat_bits_blocks; /* # of nat bits blocks */ + unsigned char *nat_bits; /* NAT bits blocks */ + unsigned char *full_nat_bits; /* full NAT pages */ + unsigned char *empty_nat_bits; /* empty NAT pages */ +#ifdef CONFIG_F2FS_CHECK_FS + char *nat_bitmap_mir; /* NAT bitmap mirror */ +#endif 
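The free_nid_bitmap member added to struct f2fs_nm_info above is declared as `unsigned char (*)[NAT_ENTRY_BITMAP_SIZE]`, which reads as one fixed-size bitmap per NAT block, kept in a single contiguous allocation and indexed two-dimensionally. A small user-space sketch of that pointer-to-array idiom; the sizes are placeholders, not f2fs constants:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_BLOCK  455                       /* illustrative only */
#define BYTES_PER_BLOCK ((BITS_PER_BLOCK + 7) / 8)
#define NR_BLOCKS       16

static void set_bit_in(unsigned char (*map)[BYTES_PER_BLOCK],
                       unsigned int block, unsigned int bit)
{
    map[block][bit >> 3] |= 1u << (bit & 7);
}

static int test_bit_in(unsigned char (*map)[BYTES_PER_BLOCK],
                       unsigned int block, unsigned int bit)
{
    return (map[block][bit >> 3] >> (bit & 7)) & 1;
}

int main(void)
{
    /* One calloc() covers every per-block bitmap; map[b] is block b's bitmap. */
    unsigned char (*map)[BYTES_PER_BLOCK] = calloc(NR_BLOCKS, sizeof(*map));

    if (!map)
        return 1;

    set_bit_in(map, 3, 100);
    printf("block 3, bit 100: %d\n", test_bit_in(map, 3, 100));   /* 1 */
    printf("block 3, bit 101: %d\n", test_bit_in(map, 3, 101));   /* 0 */

    free(map);
    return 0;
}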
int bitmap_size; /* bitmap size */ }; @@ -632,12 +662,6 @@ struct f2fs_sm_info { /* a threshold to reclaim prefree segments */ unsigned int rec_prefree_segments; - /* for small discard management */ - struct list_head discard_list; /* 4KB discard list */ - struct list_head wait_list; /* linked with issued discard bio */ - int nr_discards; /* # of discards in the list */ - int max_discards; /* max. discards to be issued */ - /* for batched trimming */ unsigned int trim_sections; /* # of sections to trim */ @@ -648,8 +672,10 @@ struct f2fs_sm_info { unsigned int min_fsync_blocks; /* threshold for fsync */ /* for flush command control */ - struct flush_cmd_control *cmd_control_info; + struct flush_cmd_control *fcc_info; + /* for discard command control */ + struct discard_cmd_control *dcc_info; }; /* @@ -708,6 +734,7 @@ struct f2fs_io_info { block_t old_blkaddr; /* old block address before Cow */ struct page *page; /* page to be written */ struct page *encrypted_page; /* encrypted page */ + bool submitted; /* indicate IO submission */ }; #define is_read_io(rw) (rw == READ) @@ -787,6 +814,8 @@ struct f2fs_sb_info { struct f2fs_bio_info read_io; /* for read bios */ struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */ struct mutex wio_mutex[NODE + 1]; /* bio ordering for NODE/DATA */ + int write_io_size_bits; /* Write IO size bits */ + mempool_t *write_io_dummy; /* Dummy pages */ /* for checkpoint */ struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ @@ -811,7 +840,7 @@ struct f2fs_sb_info { /* for extent tree cache */ struct radix_tree_root extent_tree_root;/* cache extent cache entries */ - struct rw_semaphore extent_tree_lock; /* locking extent radix tree */ + struct mutex extent_tree_lock; /* locking extent radix tree */ struct list_head extent_list; /* lru list for shrinker */ spinlock_t extent_lock; /* locking extent lru list */ atomic_t total_ext_tree; /* extent tree count */ @@ -858,6 +887,9 @@ struct f2fs_sb_info { struct f2fs_gc_kthread *gc_thread; /* GC thread */ unsigned int cur_victim_sec; /* current victim section num */ + /* threshold for converting bg victims for fg */ + u64 fggc_threshold; + /* maximum # of trials to find a victim segment for SSR and GC */ unsigned int max_victim_search; @@ -877,6 +909,8 @@ struct f2fs_sb_info { atomic_t inline_xattr; /* # of inline_xattr inodes */ atomic_t inline_inode; /* # of inline_data inodes */ atomic_t inline_dir; /* # of inline_dentry inodes */ + atomic_t aw_cnt; /* # of atomic writes */ + atomic_t max_aw_cnt; /* max # of atomic writes */ int bg_gc; /* background gc calls */ unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ #endif @@ -908,6 +942,10 @@ struct f2fs_sb_info { }; #ifdef CONFIG_F2FS_FAULT_INJECTION +#define f2fs_show_injection_info(type) \ + printk("%sF2FS-fs : inject %s in %s of %pF\n", \ + KERN_INFO, fault_name[type], \ + __func__, __builtin_return_address(0)) static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) { struct f2fs_fault_info *ffi = &sbi->fault_info; @@ -921,10 +959,6 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) atomic_inc(&ffi->inject_ops); if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { atomic_set(&ffi->inject_ops, 0); - printk("%sF2FS-fs : inject %s in %pF\n", - KERN_INFO, - fault_name[type], - __builtin_return_address(0)); return true; } return false; @@ -1089,6 +1123,12 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) return le64_to_cpu(cp->checkpoint_ver); } +static inline __u64 
cur_cp_crc(struct f2fs_checkpoint *cp) +{ + size_t crc_offset = le32_to_cpu(cp->checksum_offset); + return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); +} + static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) { unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); @@ -1133,6 +1173,27 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) spin_unlock(&sbi->cp_lock); } +static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) +{ + set_sbi_flag(sbi, SBI_NEED_FSCK); + + if (lock) + spin_lock(&sbi->cp_lock); + __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); + kfree(NM_I(sbi)->nat_bits); + NM_I(sbi)->nat_bits = NULL; + if (lock) + spin_unlock(&sbi->cp_lock); +} + +static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, + struct cp_control *cpc) +{ + bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); + + return (cpc) ? (cpc->reason == CP_UMOUNT) && set : set; +} + static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) { down_read(&sbi->cp_rwsem); @@ -1212,8 +1273,10 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, blkcnt_t diff; #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_BLOCK)) + if (time_to_inject(sbi, FAULT_BLOCK)) { + f2fs_show_injection_info(FAULT_BLOCK); return false; + } #endif /* * let's increase this in prior to actual block count change in order @@ -1449,11 +1512,14 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, { #ifdef CONFIG_F2FS_FAULT_INJECTION struct page *page = find_lock_page(mapping, index); + if (page) return page; - if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { + f2fs_show_injection_info(FAULT_PAGE_ALLOC); return NULL; + } #endif if (!for_write) return grab_cache_page(mapping, index); @@ -1532,6 +1598,7 @@ static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, static inline bool IS_INODE(struct page *page) { struct f2fs_node *p = F2FS_NODE(page); + return RAW_IS_INODE(p); } @@ -1545,6 +1612,7 @@ static inline block_t datablock_addr(struct page *node_page, { struct f2fs_node *raw_node; __le32 *addr_array; + raw_node = F2FS_NODE(node_page); addr_array = blkaddr_in_node(raw_node); return le32_to_cpu(addr_array[offset]); @@ -1628,6 +1696,7 @@ enum { FI_UPDATE_WRITE, /* inode has in-place-update data */ FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ + FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */ FI_VOLATILE_FILE, /* indicate volatile file */ FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ FI_DROP_CACHE, /* drop dirty page cache */ @@ -1635,6 +1704,7 @@ enum { FI_INLINE_DOTS, /* indicate inline dot dentries */ FI_DO_DEFRAG, /* indicate defragment is running */ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ + FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ }; static inline void __mark_inode_dirty_flag(struct inode *inode, @@ -1779,6 +1849,7 @@ static inline unsigned int addrs_per_inode(struct inode *inode) static inline void *inline_xattr_addr(struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); + return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS]); } @@ -1817,6 +1888,11 @@ static inline bool f2fs_is_atomic_file(struct inode *inode) return is_inode_flag_set(inode, FI_ATOMIC_FILE); } +static inline bool f2fs_is_commit_atomic_write(struct inode *inode) +{ + return 
is_inode_flag_set(inode, FI_ATOMIC_COMMIT); +} + static inline bool f2fs_is_volatile_file(struct inode *inode) { return is_inode_flag_set(inode, FI_VOLATILE_FILE); @@ -1835,6 +1911,7 @@ static inline bool f2fs_is_drop_cache(struct inode *inode) static inline void *inline_data_addr(struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); + return (void *)&(ri->i_addr[1]); } @@ -1918,8 +1995,10 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_KMALLOC)) + if (time_to_inject(sbi, FAULT_KMALLOC)) { + f2fs_show_injection_info(FAULT_KMALLOC); return NULL; + } #endif return kmalloc(size, flags); } @@ -1957,29 +2036,30 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags) /* * file.c */ -int f2fs_sync_file(struct file *, loff_t, loff_t, int); -void truncate_data_blocks(struct dnode_of_data *); -int truncate_blocks(struct inode *, u64, bool); -int f2fs_truncate(struct inode *); -int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); -int f2fs_setattr(struct dentry *, struct iattr *); -int truncate_hole(struct inode *, pgoff_t, pgoff_t); -int truncate_data_blocks_range(struct dnode_of_data *, int); -long f2fs_ioctl(struct file *, unsigned int, unsigned long); -long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long); +int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); +void truncate_data_blocks(struct dnode_of_data *dn); +int truncate_blocks(struct inode *inode, u64 from, bool lock); +int f2fs_truncate(struct inode *inode); +int f2fs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); +int f2fs_setattr(struct dentry *dentry, struct iattr *attr); +int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); +int truncate_data_blocks_range(struct dnode_of_data *dn, int count); +long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* * inode.c */ -void f2fs_set_inode_flags(struct inode *); -struct inode *f2fs_iget(struct super_block *, unsigned long); -struct inode *f2fs_iget_retry(struct super_block *, unsigned long); -int try_to_free_nats(struct f2fs_sb_info *, int); -int update_inode(struct inode *, struct page *); -int update_inode_page(struct inode *); -int f2fs_write_inode(struct inode *, struct writeback_control *); -void f2fs_evict_inode(struct inode *); -void handle_failed_inode(struct inode *); +void f2fs_set_inode_flags(struct inode *inode); +struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); +struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); +int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); +int update_inode(struct inode *inode, struct page *node_page); +int update_inode_page(struct inode *inode); +int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); +void f2fs_evict_inode(struct inode *inode); +void handle_failed_inode(struct inode *inode); /* * namei.c @@ -1989,40 +2069,47 @@ struct dentry *f2fs_get_parent(struct dentry *child); /* * dir.c */ -void set_de_type(struct f2fs_dir_entry *, umode_t); -unsigned char get_de_type(struct f2fs_dir_entry *); -struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *, - f2fs_hash_t, int *, struct f2fs_dentry_ptr *); -int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *, - unsigned int, struct fscrypt_str *); -void 
do_make_empty_dir(struct inode *, struct inode *, - struct f2fs_dentry_ptr *); -struct page *init_inode_metadata(struct inode *, struct inode *, - const struct qstr *, const struct qstr *, struct page *); -void update_parent_metadata(struct inode *, struct inode *, unsigned int); -int room_for_filename(const void *, int, int); -void f2fs_drop_nlink(struct inode *, struct inode *); -struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *, - struct page **); -struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *, - struct page **); -struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); -ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **); -void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, - struct page *, struct inode *); -int update_dent_inode(struct inode *, struct inode *, const struct qstr *); -void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *, - const struct qstr *, f2fs_hash_t , unsigned int); -int f2fs_add_regular_entry(struct inode *, const struct qstr *, - const struct qstr *, struct inode *, nid_t, umode_t); -int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *, - nid_t, umode_t); -int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t, - umode_t); -void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *, - struct inode *); -int f2fs_do_tmpfile(struct inode *, struct inode *); -bool f2fs_empty_dir(struct inode *); +void set_de_type(struct f2fs_dir_entry *de, umode_t mode); +unsigned char get_de_type(struct f2fs_dir_entry *de); +struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname, + f2fs_hash_t namehash, int *max_slots, + struct f2fs_dentry_ptr *d); +int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, + unsigned int start_pos, struct fscrypt_str *fstr); +void do_make_empty_dir(struct inode *inode, struct inode *parent, + struct f2fs_dentry_ptr *d); +struct page *init_inode_metadata(struct inode *inode, struct inode *dir, + const struct qstr *new_name, + const struct qstr *orig_name, struct page *dpage); +void update_parent_metadata(struct inode *dir, struct inode *inode, + unsigned int current_depth); +int room_for_filename(const void *bitmap, int slots, int max_slots); +void f2fs_drop_nlink(struct inode *dir, struct inode *inode); +struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, + struct fscrypt_name *fname, struct page **res_page); +struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, + const struct qstr *child, struct page **res_page); +struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); +ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, + struct page **page); +void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, + struct page *page, struct inode *inode); +int update_dent_inode(struct inode *inode, struct inode *to, + const struct qstr *name); +void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, + const struct qstr *name, f2fs_hash_t name_hash, + unsigned int bit_pos); +int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, + const struct qstr *orig_name, + struct inode *inode, nid_t ino, umode_t mode); +int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname, + struct inode *inode, nid_t ino, umode_t mode); +int __f2fs_add_link(struct inode *dir, const struct qstr *name, + struct inode *inode, nid_t ino, umode_t mode); +void 
f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, + struct inode *dir, struct inode *inode); +int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); +bool f2fs_empty_dir(struct inode *dir); static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) { @@ -2033,18 +2120,18 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) /* * super.c */ -int f2fs_inode_dirtied(struct inode *, bool); -void f2fs_inode_synced(struct inode *); -int f2fs_commit_super(struct f2fs_sb_info *, bool); -int f2fs_sync_fs(struct super_block *, int); +int f2fs_inode_dirtied(struct inode *inode, bool sync); +void f2fs_inode_synced(struct inode *inode); +int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); +int f2fs_sync_fs(struct super_block *sb, int sync); extern __printf(3, 4) -void f2fs_msg(struct super_block *, const char *, const char *, ...); +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); int sanity_check_ckpt(struct f2fs_sb_info *sbi); /* * hash.c */ -f2fs_hash_t f2fs_dentry_hash(const struct qstr *); +f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info); /* * node.c @@ -2052,163 +2139,183 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *); struct dnode_of_data; struct node_info; -bool available_free_memory(struct f2fs_sb_info *, int); -int need_dentry_mark(struct f2fs_sb_info *, nid_t); -bool is_checkpointed_node(struct f2fs_sb_info *, nid_t); -bool need_inode_block_update(struct f2fs_sb_info *, nid_t); -void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); -pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t); -int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int); -int truncate_inode_blocks(struct inode *, pgoff_t); -int truncate_xattr_node(struct inode *, struct page *); -int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t); -int remove_inode_page(struct inode *); -struct page *new_inode_page(struct inode *); -struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *); -void ra_node_page(struct f2fs_sb_info *, nid_t); -struct page *get_node_page(struct f2fs_sb_info *, pgoff_t); -struct page *get_node_page_ra(struct page *, int); -void move_node_page(struct page *, int); -int fsync_node_pages(struct f2fs_sb_info *, struct inode *, - struct writeback_control *, bool); -int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *); -void build_free_nids(struct f2fs_sb_info *, bool); -bool alloc_nid(struct f2fs_sb_info *, nid_t *); -void alloc_nid_done(struct f2fs_sb_info *, nid_t); -void alloc_nid_failed(struct f2fs_sb_info *, nid_t); -int try_to_free_nids(struct f2fs_sb_info *, int); -void recover_inline_xattr(struct inode *, struct page *); -void recover_xattr_data(struct inode *, struct page *, block_t); -int recover_inode_page(struct f2fs_sb_info *, struct page *); -int restore_node_summary(struct f2fs_sb_info *, unsigned int, - struct f2fs_summary_block *); -void flush_nat_entries(struct f2fs_sb_info *); -int build_node_manager(struct f2fs_sb_info *); -void destroy_node_manager(struct f2fs_sb_info *); +bool available_free_memory(struct f2fs_sb_info *sbi, int type); +int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); +bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); +bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); +void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni); +pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); +int 
get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); +int truncate_inode_blocks(struct inode *inode, pgoff_t from); +int truncate_xattr_node(struct inode *inode, struct page *page); +int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino); +int remove_inode_page(struct inode *inode); +struct page *new_inode_page(struct inode *inode); +struct page *new_node_page(struct dnode_of_data *dn, + unsigned int ofs, struct page *ipage); +void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); +struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); +struct page *get_node_page_ra(struct page *parent, int start); +void move_node_page(struct page *node_page, int gc_type); +int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, + struct writeback_control *wbc, bool atomic); +int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc); +void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); +bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); +void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); +void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); +int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); +void recover_inline_xattr(struct inode *inode, struct page *page); +int recover_xattr_data(struct inode *inode, struct page *page, + block_t blkaddr); +int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); +int restore_node_summary(struct f2fs_sb_info *sbi, + unsigned int segno, struct f2fs_summary_block *sum); +void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); +int build_node_manager(struct f2fs_sb_info *sbi); +void destroy_node_manager(struct f2fs_sb_info *sbi); int __init create_node_manager_caches(void); void destroy_node_manager_caches(void); /* * segment.c */ -void register_inmem_page(struct inode *, struct page *); -void drop_inmem_pages(struct inode *); -int commit_inmem_pages(struct inode *); -void f2fs_balance_fs(struct f2fs_sb_info *, bool); -void f2fs_balance_fs_bg(struct f2fs_sb_info *); -int f2fs_issue_flush(struct f2fs_sb_info *); -int create_flush_cmd_control(struct f2fs_sb_info *); -void destroy_flush_cmd_control(struct f2fs_sb_info *, bool); -void invalidate_blocks(struct f2fs_sb_info *, block_t); -bool is_checkpointed_data(struct f2fs_sb_info *, block_t); -void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t); -void f2fs_wait_all_discard_bio(struct f2fs_sb_info *); -void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *); -void release_discard_addrs(struct f2fs_sb_info *); -int npages_for_summary_flush(struct f2fs_sb_info *, bool); -void allocate_new_segments(struct f2fs_sb_info *); -int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *); -struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); -void update_meta_page(struct f2fs_sb_info *, void *, block_t); -void write_meta_page(struct f2fs_sb_info *, struct page *); -void write_node_page(unsigned int, struct f2fs_io_info *); -void write_data_page(struct dnode_of_data *, struct f2fs_io_info *); -void rewrite_data_page(struct f2fs_io_info *); -void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *, - block_t, block_t, bool, bool); -void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *, - block_t, block_t, unsigned char, bool, bool); -void allocate_data_block(struct f2fs_sb_info *, struct page *, - block_t, block_t *, struct f2fs_summary *, int); -void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool); 
-void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t); -void write_data_summaries(struct f2fs_sb_info *, block_t); -void write_node_summaries(struct f2fs_sb_info *, block_t); -int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int); -void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *); -int build_segment_manager(struct f2fs_sb_info *); -void destroy_segment_manager(struct f2fs_sb_info *); +void register_inmem_page(struct inode *inode, struct page *page); +void drop_inmem_pages(struct inode *inode); +int commit_inmem_pages(struct inode *inode); +void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); +void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi); +int f2fs_issue_flush(struct f2fs_sb_info *sbi); +int create_flush_cmd_control(struct f2fs_sb_info *sbi); +void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); +void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); +bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); +void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); +void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr); +void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); +void release_discard_addrs(struct f2fs_sb_info *sbi); +int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); +void allocate_new_segments(struct f2fs_sb_info *sbi); +int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); +bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); +struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); +void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr); +void write_meta_page(struct f2fs_sb_info *sbi, struct page *page); +void write_node_page(unsigned int nid, struct f2fs_io_info *fio); +void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio); +void rewrite_data_page(struct f2fs_io_info *fio); +void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, + block_t old_blkaddr, block_t new_blkaddr, + bool recover_curseg, bool recover_newaddr); +void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, + block_t old_addr, block_t new_addr, + unsigned char version, bool recover_curseg, + bool recover_newaddr); +void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, + block_t old_blkaddr, block_t *new_blkaddr, + struct f2fs_summary *sum, int type); +void f2fs_wait_on_page_writeback(struct page *page, + enum page_type type, bool ordered); +void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi, + block_t blkaddr); +void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); +void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); +int lookup_journal_in_cursum(struct f2fs_journal *journal, int type, + unsigned int val, int alloc); +void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); +int build_segment_manager(struct f2fs_sb_info *sbi); +void destroy_segment_manager(struct f2fs_sb_info *sbi); int __init create_segment_manager_caches(void); void destroy_segment_manager_caches(void); /* * checkpoint.c */ -void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool); -struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); -struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); -struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t); -bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int); -int 
ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool); -void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t); -long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); -void add_ino_entry(struct f2fs_sb_info *, nid_t, int type); -void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type); -void release_ino_entry(struct f2fs_sb_info *, bool); -bool exist_written_data(struct f2fs_sb_info *, nid_t, int); -int f2fs_sync_inode_meta(struct f2fs_sb_info *); -int acquire_orphan_inode(struct f2fs_sb_info *); -void release_orphan_inode(struct f2fs_sb_info *); -void add_orphan_inode(struct inode *); -void remove_orphan_inode(struct f2fs_sb_info *, nid_t); -int recover_orphan_inodes(struct f2fs_sb_info *); -int get_valid_checkpoint(struct f2fs_sb_info *); -void update_dirty_page(struct inode *, struct page *); -void remove_dirty_inode(struct inode *); -int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type); -int write_checkpoint(struct f2fs_sb_info *, struct cp_control *); -void init_ino_entry_info(struct f2fs_sb_info *); +void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); +struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); +struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); +struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); +bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); +int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, + int type, bool sync); +void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index); +long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, + long nr_to_write); +void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); +void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); +void release_ino_entry(struct f2fs_sb_info *sbi, bool all); +bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); +int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); +int acquire_orphan_inode(struct f2fs_sb_info *sbi); +void release_orphan_inode(struct f2fs_sb_info *sbi); +void add_orphan_inode(struct inode *inode); +void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); +int recover_orphan_inodes(struct f2fs_sb_info *sbi); +int get_valid_checkpoint(struct f2fs_sb_info *sbi); +void update_dirty_page(struct inode *inode, struct page *page); +void remove_dirty_inode(struct inode *inode); +int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); +int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); +void init_ino_entry_info(struct f2fs_sb_info *sbi); int __init create_checkpoint_caches(void); void destroy_checkpoint_caches(void); /* * data.c */ -void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int); -void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *, - struct page *, nid_t, enum page_type, int); -void f2fs_flush_merged_bios(struct f2fs_sb_info *); -int f2fs_submit_page_bio(struct f2fs_io_info *); -void f2fs_submit_page_mbio(struct f2fs_io_info *); -struct block_device *f2fs_target_device(struct f2fs_sb_info *, - block_t, struct bio *); -int f2fs_target_device_index(struct f2fs_sb_info *, block_t); -void set_data_blkaddr(struct dnode_of_data *); -void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t); -int reserve_new_blocks(struct dnode_of_data *, blkcnt_t); -int reserve_new_block(struct dnode_of_data *); -int f2fs_get_block(struct dnode_of_data *, pgoff_t); -int f2fs_preallocate_blocks(struct kiocb *, struct 
iov_iter *); -int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); -struct page *get_read_data_page(struct inode *, pgoff_t, int, bool); -struct page *find_data_page(struct inode *, pgoff_t); -struct page *get_lock_data_page(struct inode *, pgoff_t, bool); -struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); -int do_write_data_page(struct f2fs_io_info *); -int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int); -int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64); -void f2fs_set_page_dirty_nobuffers(struct page *); -void f2fs_invalidate_page(struct page *, unsigned int, unsigned int); -int f2fs_release_page(struct page *, gfp_t); +void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type, + int rw); +void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi, + struct inode *inode, nid_t ino, pgoff_t idx, + enum page_type type, int rw); +void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi); +int f2fs_submit_page_bio(struct f2fs_io_info *fio); +int f2fs_submit_page_mbio(struct f2fs_io_info *fio); +struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, + block_t blk_addr, struct bio *bio); +int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); +void set_data_blkaddr(struct dnode_of_data *dn); +void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); +int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); +int reserve_new_block(struct dnode_of_data *dn); +int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); +int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); +int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); +struct page *get_read_data_page(struct inode *inode, pgoff_t index, + int op_flags, bool for_write); +struct page *find_data_page(struct inode *inode, pgoff_t index); +struct page *get_lock_data_page(struct inode *inode, pgoff_t index, + bool for_write); +struct page *get_new_data_page(struct inode *inode, + struct page *ipage, pgoff_t index, bool new_i_size); +int do_write_data_page(struct f2fs_io_info *fio); +int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, + int create, int flag); +int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 len); +void f2fs_set_page_dirty_nobuffers(struct page *page); +void f2fs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length); +int f2fs_release_page(struct page *page, gfp_t wait); #ifdef CONFIG_MIGRATION -int f2fs_migrate_page(struct address_space *, struct page *, struct page *, - enum migrate_mode); +int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode); #endif /* * gc.c */ -int start_gc_thread(struct f2fs_sb_info *); -void stop_gc_thread(struct f2fs_sb_info *); -block_t start_bidx_of_node(unsigned int, struct inode *); -int f2fs_gc(struct f2fs_sb_info *, bool, bool); -void build_gc_manager(struct f2fs_sb_info *); +int start_gc_thread(struct f2fs_sb_info *sbi); +void stop_gc_thread(struct f2fs_sb_info *sbi); +block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode); +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background); +void build_gc_manager(struct f2fs_sb_info *sbi); /* * recovery.c */ -int recover_fsync_data(struct f2fs_sb_info *, bool); -bool space_for_roll_forward(struct f2fs_sb_info *); +int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); +bool 
space_for_roll_forward(struct f2fs_sb_info *sbi); /* * debug.c @@ -2227,8 +2334,9 @@ struct f2fs_stat_info { unsigned int ndirty_dirs, ndirty_files, ndirty_all; int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids; int total_count, utilization; - int bg_gc, nr_wb_cp_data, nr_wb_data; - int inline_xattr, inline_inode, inline_dir, orphans; + int bg_gc, nr_wb_cp_data, nr_wb_data, nr_flush, nr_discard; + int inline_xattr, inline_inode, inline_dir, append, update, orphans; + int aw_cnt, max_aw_cnt; unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; unsigned int bimodal, avg_vblocks; int util_free, util_valid, util_invalid; @@ -2300,6 +2408,17 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) ((sbi)->block_count[(curseg)->alloc_type]++) #define stat_inc_inplace_blocks(sbi) \ (atomic_inc(&(sbi)->inplace_count)) +#define stat_inc_atomic_write(inode) \ + (atomic_inc(&F2FS_I_SB(inode)->aw_cnt)) +#define stat_dec_atomic_write(inode) \ + (atomic_dec(&F2FS_I_SB(inode)->aw_cnt)) +#define stat_update_max_atomic_write(inode) \ + do { \ + int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt); \ + int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ + if (cur > max) \ + atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ + } while (0) #define stat_inc_seg_count(sbi, type, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ @@ -2332,8 +2451,8 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0; \ } while (0) -int f2fs_build_stats(struct f2fs_sb_info *); -void f2fs_destroy_stats(struct f2fs_sb_info *); +int f2fs_build_stats(struct f2fs_sb_info *sbi); +void f2fs_destroy_stats(struct f2fs_sb_info *sbi); int __init f2fs_create_root_stats(void); void f2fs_destroy_root_stats(void); #else @@ -2353,6 +2472,9 @@ void f2fs_destroy_root_stats(void); #define stat_dec_inline_inode(inode) #define stat_inc_inline_dir(inode) #define stat_dec_inline_dir(inode) +#define stat_inc_atomic_write(inode) +#define stat_dec_atomic_write(inode) +#define stat_update_max_atomic_write(inode) #define stat_inc_seg_type(sbi, curseg) #define stat_inc_block_count(sbi, curseg) #define stat_inc_inplace_blocks(sbi) @@ -2382,49 +2504,55 @@ extern struct kmem_cache *inode_entry_slab; /* * inline.c */ -bool f2fs_may_inline_data(struct inode *); -bool f2fs_may_inline_dentry(struct inode *); -void read_inline_data(struct page *, struct page *); -bool truncate_inline_inode(struct page *, u64); -int f2fs_read_inline_data(struct inode *, struct page *); -int f2fs_convert_inline_page(struct dnode_of_data *, struct page *); -int f2fs_convert_inline_inode(struct inode *); -int f2fs_write_inline_data(struct inode *, struct page *); -bool recover_inline_data(struct inode *, struct page *); -struct f2fs_dir_entry *find_in_inline_dir(struct inode *, - struct fscrypt_name *, struct page **); -int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *); -int f2fs_add_inline_entry(struct inode *, const struct qstr *, - const struct qstr *, struct inode *, nid_t, umode_t); -void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *, - struct inode *, struct inode *); -bool f2fs_empty_inline_dir(struct inode *); -int f2fs_read_inline_dir(struct file *, struct dir_context *, - struct fscrypt_str *); -int f2fs_inline_data_fiemap(struct inode *, - struct fiemap_extent_info *, __u64, __u64); +bool f2fs_may_inline_data(struct inode *inode); +bool f2fs_may_inline_dentry(struct inode *inode); +void 
read_inline_data(struct page *page, struct page *ipage); +bool truncate_inline_inode(struct page *ipage, u64 from); +int f2fs_read_inline_data(struct inode *inode, struct page *page); +int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); +int f2fs_convert_inline_inode(struct inode *inode); +int f2fs_write_inline_data(struct inode *inode, struct page *page); +bool recover_inline_data(struct inode *inode, struct page *npage); +struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, + struct fscrypt_name *fname, struct page **res_page); +int make_empty_inline_dir(struct inode *inode, struct inode *parent, + struct page *ipage); +int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, + const struct qstr *orig_name, + struct inode *inode, nid_t ino, umode_t mode); +void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, + struct inode *dir, struct inode *inode); +bool f2fs_empty_inline_dir(struct inode *dir); +int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, + struct fscrypt_str *fstr); +int f2fs_inline_data_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, + __u64 start, __u64 len); /* * shrinker.c */ -unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *); -unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *); -void f2fs_join_shrinker(struct f2fs_sb_info *); -void f2fs_leave_shrinker(struct f2fs_sb_info *); +unsigned long f2fs_shrink_count(struct shrinker *shrink, + struct shrink_control *sc); +unsigned long f2fs_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc); +void f2fs_join_shrinker(struct f2fs_sb_info *sbi); +void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); /* * extent_cache.c */ -unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int); -bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *); -void f2fs_drop_extent_tree(struct inode *); -unsigned int f2fs_destroy_extent_node(struct inode *); -void f2fs_destroy_extent_tree(struct inode *); -bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *); -void f2fs_update_extent_cache(struct dnode_of_data *); +unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); +bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext); +void f2fs_drop_extent_tree(struct inode *inode); +unsigned int f2fs_destroy_extent_node(struct inode *inode); +void f2fs_destroy_extent_tree(struct inode *inode); +bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, + struct extent_info *ei); +void f2fs_update_extent_cache(struct dnode_of_data *dn); void f2fs_update_extent_cache_range(struct dnode_of_data *dn, - pgoff_t, block_t, unsigned int); -void init_extent_cache_info(struct f2fs_sb_info *); + pgoff_t fofs, block_t blkaddr, unsigned int len); +void init_extent_cache_info(struct f2fs_sb_info *sbi); int __init create_extent_cache(void); void destroy_extent_cache(void); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 1edc86e874e3..5f7317875a67 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -20,6 +20,7 @@ #include <linux/uaccess.h> #include <linux/mount.h> #include <linux/pagevec.h> +#include <linux/uio.h> #include <linux/uuid.h> #include <linux/file.h> @@ -140,8 +141,6 @@ static inline bool need_do_checkpoint(struct inode *inode) need_cp = true; else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) need_cp = true; - else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) - need_cp = true; 
else if (test_opt(sbi, FASTBOOT)) need_cp = true; else if (sbi->active_logs == 2) @@ -167,7 +166,6 @@ static void try_to_fix_pino(struct inode *inode) nid_t pino; down_write(&fi->i_sem); - fi->xattr_ver = 0; if (file_wrong_pino(inode) && inode->i_nlink == 1 && get_parent_ino(inode, &pino)) { f2fs_i_pino_write(inode, pino); @@ -276,7 +274,8 @@ sync_nodes: flush_out: remove_ino_entry(sbi, ino, UPDATE_INO); clear_inode_flag(inode, FI_UPDATE_WRITE); - ret = f2fs_issue_flush(sbi); + if (!atomic) + ret = f2fs_issue_flush(sbi); f2fs_update_time(sbi, REQ_TIME); out: trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); @@ -567,8 +566,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) } if (f2fs_has_inline_data(inode)) { - if (truncate_inline_inode(ipage, from)) - set_page_dirty(ipage); + truncate_inline_inode(ipage, from); + if (from == 0) + clear_inode_flag(inode, FI_DATA_EXIST); f2fs_put_page(ipage, 1); truncate_page = true; goto out; @@ -633,10 +633,10 @@ int f2fs_truncate(struct inode *inode) return 0; } -int f2fs_getattr(struct vfsmount *mnt, - struct dentry *dentry, struct kstat *stat) +int f2fs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blocks <<= 3; return 0; @@ -1541,6 +1541,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) if (ret) clear_inode_flag(inode, FI_ATOMIC_FILE); out: + stat_inc_atomic_write(inode); + stat_update_max_atomic_write(inode); inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -1564,15 +1566,18 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) goto err_out; if (f2fs_is_atomic_file(inode)) { - clear_inode_flag(inode, FI_ATOMIC_FILE); ret = commit_inmem_pages(inode); - if (ret) { - set_inode_flag(inode, FI_ATOMIC_FILE); + if (ret) goto err_out; + + ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); + if (!ret) { + clear_inode_flag(inode, FI_ATOMIC_FILE); + stat_dec_atomic_write(inode); } + } else { + ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); } - - ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); err_out: inode_unlock(inode); mnt_drop_write_file(filp); @@ -1870,7 +1875,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, { struct inode *inode = file_inode(filp); struct f2fs_map_blocks map = { .m_next_pgofs = NULL }; - struct extent_info ei; + struct extent_info ei = {0,0,0}; pgoff_t pg_start, pg_end; unsigned int blk_per_seg = sbi->blocks_per_seg; unsigned int total = 0, sec_num; @@ -2250,8 +2255,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret > 0) { - int err = f2fs_preallocate_blocks(iocb, from); + int err; + + if (iov_iter_fault_in_readable(from, iov_iter_count(from))) + set_inode_flag(inode, FI_NO_PREALLOC); + err = f2fs_preallocate_blocks(iocb, from); if (err) { inode_unlock(inode); return err; @@ -2259,6 +2268,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) blk_start_plug(&plug); ret = __generic_file_write_iter(iocb, from); blk_finish_plug(&plug); + clear_inode_flag(inode, FI_NO_PREALLOC); } inode_unlock(inode); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 88bfc3dff496..418fd9881646 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -48,8 +48,10 @@ static int gc_thread_func(void *data) } #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_CHECKPOINT)) + 
if (time_to_inject(sbi, FAULT_CHECKPOINT)) { + f2fs_show_injection_info(FAULT_CHECKPOINT); f2fs_stop_checkpoint(sbi, false); + } #endif /* @@ -166,7 +168,8 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, p->ofs_unit = sbi->segs_per_sec; } - if (p->max_search > sbi->max_victim_search) + /* we need to check every dirty segment in the FG_GC case */ + if (gc_type != FG_GC && p->max_search > sbi->max_victim_search) p->max_search = sbi->max_victim_search; p->offset = sbi->last_victim[p->gc_mode]; @@ -199,6 +202,10 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { if (sec_usage_check(sbi, secno)) continue; + + if (no_fggc_candidate(sbi, secno)) + continue; + clear_bit(secno, dirty_i->victim_secmap); return secno * sbi->segs_per_sec; } @@ -237,6 +244,16 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); } +static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + unsigned int valid_blocks = + get_valid_blocks(sbi, segno, sbi->segs_per_sec); + + return IS_DATASEG(get_seg_entry(sbi, segno)->type) ? + valid_blocks * 2 : valid_blocks; +} + static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, struct victim_sel_policy *p) { @@ -245,7 +262,7 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, /* alloc_mode == LFS */ if (p->gc_mode == GC_GREEDY) - return get_valid_blocks(sbi, segno, sbi->segs_per_sec); + return get_greedy_cost(sbi, segno); else return get_cb_cost(sbi, segno); } @@ -322,13 +339,15 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, nsearched++; } - secno = GET_SECNO(sbi, segno); if (sec_usage_check(sbi, secno)) goto next; if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) goto next; + if (gc_type == FG_GC && p.alloc_mode == LFS && + no_fggc_candidate(sbi, secno)) + goto next; cost = get_gc_cost(sbi, segno, &p); @@ -569,6 +588,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx, if (!check_valid_map(F2FS_I_SB(inode), segno, off)) goto out; + if (f2fs_is_atomic_file(inode)) + goto out; + set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE); if (err) @@ -661,6 +683,9 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type, if (!check_valid_map(F2FS_I_SB(inode), segno, off)) goto out; + if (f2fs_is_atomic_file(inode)) + goto out; + if (gc_type == BG_GC) { if (PageWriteback(page)) goto out; @@ -921,8 +946,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background) cpc.reason = __get_cp_reason(sbi); gc_more: - segno = NULL_SEGNO; - if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) goto stop; if (unlikely(f2fs_cp_error(sbi))) { @@ -930,30 +953,23 @@ gc_more: goto stop; } - if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) { - gc_type = FG_GC; + if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { /* - * If there is no victim and no prefree segment but still not - * enough free sections, we should flush dent/node blocks and do - * garbage collections. + * For example, if there are many prefree_segments below a given + * threshold, we can make them free by checkpoint. Then, we + * secure free segments which don't need fggc any more. 
*/ - if (__get_victim(sbi, &segno, gc_type) || - prefree_segments(sbi)) { - ret = write_checkpoint(sbi, &cpc); - if (ret) - goto stop; - segno = NULL_SEGNO; - } else if (has_not_enough_free_secs(sbi, 0, 0)) { - ret = write_checkpoint(sbi, &cpc); - if (ret) - goto stop; - } - } else if (gc_type == BG_GC && !background) { - /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */ - goto stop; + ret = write_checkpoint(sbi, &cpc); + if (ret) + goto stop; + if (has_not_enough_free_secs(sbi, 0, 0)) + gc_type = FG_GC; } - if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type)) + /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */ + if (gc_type == BG_GC && !background) + goto stop; + if (!__get_victim(sbi, &segno, gc_type)) goto stop; ret = 0; @@ -983,5 +999,16 @@ stop: void build_gc_manager(struct f2fs_sb_info *sbi) { + u64 main_count, resv_count, ovp_count, blocks_per_sec; + DIRTY_I(sbi)->v_ops = &default_v_ops; + + /* threshold of # of valid blocks in a section for victims of FG_GC */ + main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg; + resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg; + ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; + blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec; + + sbi->fggc_threshold = div64_u64((main_count - ovp_count) * blocks_per_sec, + (main_count - resv_count)); } diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index af06bda51a54..24bb8213d974 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -373,8 +373,10 @@ void f2fs_evict_inode(struct inode *inode) goto no_delete; #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_EVICT_INODE)) + if (time_to_inject(sbi, FAULT_EVICT_INODE)) { + f2fs_show_injection_info(FAULT_EVICT_INODE); goto no_delete; + } #endif remove_ino_entry(sbi, inode->i_ino, APPEND_INO); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 11cabcadb1a3..98f00a3a7f50 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -321,9 +321,9 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, if (err) goto err_out; } - if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) && - (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && - !fscrypt_has_permitted_context(dir, inode)) { + if (f2fs_encrypted_inode(dir) && + (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && + !fscrypt_has_permitted_context(dir, inode)) { bool nokey = f2fs_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode); err = nokey ? 
-ENOKEY : -EPERM; @@ -663,6 +663,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, bool is_old_inline = f2fs_has_inline_dentry(old_dir); int err = -ENOENT; + if ((f2fs_encrypted_inode(old_dir) && + !fscrypt_has_encryption_key(old_dir)) || + (f2fs_encrypted_inode(new_dir) && + !fscrypt_has_encryption_key(new_dir))) + return -ENOKEY; + if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) && !fscrypt_has_permitted_context(new_dir, old_inode)) { err = -EPERM; @@ -843,6 +849,12 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, int old_nlink = 0, new_nlink = 0; int err = -ENOENT; + if ((f2fs_encrypted_inode(old_dir) && + !fscrypt_has_encryption_key(old_dir)) || + (f2fs_encrypted_inode(new_dir) && + !fscrypt_has_encryption_key(new_dir))) + return -ENOKEY; + if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) && (old_dir != new_dir) && (!fscrypt_has_permitted_context(new_dir, old_inode) || diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index b9078fdb3743..94967171dee8 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -245,12 +245,24 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) return need_update; } -static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid) +static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, + bool no_fail) { struct nat_entry *new; - new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); - f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); + if (no_fail) { + new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); + f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); + } else { + new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS); + if (!new) + return NULL; + if (radix_tree_insert(&nm_i->nat_root, nid, new)) { + kmem_cache_free(nat_entry_slab, new); + return NULL; + } + } + memset(new, 0, sizeof(struct nat_entry)); nat_set_nid(new, nid); nat_reset_flag(new); @@ -267,8 +279,9 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, e = __lookup_nat_cache(nm_i, nid); if (!e) { - e = grab_nat_entry(nm_i, nid); - node_info_from_raw_nat(&e->ni, ne); + e = grab_nat_entry(nm_i, nid, false); + if (e) + node_info_from_raw_nat(&e->ni, ne); } else { f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || nat_get_blkaddr(e) != @@ -286,7 +299,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { - e = grab_nat_entry(nm_i, ni->nid); + e = grab_nat_entry(nm_i, ni->nid, true); copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { @@ -325,6 +338,9 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, set_nat_flag(e, IS_CHECKPOINTED, false); __set_nat_cache_dirty(nm_i, e); + if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR) + clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits); + /* update fsync_mark if its inode nat entry is still alive */ if (ni->nid != ni->ino) e = __lookup_nat_cache(nm_i, ni->ino); @@ -958,9 +974,6 @@ int truncate_xattr_node(struct inode *inode, struct page *page) f2fs_i_xnid_write(inode, 0); - /* need to do checkpoint during fsync */ - F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi)); - set_new_dnode(&dn, inode, page, npage, nid); if (page) @@ -1018,7 +1031,7 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); - 
struct node_info old_ni, new_ni; + struct node_info new_ni; struct page *page; int err; @@ -1033,13 +1046,15 @@ struct page *new_node_page(struct dnode_of_data *dn, err = -ENOSPC; goto fail; } - - get_node_info(sbi, dn->nid, &old_ni); - - /* Reinitialize old_ni with new node page */ - f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR); - new_ni = old_ni; +#ifdef CONFIG_F2FS_CHECK_FS + get_node_info(sbi, dn->nid, &new_ni); + f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); +#endif + new_ni.nid = dn->nid; new_ni.ino = dn->inode->i_ino; + new_ni.blk_addr = NULL_ADDR; + new_ni.flag = 0; + new_ni.version = 0; set_node_addr(sbi, &new_ni, NEW_ADDR, false); f2fs_wait_on_page_writeback(page, NODE, true); @@ -1305,16 +1320,99 @@ continue_unlock: return last_page; } +static int __write_node_page(struct page *page, bool atomic, bool *submitted, + struct writeback_control *wbc) +{ + struct f2fs_sb_info *sbi = F2FS_P_SB(page); + nid_t nid; + struct node_info ni; + struct f2fs_io_info fio = { + .sbi = sbi, + .type = NODE, + .op = REQ_OP_WRITE, + .op_flags = wbc_to_write_flags(wbc), + .page = page, + .encrypted_page = NULL, + .submitted = false, + }; + + trace_f2fs_writepage(page, NODE); + + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) + goto redirty_out; + if (unlikely(f2fs_cp_error(sbi))) + goto redirty_out; + + /* get old block addr of this node page */ + nid = nid_of_node(page); + f2fs_bug_on(sbi, page->index != nid); + + if (wbc->for_reclaim) { + if (!down_read_trylock(&sbi->node_write)) + goto redirty_out; + } else { + down_read(&sbi->node_write); + } + + get_node_info(sbi, nid, &ni); + + /* This page is already truncated */ + if (unlikely(ni.blk_addr == NULL_ADDR)) { + ClearPageUptodate(page); + dec_page_count(sbi, F2FS_DIRTY_NODES); + up_read(&sbi->node_write); + unlock_page(page); + return 0; + } + + if (atomic && !test_opt(sbi, NOBARRIER)) + fio.op_flags |= REQ_PREFLUSH | REQ_FUA; + + set_page_writeback(page); + fio.old_blkaddr = ni.blk_addr; + write_node_page(nid, &fio); + set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); + dec_page_count(sbi, F2FS_DIRTY_NODES); + up_read(&sbi->node_write); + + if (wbc->for_reclaim) { + f2fs_submit_merged_bio_cond(sbi, page->mapping->host, 0, + page->index, NODE, WRITE); + submitted = NULL; + } + + unlock_page(page); + + if (unlikely(f2fs_cp_error(sbi))) { + f2fs_submit_merged_bio(sbi, NODE, WRITE); + submitted = NULL; + } + if (submitted) + *submitted = fio.submitted; + + return 0; + +redirty_out: + redirty_page_for_writepage(wbc, page); + return AOP_WRITEPAGE_ACTIVATE; +} + +static int f2fs_write_node_page(struct page *page, + struct writeback_control *wbc) +{ + return __write_node_page(page, false, NULL, wbc); +} + int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic) { pgoff_t index, end; + pgoff_t last_idx = ULONG_MAX; struct pagevec pvec; int ret = 0; struct page *last_page = NULL; bool marked = false; nid_t ino = inode->i_ino; - int nwritten = 0; if (atomic) { last_page = last_fsync_dnode(sbi, ino); @@ -1336,6 +1434,7 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { f2fs_put_page(last_page, 0); @@ -1384,13 +1483,15 @@ continue_unlock: if (!clear_page_dirty_for_io(page)) goto continue_unlock; - ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc); + ret = __write_node_page(page, atomic && + page == last_page, + &submitted, wbc); if (ret) { unlock_page(page); f2fs_put_page(last_page, 0); break; 
- } else { - nwritten++; + } else if (submitted) { + last_idx = page->index; } if (page == last_page) { @@ -1416,8 +1517,9 @@ continue_unlock: goto retry; } out: - if (nwritten) - f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE); + if (last_idx != ULONG_MAX) + f2fs_submit_merged_bio_cond(sbi, NULL, ino, last_idx, + NODE, WRITE); return ret ? -EIO: 0; } @@ -1445,6 +1547,7 @@ next_step: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { pagevec_release(&pvec); @@ -1498,9 +1601,10 @@ continue_unlock: set_fsync_mark(page, 0); set_dentry_mark(page, 0); - if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) + ret = __write_node_page(page, false, &submitted, wbc); + if (ret) unlock_page(page); - else + else if (submitted) nwritten++; if (--wbc->nr_to_write == 0) @@ -1564,72 +1668,6 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) return ret; } -static int f2fs_write_node_page(struct page *page, - struct writeback_control *wbc) -{ - struct f2fs_sb_info *sbi = F2FS_P_SB(page); - nid_t nid; - struct node_info ni; - struct f2fs_io_info fio = { - .sbi = sbi, - .type = NODE, - .op = REQ_OP_WRITE, - .op_flags = wbc_to_write_flags(wbc), - .page = page, - .encrypted_page = NULL, - }; - - trace_f2fs_writepage(page, NODE); - - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) - goto redirty_out; - if (unlikely(f2fs_cp_error(sbi))) - goto redirty_out; - - /* get old block addr of this node page */ - nid = nid_of_node(page); - f2fs_bug_on(sbi, page->index != nid); - - if (wbc->for_reclaim) { - if (!down_read_trylock(&sbi->node_write)) - goto redirty_out; - } else { - down_read(&sbi->node_write); - } - - get_node_info(sbi, nid, &ni); - - /* This page is already truncated */ - if (unlikely(ni.blk_addr == NULL_ADDR)) { - ClearPageUptodate(page); - dec_page_count(sbi, F2FS_DIRTY_NODES); - up_read(&sbi->node_write); - unlock_page(page); - return 0; - } - - set_page_writeback(page); - fio.old_blkaddr = ni.blk_addr; - write_node_page(nid, &fio); - set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); - dec_page_count(sbi, F2FS_DIRTY_NODES); - up_read(&sbi->node_write); - - if (wbc->for_reclaim) - f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE); - - unlock_page(page); - - if (unlikely(f2fs_cp_error(sbi))) - f2fs_submit_merged_bio(sbi, NODE, WRITE); - - return 0; - -redirty_out: - redirty_page_for_writepage(wbc, page); - return AOP_WRITEPAGE_ACTIVATE; -} - static int f2fs_write_node_pages(struct address_space *mapping, struct writeback_control *wbc) { @@ -1727,7 +1765,8 @@ static void __remove_nid_from_list(struct f2fs_sb_info *sbi, radix_tree_delete(&nm_i->free_nid_root, i->nid); } -static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) +/* return if the nid is recognized as free */ +static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; @@ -1736,14 +1775,14 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) /* 0 nid should not be used */ if (unlikely(nid == 0)) - return 0; + return false; if (build) { /* do not add allocated nids */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) - return 0; + return false; } i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); @@ -1752,7 +1791,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) if (radix_tree_preload(GFP_NOFS)) 
{ kmem_cache_free(free_nid_slab, i); - return 0; + return true; } spin_lock(&nm_i->nid_list_lock); @@ -1761,9 +1800,9 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) radix_tree_preload_end(); if (err) { kmem_cache_free(free_nid_slab, i); - return 0; + return true; } - return 1; + return true; } static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) @@ -1784,17 +1823,36 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) kmem_cache_free(free_nid_slab, i); } +void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); + unsigned int nid_ofs = nid - START_NID(nid); + + if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) + return; + + if (set) + set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + else + clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); +} + static void scan_nat_page(struct f2fs_sb_info *sbi, struct page *nat_page, nid_t start_nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nat_block *nat_blk = page_address(nat_page); block_t blk_addr; + unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); int i; + set_bit_le(nat_ofs, nm_i->nat_block_bitmap); + i = start_nid % NAT_ENTRY_PER_BLOCK; for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { + bool freed = false; if (unlikely(start_nid >= nm_i->max_nid)) break; @@ -1802,11 +1860,106 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); f2fs_bug_on(sbi, blk_addr == NEW_ADDR); if (blk_addr == NULL_ADDR) - add_free_nid(sbi, start_nid, true); + freed = add_free_nid(sbi, start_nid, true); + update_free_nid_bitmap(sbi, start_nid, freed); + } +} + +static void scan_free_nid_bits(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); + struct f2fs_journal *journal = curseg->journal; + unsigned int i, idx; + + down_read(&nm_i->nat_tree_lock); + + for (i = 0; i < nm_i->nat_blocks; i++) { + if (!test_bit_le(i, nm_i->nat_block_bitmap)) + continue; + for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { + nid_t nid; + + if (!test_bit_le(idx, nm_i->free_nid_bitmap[i])) + continue; + + nid = i * NAT_ENTRY_PER_BLOCK + idx; + add_free_nid(sbi, nid, true); + + if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) + goto out; + } + } +out: + down_read(&curseg->journal_rwsem); + for (i = 0; i < nats_in_cursum(journal); i++) { + block_t addr; + nid_t nid; + + addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); + nid = le32_to_cpu(nid_in_journal(journal, i)); + if (addr == NULL_ADDR) + add_free_nid(sbi, nid, true); + else + remove_free_nid(sbi, nid); } + up_read(&curseg->journal_rwsem); + up_read(&nm_i->nat_tree_lock); } -static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync) +static int scan_nat_bits(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct page *page; + unsigned int i = 0; + nid_t nid; + + if (!enabled_nat_bits(sbi, NULL)) + return -EAGAIN; + + down_read(&nm_i->nat_tree_lock); +check_empty: + i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); + if (i >= nm_i->nat_blocks) { + i = 0; + goto check_partial; + } + + for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK; + nid++) { + if (unlikely(nid >= nm_i->max_nid)) + break; + add_free_nid(sbi, nid, true); + } + + if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) + goto out; + i++; + goto check_empty; + +check_partial: + i 
= find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); + if (i >= nm_i->nat_blocks) { + disable_nat_bits(sbi, true); + up_read(&nm_i->nat_tree_lock); + return -EINVAL; + } + + nid = i * NAT_ENTRY_PER_BLOCK; + page = get_current_nat_page(sbi, nid); + scan_nat_page(sbi, page, nid); + f2fs_put_page(page, 1); + + if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) { + i++; + goto check_partial; + } +out: + up_read(&nm_i->nat_tree_lock); + return 0; +} + +static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); @@ -1821,6 +1974,29 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync) if (!sync && !available_free_memory(sbi, FREE_NIDS)) return; + if (!mount) { + /* try to find free nids in free_nid_bitmap */ + scan_free_nid_bits(sbi); + + if (nm_i->nid_cnt[FREE_NID_LIST]) + return; + + /* try to find free nids with nat_bits */ + if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST]) + return; + } + + /* find next valid candidate */ + if (enabled_nat_bits(sbi, NULL)) { + int idx = find_next_zero_bit_le(nm_i->full_nat_bits, + nm_i->nat_blocks, 0); + + if (idx >= nm_i->nat_blocks) + set_sbi_flag(sbi, SBI_NEED_FSCK); + else + nid = idx * NAT_ENTRY_PER_BLOCK; + } + /* readahead nat pages to be scanned */ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT, true); @@ -1863,10 +2039,10 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync) nm_i->ra_nid_pages, META_NAT, false); } -void build_free_nids(struct f2fs_sb_info *sbi, bool sync) +void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { mutex_lock(&NM_I(sbi)->build_lock); - __build_free_nids(sbi, sync); + __build_free_nids(sbi, sync, mount); mutex_unlock(&NM_I(sbi)->build_lock); } @@ -1881,8 +2057,10 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) struct free_nid *i = NULL; retry: #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_ALLOC_NID)) + if (time_to_inject(sbi, FAULT_ALLOC_NID)) { + f2fs_show_injection_info(FAULT_ALLOC_NID); return false; + } #endif spin_lock(&nm_i->nid_list_lock); @@ -1902,13 +2080,16 @@ retry: i->state = NID_ALLOC; __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); nm_i->available_nids--; + + update_free_nid_bitmap(sbi, *nid, false); + spin_unlock(&nm_i->nid_list_lock); return true; } spin_unlock(&nm_i->nid_list_lock); /* Let's scan nat pages and its caches to get free nids */ - build_free_nids(sbi, true); + build_free_nids(sbi, true, false); goto retry; } @@ -1956,6 +2137,8 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) nm_i->available_nids++; + update_free_nid_bitmap(sbi, nid, true); + spin_unlock(&nm_i->nid_list_lock); if (need_free) @@ -2018,18 +2201,18 @@ update_inode: f2fs_put_page(ipage, 1); } -void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) +int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; nid_t new_xnid = nid_of_node(page); struct node_info ni; + struct page *xpage; - /* 1: invalidate the previous xattr nid */ if (!prev_xnid) goto recover_xnid; - /* Deallocate node address */ + /* 1: invalidate the previous xattr nid */ get_node_info(sbi, prev_xnid, &ni); f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR); invalidate_blocks(sbi, ni.blk_addr); @@ -2037,19 +2220,27 @@ void recover_xattr_data(struct inode *inode, struct page *page, 
block_t blkaddr) set_node_addr(sbi, &ni, NULL_ADDR, false); recover_xnid: - /* 2: allocate new xattr nid */ + /* 2: update xattr nid in inode */ + remove_free_nid(sbi, new_xnid); + f2fs_i_xnid_write(inode, new_xnid); if (unlikely(!inc_valid_node_count(sbi, inode))) f2fs_bug_on(sbi, 1); + update_inode_page(inode); + + /* 3: update and set xattr node page dirty */ + xpage = grab_cache_page(NODE_MAPPING(sbi), new_xnid); + if (!xpage) + return -ENOMEM; + + memcpy(F2FS_NODE(xpage), F2FS_NODE(page), PAGE_SIZE); - remove_free_nid(sbi, new_xnid); get_node_info(sbi, new_xnid, &ni); ni.ino = inode->i_ino; set_node_addr(sbi, &ni, NEW_ADDR, false); - f2fs_i_xnid_write(inode, new_xnid); + set_page_dirty(xpage); + f2fs_put_page(xpage, 1); - /* 3: update xattr blkaddr */ - refresh_sit_entry(sbi, NEW_ADDR, blkaddr); - set_node_addr(sbi, &ni, blkaddr, false); + return 0; } int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) @@ -2152,7 +2343,7 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) ne = __lookup_nat_cache(nm_i, nid); if (!ne) { - ne = grab_nat_entry(nm_i, nid); + ne = grab_nat_entry(nm_i, nid, true); node_info_from_raw_nat(&ne->ni, &raw_ne); } @@ -2192,8 +2383,39 @@ add_out: list_add_tail(&nes->set_list, head); } +void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, + struct page *page) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; + struct f2fs_nat_block *nat_blk = page_address(page); + int valid = 0; + int i; + + if (!enabled_nat_bits(sbi, NULL)) + return; + + for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { + if (start_nid == 0 && i == 0) + valid++; + if (nat_blk->entries[i].block_addr) + valid++; + } + if (valid == 0) { + set_bit_le(nat_index, nm_i->empty_nat_bits); + clear_bit_le(nat_index, nm_i->full_nat_bits); + return; + } + + clear_bit_le(nat_index, nm_i->empty_nat_bits); + if (valid == NAT_ENTRY_PER_BLOCK) + set_bit_le(nat_index, nm_i->full_nat_bits); + else + clear_bit_le(nat_index, nm_i->full_nat_bits); +} + static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, - struct nat_entry_set *set) + struct nat_entry_set *set, struct cp_control *cpc) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; @@ -2208,7 +2430,8 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, * #1, flush nat entries to journal in current hot data summary block. * #2, flush nat entries to nat page. */ - if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) + if (enabled_nat_bits(sbi, cpc) || + !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) to_journal = false; if (to_journal) { @@ -2244,14 +2467,21 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, add_free_nid(sbi, nid, false); spin_lock(&NM_I(sbi)->nid_list_lock); NM_I(sbi)->available_nids++; + update_free_nid_bitmap(sbi, nid, true); + spin_unlock(&NM_I(sbi)->nid_list_lock); + } else { + spin_lock(&NM_I(sbi)->nid_list_lock); + update_free_nid_bitmap(sbi, nid, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } } - if (to_journal) + if (to_journal) { up_write(&curseg->journal_rwsem); - else + } else { + __update_nat_bits(sbi, start_nid, page); f2fs_put_page(page, 1); + } f2fs_bug_on(sbi, set->entry_cnt); @@ -2262,7 +2492,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, /* * This function is called during the checkpointing process. 
*/ -void flush_nat_entries(struct f2fs_sb_info *sbi) +void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); @@ -2283,7 +2513,8 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) * entries, remove all entries from journal and merge them * into nat entry set. */ - if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) + if (enabled_nat_bits(sbi, cpc) || + !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, @@ -2297,27 +2528,69 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) /* flush dirty nats in nat entry set */ list_for_each_entry_safe(set, tmp, &sets, set_list) - __flush_nat_entry_set(sbi, set); + __flush_nat_entry_set(sbi, set, cpc); up_write(&nm_i->nat_tree_lock); f2fs_bug_on(sbi, nm_i->dirty_nat_cnt); } +static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; + unsigned int i; + __u64 cp_ver = cur_cp_version(ckpt); + block_t nat_bits_addr; + + if (!enabled_nat_bits(sbi, NULL)) + return 0; + + nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 + + F2FS_BLKSIZE - 1); + nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, + GFP_KERNEL); + if (!nm_i->nat_bits) + return -ENOMEM; + + nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - + nm_i->nat_bits_blocks; + for (i = 0; i < nm_i->nat_bits_blocks; i++) { + struct page *page = get_meta_page(sbi, nat_bits_addr++); + + memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), + page_address(page), F2FS_BLKSIZE); + f2fs_put_page(page, 1); + } + + cp_ver |= (cur_cp_crc(ckpt) << 32); + if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { + disable_nat_bits(sbi, true); + return 0; + } + + nm_i->full_nat_bits = nm_i->nat_bits + 8; + nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; + + f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); + return 0; +} + static int init_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned char *version_bitmap; - unsigned int nat_segs, nat_blocks; + unsigned int nat_segs; + int err; nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); /* segment_count_nat includes pair segment so divide to 2. 
*/ nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; - nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); - - nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; + nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); + nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; /* not used nids: 0, node, meta, (and root counted as valid node) */ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - @@ -2350,6 +2623,34 @@ static int init_node_manager(struct f2fs_sb_info *sbi) GFP_KERNEL); if (!nm_i->nat_bitmap) return -ENOMEM; + + err = __get_nat_bitmaps(sbi); + if (err) + return err; + +#ifdef CONFIG_F2FS_CHECK_FS + nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, + GFP_KERNEL); + if (!nm_i->nat_bitmap_mir) + return -ENOMEM; +#endif + + return 0; +} + +int init_free_nid_cache(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + + nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks * + NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); + if (!nm_i->free_nid_bitmap) + return -ENOMEM; + + nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8, + GFP_KERNEL); + if (!nm_i->nat_block_bitmap) + return -ENOMEM; return 0; } @@ -2365,7 +2666,11 @@ int build_node_manager(struct f2fs_sb_info *sbi) if (err) return err; - build_free_nids(sbi, true); + err = init_free_nid_cache(sbi); + if (err) + return err; + + build_free_nids(sbi, true, true); return 0; } @@ -2423,7 +2728,14 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) } up_write(&nm_i->nat_tree_lock); + kvfree(nm_i->nat_block_bitmap); + kvfree(nm_i->free_nid_bitmap); + kfree(nm_i->nat_bitmap); + kfree(nm_i->nat_bits); +#ifdef CONFIG_F2FS_CHECK_FS + kfree(nm_i->nat_bitmap_mir); +#endif sbi->nm_info = NULL; kfree(nm_i); } diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index e7997e240366..2f9603fa85a5 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -174,7 +174,7 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) spin_unlock(&nm_i->nid_list_lock); return; } - fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next, + fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], struct free_nid, list); *nid = fnid->nid; spin_unlock(&nm_i->nid_list_lock); @@ -186,6 +186,12 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr) { struct f2fs_nm_info *nm_i = NM_I(sbi); + +#ifdef CONFIG_F2FS_CHECK_FS + if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir, + nm_i->bitmap_size)) + f2fs_bug_on(sbi, 1); +#endif memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size); } @@ -228,6 +234,9 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid) unsigned int block_off = NAT_BLOCK_OFFSET(start_nid); f2fs_change_bit(block_off, nm_i->nat_bitmap); +#ifdef CONFIG_F2FS_CHECK_FS + f2fs_change_bit(block_off, nm_i->nat_bitmap_mir); +#endif } static inline nid_t ino_of_node(struct page *node_page) @@ -291,14 +300,11 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); struct f2fs_node *rn = F2FS_NODE(page); - size_t crc_offset = le32_to_cpu(ckpt->checksum_offset); - __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver); + __u64 cp_ver = cur_cp_version(ckpt); + + if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) + cp_ver |= (cur_cp_crc(ckpt) << 32); - if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) { - __u64 crc = le32_to_cpu(*((__le32 *) - ((unsigned char *)ckpt + crc_offset))); 
- cp_ver |= (crc << 32); - } rn->footer.cp_ver = cpu_to_le64(cp_ver); rn->footer.next_blkaddr = cpu_to_le32(blkaddr); } @@ -306,14 +312,11 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) static inline bool is_recoverable_dnode(struct page *page) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); - size_t crc_offset = le32_to_cpu(ckpt->checksum_offset); __u64 cp_ver = cur_cp_version(ckpt); - if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) { - __u64 crc = le32_to_cpu(*((__le32 *) - ((unsigned char *)ckpt + crc_offset))); - cp_ver |= (crc << 32); - } + if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) + cp_ver |= (cur_cp_crc(ckpt) << 32); + return cp_ver == cpver_of_node(page); } @@ -343,7 +346,7 @@ static inline bool IS_DNODE(struct page *node_page) unsigned int ofs = ofs_of_node(node_page); if (f2fs_has_xattr_block(ofs)) - return false; + return true; if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || ofs == 5 + 2 * NIDS_PER_BLOCK) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 981a9584b62f..d025aa83fb5b 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -378,11 +378,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, if (IS_INODE(page)) { recover_inline_xattr(inode, page); } else if (f2fs_has_xattr_block(ofs_of_node(page))) { - /* - * Deprecated; xattr blocks should be found from cold log. - * But, we should remain this for backward compatibility. - */ - recover_xattr_data(inode, page, blkaddr); + err = recover_xattr_data(inode, page, blkaddr); + if (!err) + recovered++; goto out; } @@ -428,8 +426,9 @@ retry_dn: } if (!file_keep_isize(inode) && - (i_size_read(inode) <= (start << PAGE_SHIFT))) - f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT); + (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT))) + f2fs_i_size_write(inode, + (loff_t)(start + 1) << PAGE_SHIFT); /* * dest is reserved block, invalidate src block @@ -552,10 +551,8 @@ next: int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) { - struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); struct list_head inode_list; struct list_head dir_list; - block_t blkaddr; int err; int ret = 0; bool need_writecp = false; @@ -571,8 +568,6 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) /* prevent checkpoint */ mutex_lock(&sbi->cp_mutex); - blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); - /* step #1: find fsynced inode numbers */ err = find_fsync_dnodes(sbi, &inode_list); if (err || list_empty(&inode_list)) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 0d8802453758..4bd7a8b19332 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -26,7 +26,7 @@ #define __reverse_ffz(x) __reverse_ffs(~(x)) static struct kmem_cache *discard_entry_slab; -static struct kmem_cache *bio_entry_slab; +static struct kmem_cache *discard_cmd_slab; static struct kmem_cache *sit_entry_set_slab; static struct kmem_cache *inmem_entry_slab; @@ -242,11 +242,12 @@ void drop_inmem_pages(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); - clear_inode_flag(inode, FI_ATOMIC_FILE); - mutex_lock(&fi->inmem_lock); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); mutex_unlock(&fi->inmem_lock); + + clear_inode_flag(inode, FI_ATOMIC_FILE); + stat_dec_atomic_write(inode); } static int __commit_inmem_pages(struct inode *inode, @@ -262,7 +263,7 @@ static int __commit_inmem_pages(struct inode *inode, .op_flags = REQ_SYNC | REQ_PRIO, .encrypted_page = NULL, }; - bool submit_bio = false; + pgoff_t last_idx = 
ULONG_MAX; int err = 0; list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { @@ -288,15 +289,15 @@ static int __commit_inmem_pages(struct inode *inode, /* record old blkaddr for revoking */ cur->old_addr = fio.old_blkaddr; - - submit_bio = true; + last_idx = page->index; } unlock_page(page); list_move_tail(&cur->list, revoke_list); } - if (submit_bio) - f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE); + if (last_idx != ULONG_MAX) + f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx, + DATA, WRITE); if (!err) __revoke_inmem_pages(inode, revoke_list, false, false); @@ -315,6 +316,8 @@ int commit_inmem_pages(struct inode *inode) f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); + set_inode_flag(inode, FI_ATOMIC_COMMIT); + mutex_lock(&fi->inmem_lock); err = __commit_inmem_pages(inode, &revoke_list); if (err) { @@ -336,6 +339,8 @@ int commit_inmem_pages(struct inode *inode) } mutex_unlock(&fi->inmem_lock); + clear_inode_flag(inode, FI_ATOMIC_COMMIT); + f2fs_unlock_op(sbi); return err; } @@ -347,8 +352,10 @@ int commit_inmem_pages(struct inode *inode) void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) { #ifdef CONFIG_F2FS_FAULT_INJECTION - if (time_to_inject(sbi, FAULT_CHECKPOINT)) + if (time_to_inject(sbi, FAULT_CHECKPOINT)) { + f2fs_show_injection_info(FAULT_CHECKPOINT); f2fs_stop_checkpoint(sbi, false); + } #endif if (!need) @@ -381,7 +388,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) if (!available_free_memory(sbi, FREE_NIDS)) try_to_free_nids(sbi, MAX_FREE_NIDS); else - build_free_nids(sbi, false); + build_free_nids(sbi, false, false); if (!is_idle(sbi)) return; @@ -423,6 +430,9 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi) if (sbi->s_ndevs && !ret) { for (i = 1; i < sbi->s_ndevs; i++) { + trace_f2fs_issue_flush(FDEV(i).bdev, + test_opt(sbi, NOBARRIER), + test_opt(sbi, FLUSH_MERGE)); ret = __submit_flush_wait(FDEV(i).bdev); if (ret) break; @@ -434,7 +444,7 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi) static int issue_flush_thread(void *data) { struct f2fs_sb_info *sbi = data; - struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; + struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; wait_queue_head_t *q = &fcc->flush_wait_queue; repeat: if (kthread_should_stop()) @@ -463,16 +473,16 @@ repeat: int f2fs_issue_flush(struct f2fs_sb_info *sbi) { - struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; + struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; struct flush_cmd cmd; - trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER), - test_opt(sbi, FLUSH_MERGE)); - if (test_opt(sbi, NOBARRIER)) return 0; - if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) { + if (!test_opt(sbi, FLUSH_MERGE)) + return submit_flush_wait(sbi); + + if (!atomic_read(&fcc->submit_flush)) { int ret; atomic_inc(&fcc->submit_flush); @@ -506,8 +516,8 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi) struct flush_cmd_control *fcc; int err = 0; - if (SM_I(sbi)->cmd_control_info) { - fcc = SM_I(sbi)->cmd_control_info; + if (SM_I(sbi)->fcc_info) { + fcc = SM_I(sbi)->fcc_info; goto init_thread; } @@ -517,14 +527,14 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi) atomic_set(&fcc->submit_flush, 0); init_waitqueue_head(&fcc->flush_wait_queue); init_llist_head(&fcc->issue_list); - SM_I(sbi)->cmd_control_info = fcc; + SM_I(sbi)->fcc_info = fcc; init_thread: fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(fcc->f2fs_issue_flush)) { err = 
PTR_ERR(fcc->f2fs_issue_flush); kfree(fcc); - SM_I(sbi)->cmd_control_info = NULL; + SM_I(sbi)->fcc_info = NULL; return err; } @@ -533,7 +543,7 @@ init_thread: void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) { - struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; + struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; if (fcc && fcc->f2fs_issue_flush) { struct task_struct *flush_thread = fcc->f2fs_issue_flush; @@ -543,7 +553,7 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) } if (free) { kfree(fcc); - SM_I(sbi)->cmd_control_info = NULL; + SM_I(sbi)->fcc_info = NULL; } } @@ -623,60 +633,144 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) mutex_unlock(&dirty_i->seglist_lock); } -static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi, - struct bio *bio) +static void __add_discard_cmd(struct f2fs_sb_info *sbi, + struct bio *bio, block_t lstart, block_t len) { - struct list_head *wait_list = &(SM_I(sbi)->wait_list); - struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS); + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct list_head *cmd_list = &(dcc->discard_cmd_list); + struct discard_cmd *dc; - INIT_LIST_HEAD(&be->list); - be->bio = bio; - init_completion(&be->event); - list_add_tail(&be->list, wait_list); + dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS); + INIT_LIST_HEAD(&dc->list); + dc->bio = bio; + bio->bi_private = dc; + dc->lstart = lstart; + dc->len = len; + dc->state = D_PREP; + init_completion(&dc->wait); - return be; + mutex_lock(&dcc->cmd_lock); + list_add_tail(&dc->list, cmd_list); + mutex_unlock(&dcc->cmd_lock); } -void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi) +static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc) { - struct list_head *wait_list = &(SM_I(sbi)->wait_list); - struct bio_entry *be, *tmp; + int err = dc->bio->bi_error; - list_for_each_entry_safe(be, tmp, wait_list, list) { - struct bio *bio = be->bio; - int err; + if (dc->state == D_DONE) + atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard)); - wait_for_completion_io(&be->event); - err = be->error; - if (err == -EOPNOTSUPP) - err = 0; + if (err == -EOPNOTSUPP) + err = 0; - if (err) - f2fs_msg(sbi->sb, KERN_INFO, + if (err) + f2fs_msg(sbi->sb, KERN_INFO, "Issue discard failed, ret: %d", err); + bio_put(dc->bio); + list_del(&dc->list); + kmem_cache_free(discard_cmd_slab, dc); +} + +/* This should be covered by global mutex, &sit_i->sentry_lock */ +void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) +{ + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct list_head *wait_list = &(dcc->discard_cmd_list); + struct discard_cmd *dc, *tmp; + struct blk_plug plug; + + mutex_lock(&dcc->cmd_lock); - bio_put(bio); - list_del(&be->list); - kmem_cache_free(bio_entry_slab, be); + blk_start_plug(&plug); + + list_for_each_entry_safe(dc, tmp, wait_list, list) { + + if (blkaddr == NULL_ADDR) { + if (dc->state == D_PREP) { + dc->state = D_SUBMIT; + submit_bio(dc->bio); + atomic_inc(&dcc->submit_discard); + } + continue; + } + + if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) { + if (dc->state == D_SUBMIT) + wait_for_completion_io(&dc->wait); + else + __remove_discard_cmd(sbi, dc); + } + } + blk_finish_plug(&plug); + + /* this comes from f2fs_put_super */ + if (blkaddr == NULL_ADDR) { + list_for_each_entry_safe(dc, tmp, wait_list, list) { + wait_for_completion_io(&dc->wait); + __remove_discard_cmd(sbi, dc); + } } + 
mutex_unlock(&dcc->cmd_lock); +} + +static void f2fs_submit_discard_endio(struct bio *bio) +{ + struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; + + complete(&dc->wait); + dc->state = D_DONE; } -static void f2fs_submit_bio_wait_endio(struct bio *bio) +static int issue_discard_thread(void *data) { - struct bio_entry *be = (struct bio_entry *)bio->bi_private; + struct f2fs_sb_info *sbi = data; + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + wait_queue_head_t *q = &dcc->discard_wait_queue; + struct list_head *cmd_list = &dcc->discard_cmd_list; + struct discard_cmd *dc, *tmp; + struct blk_plug plug; + int iter = 0; +repeat: + if (kthread_should_stop()) + return 0; + + blk_start_plug(&plug); + + mutex_lock(&dcc->cmd_lock); + list_for_each_entry_safe(dc, tmp, cmd_list, list) { + if (dc->state == D_PREP) { + dc->state = D_SUBMIT; + submit_bio(dc->bio); + atomic_inc(&dcc->submit_discard); + if (iter++ > DISCARD_ISSUE_RATE) + break; + } else if (dc->state == D_DONE) { + __remove_discard_cmd(sbi, dc); + } + } + mutex_unlock(&dcc->cmd_lock); + + blk_finish_plug(&plug); + + iter = 0; + congestion_wait(BLK_RW_SYNC, HZ/50); - be->error = bio->bi_error; - complete(&be->event); + wait_event_interruptible(*q, + kthread_should_stop() || !list_empty(&dcc->discard_cmd_list)); + goto repeat; } + /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { struct bio *bio = NULL; + block_t lblkstart = blkstart; int err; - trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); + trace_f2fs_issue_discard(bdev, blkstart, blklen); if (sbi->s_ndevs) { int devi = f2fs_target_device_index(sbi, blkstart); @@ -688,14 +782,12 @@ static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, SECTOR_FROM_BLOCK(blklen), GFP_NOFS, 0, &bio); if (!err && bio) { - struct bio_entry *be = __add_bio_entry(sbi, bio); - - bio->bi_private = be; - bio->bi_end_io = f2fs_submit_bio_wait_endio; + bio->bi_end_io = f2fs_submit_discard_endio; bio->bi_opf |= REQ_SYNC; - submit_bio(bio); - } + __add_discard_cmd(sbi, bio, lblkstart, blklen); + wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue); + } return err; } @@ -703,24 +795,13 @@ static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { - sector_t nr_sects = SECTOR_FROM_BLOCK(blklen); - sector_t sector; + sector_t sector, nr_sects; int devi = 0; if (sbi->s_ndevs) { devi = f2fs_target_device_index(sbi, blkstart); blkstart -= FDEV(devi).start_blk; } - sector = SECTOR_FROM_BLOCK(blkstart); - - if (sector & (bdev_zone_sectors(bdev) - 1) || - nr_sects != bdev_zone_sectors(bdev)) { - f2fs_msg(sbi->sb, KERN_INFO, - "(%d) %s: Unaligned discard attempted (block %x + %x)", - devi, sbi->s_ndevs ? 
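The discard rework above replaces the old bio_entry wait list with a discard_cmd object that lives on dcc->discard_cmd_list and moves through three states, D_PREP when queued, D_SUBMIT once the bio is sent, D_DONE when the endio fires, with issue_discard_thread() submitting prepared commands and reaping finished ones. A compact userspace sketch of that lifecycle, single-threaded, with hypothetical names and no locking or real block I/O:

#include <stdio.h>
#include <stdlib.h>

enum dc_state { D_PREP, D_SUBMIT, D_DONE };

struct discard_cmd {
	unsigned long long lstart;	/* first logical block to discard */
	unsigned long long len;		/* number of blocks */
	enum dc_state state;
	struct discard_cmd *next;
};

static struct discard_cmd *cmd_list;

static void add_discard_cmd(unsigned long long lstart, unsigned long long len)
{
	struct discard_cmd *dc = malloc(sizeof(*dc));

	if (!dc)
		return;
	dc->lstart = lstart;
	dc->len = len;
	dc->state = D_PREP;		/* queued, not yet sent to the device */
	dc->next = cmd_list;
	cmd_list = dc;
}

/* Stand-in for submit_bio(); here the "completion" happens immediately. */
static void fake_submit(struct discard_cmd *dc)
{
	printf("discard %llu +%llu\n", dc->lstart, dc->len);
	dc->state = D_DONE;
}

/* One pass of the issue thread: submit what is prepared, reap what is done. */
static void issue_discard_pass(void)
{
	struct discard_cmd **pp = &cmd_list;

	while (*pp) {
		struct discard_cmd *dc = *pp;

		if (dc->state == D_PREP) {
			dc->state = D_SUBMIT;
			fake_submit(dc);
		}
		if (dc->state == D_DONE) {
			*pp = dc->next;
			free(dc);
		} else {
			pp = &dc->next;
		}
	}
}

int main(void)
{
	add_discard_cmd(4096, 512);
	add_discard_cmd(8192, 256);
	issue_discard_pass();
	return 0;
}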
FDEV(devi).path: "", - blkstart, blklen); - return -EIO; - } /* * We need to know the type of the zone: for conventional zones, @@ -735,7 +816,18 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen); case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF: - trace_f2fs_issue_reset_zone(sbi->sb, blkstart); + sector = SECTOR_FROM_BLOCK(blkstart); + nr_sects = SECTOR_FROM_BLOCK(blklen); + + if (sector & (bdev_zone_sectors(bdev) - 1) || + nr_sects != bdev_zone_sectors(bdev)) { + f2fs_msg(sbi->sb, KERN_INFO, + "(%d) %s: Unaligned discard attempted (block %x + %x)", + devi, sbi->s_ndevs ? FDEV(devi).path: "", + blkstart, blklen); + return -EIO; + } + trace_f2fs_issue_reset_zone(bdev, blkstart); return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS); default: @@ -800,13 +892,14 @@ static void __add_discard_entry(struct f2fs_sb_info *sbi, struct cp_control *cpc, struct seg_entry *se, unsigned int start, unsigned int end) { - struct list_head *head = &SM_I(sbi)->discard_list; + struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list; struct discard_entry *new, *last; if (!list_empty(head)) { last = list_last_entry(head, struct discard_entry, list); if (START_BLOCK(sbi, cpc->trim_start) + start == - last->blkaddr + last->len) { + last->blkaddr + last->len && + last->len < MAX_DISCARD_BLOCKS(sbi)) { last->len += end - start; goto done; } @@ -818,10 +911,11 @@ static void __add_discard_entry(struct f2fs_sb_info *sbi, new->len = end - start; list_add_tail(&new->list, head); done: - SM_I(sbi)->nr_discards += end - start; + SM_I(sbi)->dcc_info->nr_discards += end - start; } -static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) +static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, + bool check_only) { int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); int max_blocks = sbi->blocks_per_seg; @@ -835,12 +929,13 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) int i; if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi)) - return; + return false; if (!force) { if (!test_opt(sbi, DISCARD) || !se->valid_blocks || - SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards) - return; + SM_I(sbi)->dcc_info->nr_discards >= + SM_I(sbi)->dcc_info->max_discards) + return false; } /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ @@ -848,7 +943,8 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) dmap[i] = force ? 
~ckpt_map[i] & ~discard_map[i] : (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; - while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { + while (force || SM_I(sbi)->dcc_info->nr_discards <= + SM_I(sbi)->dcc_info->max_discards) { start = __find_rev_next_bit(dmap, max_blocks, end + 1); if (start >= max_blocks) break; @@ -858,13 +954,17 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) && (end - start) < cpc->trim_minlen) continue; + if (check_only) + return true; + __add_discard_entry(sbi, cpc, se, start, end); } + return false; } void release_discard_addrs(struct f2fs_sb_info *sbi) { - struct list_head *head = &(SM_I(sbi)->discard_list); + struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list); struct discard_entry *entry, *this; /* drop caches */ @@ -890,17 +990,14 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) { - struct list_head *head = &(SM_I(sbi)->discard_list); + struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list); struct discard_entry *entry, *this; struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - struct blk_plug plug; unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; unsigned int start = 0, end = -1; unsigned int secno, start_segno; bool force = (cpc->reason == CP_DISCARD); - blk_start_plug(&plug); - mutex_lock(&dirty_i->seglist_lock); while (1) { @@ -916,9 +1013,13 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) dirty_i->nr_dirty[PRE] -= end - start; - if (force || !test_opt(sbi, DISCARD)) + if (!test_opt(sbi, DISCARD)) continue; + if (force && start >= cpc->trim_start && + (end - 1) <= cpc->trim_end) + continue; + if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) { f2fs_issue_discard(sbi, START_BLOCK(sbi, start), (end - start) << sbi->log_blocks_per_seg); @@ -935,6 +1036,8 @@ next: start = start_segno + sbi->segs_per_sec; if (start < end) goto next; + else + end = start - 1; } mutex_unlock(&dirty_i->seglist_lock); @@ -946,11 +1049,62 @@ next: cpc->trimmed += entry->len; skip: list_del(&entry->list); - SM_I(sbi)->nr_discards -= entry->len; + SM_I(sbi)->dcc_info->nr_discards -= entry->len; kmem_cache_free(discard_entry_slab, entry); } +} - blk_finish_plug(&plug); +static int create_discard_cmd_control(struct f2fs_sb_info *sbi) +{ + dev_t dev = sbi->sb->s_bdev->bd_dev; + struct discard_cmd_control *dcc; + int err = 0; + + if (SM_I(sbi)->dcc_info) { + dcc = SM_I(sbi)->dcc_info; + goto init_thread; + } + + dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL); + if (!dcc) + return -ENOMEM; + + INIT_LIST_HEAD(&dcc->discard_entry_list); + INIT_LIST_HEAD(&dcc->discard_cmd_list); + mutex_init(&dcc->cmd_lock); + atomic_set(&dcc->submit_discard, 0); + dcc->nr_discards = 0; + dcc->max_discards = 0; + + init_waitqueue_head(&dcc->discard_wait_queue); + SM_I(sbi)->dcc_info = dcc; +init_thread: + dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, + "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); + if (IS_ERR(dcc->f2fs_issue_discard)) { + err = PTR_ERR(dcc->f2fs_issue_discard); + kfree(dcc); + SM_I(sbi)->dcc_info = NULL; + return err; + } + + return err; +} + +static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free) +{ + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + + if (dcc && dcc->f2fs_issue_discard) { + struct task_struct *discard_thread = dcc->f2fs_issue_discard; + + dcc->f2fs_issue_discard = NULL; + 
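add_discard_addrs() above derives a per-segment "discardable" bitmap (on the normal path, blocks that were valid at the last checkpoint but are no longer set in the current map) and turns each run of consecutive set bits into one discard entry. A small sketch of that run-finding step, byte-granular and with made-up data in place of the kernel's __find_rev_next_bit() helpers:

#include <stdio.h>

#define BLOCKS_PER_SEG 64

static int test_bit(const unsigned char *map, int nr)
{
	return (map[nr / 8] >> (nr % 8)) & 1;
}

/* Discardable: valid at the last checkpoint (ckpt_map) but freed since
 * (cleared in cur_map), matching (cur_map ^ ckpt_map) & ckpt_map above. */
static int discardable(const unsigned char *cur_map,
		       const unsigned char *ckpt_map, int nr)
{
	return test_bit(ckpt_map, nr) && !test_bit(cur_map, nr);
}

static void emit_discard_runs(const unsigned char *cur_map,
			      const unsigned char *ckpt_map)
{
	int start = 0;

	while (start < BLOCKS_PER_SEG) {
		int end;

		while (start < BLOCKS_PER_SEG &&
		       !discardable(cur_map, ckpt_map, start))
			start++;
		if (start >= BLOCKS_PER_SEG)
			break;
		end = start;
		while (end < BLOCKS_PER_SEG &&
		       discardable(cur_map, ckpt_map, end))
			end++;
		printf("discard blocks [%d, %d)\n", start, end);
		start = end;
	}
}

int main(void)
{
	unsigned char ckpt_map[8] = { 0xff, 0xff, 0, 0, 0, 0, 0, 0 };
	unsigned char cur_map[8]  = { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 };

	emit_discard_runs(cur_map, ckpt_map);	/* prints [4, 12) */
	return 0;
}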
kthread_stop(discard_thread); + } + if (free) { + kfree(dcc); + SM_I(sbi)->dcc_info = NULL; + } } static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) @@ -995,14 +1149,32 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) /* Update valid block bitmap */ if (del > 0) { - if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) + if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) { +#ifdef CONFIG_F2FS_CHECK_FS + if (f2fs_test_and_set_bit(offset, + se->cur_valid_map_mir)) + f2fs_bug_on(sbi, 1); + else + WARN_ON(1); +#else f2fs_bug_on(sbi, 1); +#endif + } if (f2fs_discard_en(sbi) && !f2fs_test_and_set_bit(offset, se->discard_map)) sbi->discard_blks--; } else { - if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) + if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { +#ifdef CONFIG_F2FS_CHECK_FS + if (!f2fs_test_and_clear_bit(offset, + se->cur_valid_map_mir)) + f2fs_bug_on(sbi, 1); + else + WARN_ON(1); +#else f2fs_bug_on(sbi, 1); +#endif + } if (f2fs_discard_en(sbi) && f2fs_test_and_clear_bit(offset, se->discard_map)) sbi->discard_blks++; @@ -1167,17 +1339,6 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi, f2fs_put_page(page, 1); } -static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) -{ - struct curseg_info *curseg = CURSEG_I(sbi, type); - unsigned int segno = curseg->segno + 1; - struct free_segmap_info *free_i = FREE_I(sbi); - - if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) - return !test_bit(segno, free_i->free_segmap); - return 0; -} - /* * Find a new segment from the free segments bitmap to right order * This function should be returned with success, otherwise BUG @@ -1382,16 +1543,39 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; + int i, cnt; + bool reversed = false; + + /* need_SSR() already forces to do this */ + if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR)) + return 1; - if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0)) - return v_ops->get_victim(sbi, - &(curseg)->next_segno, BG_GC, type, SSR); + /* For node segments, let's do SSR more intensively */ + if (IS_NODESEG(type)) { + if (type >= CURSEG_WARM_NODE) { + reversed = true; + i = CURSEG_COLD_NODE; + } else { + i = CURSEG_HOT_NODE; + } + cnt = NR_CURSEG_NODE_TYPE; + } else { + if (type >= CURSEG_WARM_DATA) { + reversed = true; + i = CURSEG_COLD_DATA; + } else { + i = CURSEG_HOT_DATA; + } + cnt = NR_CURSEG_DATA_TYPE; + } - /* For data segments, let's do SSR more intensively */ - for (; type >= CURSEG_HOT_DATA; type--) + for (; cnt-- > 0; reversed ? 
i-- : i++) { + if (i == type) + continue; if (v_ops->get_victim(sbi, &(curseg)->next_segno, - BG_GC, type, SSR)) + BG_GC, i, SSR)) return 1; + } return 0; } @@ -1402,20 +1586,17 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) static void allocate_segment_by_default(struct f2fs_sb_info *sbi, int type, bool force) { - struct curseg_info *curseg = CURSEG_I(sbi, type); - if (force) new_curseg(sbi, type, true); - else if (type == CURSEG_WARM_NODE) - new_curseg(sbi, type, false); - else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) + else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && + type == CURSEG_WARM_NODE) new_curseg(sbi, type, false); else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) change_curseg(sbi, type, true); else new_curseg(sbi, type, false); - stat_inc_seg_type(sbi, curseg); + stat_inc_seg_type(sbi, CURSEG_I(sbi, type)); } void allocate_new_segments(struct f2fs_sb_info *sbi) @@ -1424,9 +1605,6 @@ void allocate_new_segments(struct f2fs_sb_info *sbi) unsigned int old_segno; int i; - if (test_opt(sbi, LFS)) - return; - for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { curseg = CURSEG_I(sbi, i); old_segno = curseg->segno; @@ -1439,6 +1617,24 @@ static const struct segment_allocation default_salloc_ops = { .allocate_segment = allocate_segment_by_default, }; +bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) +{ + __u64 trim_start = cpc->trim_start; + bool has_candidate = false; + + mutex_lock(&SIT_I(sbi)->sentry_lock); + for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { + if (add_discard_addrs(sbi, cpc, true)) { + has_candidate = true; + break; + } + } + mutex_unlock(&SIT_I(sbi)->sentry_lock); + + cpc->trim_start = trim_start; + return has_candidate; +} + int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); @@ -1573,6 +1769,8 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); + f2fs_wait_discard_bio(sbi, *new_blkaddr); + /* * __add_sum_entry should be resided under the curseg_mutex * because, this function updates a summary entry in the @@ -1584,14 +1782,15 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, stat_inc_block_count(sbi, curseg); - if (!__has_curseg_space(sbi, type)) - sit_i->s_ops->allocate_segment(sbi, type, false); /* * SIT information should be updated before segment allocation, * since SSR needs latest valid block information. 
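The reworked get_ssr_segment() above first tries the log's own type and then walks the other current-segment types of the same class (data or node), scanning hot-to-cold or cold-to-hot depending on the starting temperature and skipping the type already tried. A standalone sketch of that search order, where try_victim() is a stand-in for v_ops->get_victim():

#include <stdbool.h>
#include <stdio.h>

enum {
	CURSEG_HOT_DATA, CURSEG_WARM_DATA, CURSEG_COLD_DATA,
	CURSEG_HOT_NODE, CURSEG_WARM_NODE, CURSEG_COLD_NODE,
};
#define NR_CURSEG_DATA_TYPE	3
#define NR_CURSEG_NODE_TYPE	3

/* Pretend only the cold data log currently has an SSR candidate. */
static bool try_victim(int type)
{
	return type == CURSEG_COLD_DATA;
}

static bool get_ssr_segment_sketch(int type)
{
	bool reversed;
	int i, cnt;

	if (try_victim(type))		/* the log's own type first */
		return true;

	if (type >= CURSEG_HOT_NODE) {	/* node logs */
		reversed = type >= CURSEG_WARM_NODE;
		i = reversed ? CURSEG_COLD_NODE : CURSEG_HOT_NODE;
		cnt = NR_CURSEG_NODE_TYPE;
	} else {			/* data logs */
		reversed = type >= CURSEG_WARM_DATA;
		i = reversed ? CURSEG_COLD_DATA : CURSEG_HOT_DATA;
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;	/* already tried above */
		if (try_victim(i))
			return true;
	}
	return false;
}

int main(void)
{
	printf("hot data  -> %d\n", get_ssr_segment_sketch(CURSEG_HOT_DATA));
	printf("warm node -> %d\n", get_ssr_segment_sketch(CURSEG_WARM_NODE));
	return 0;
}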
*/ refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); + if (!__has_curseg_space(sbi, type)) + sit_i->s_ops->allocate_segment(sbi, type, false); + mutex_unlock(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) @@ -1603,15 +1802,20 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) { int type = __get_segment_type(fio->page, fio->type); + int err; if (fio->type == NODE || fio->type == DATA) mutex_lock(&fio->sbi->wio_mutex[fio->type]); - +reallocate: allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, &fio->new_blkaddr, sum, type); /* writeout dirty page into bdev */ - f2fs_submit_page_mbio(fio); + err = f2fs_submit_page_mbio(fio); + if (err == -EAGAIN) { + fio->old_blkaddr = fio->new_blkaddr; + goto reallocate; + } if (fio->type == NODE || fio->type == DATA) mutex_unlock(&fio->sbi->wio_mutex[fio->type]); @@ -1753,7 +1957,8 @@ void f2fs_wait_on_page_writeback(struct page *page, if (PageWriteback(page)) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); - f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE); + f2fs_submit_merged_bio_cond(sbi, page->mapping->host, + 0, page->index, type, WRITE); if (ordered) wait_on_page_writeback(page); else @@ -2228,7 +2433,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* add discard candidates */ if (cpc->reason != CP_DISCARD) { cpc->trim_start = segno; - add_discard_addrs(sbi, cpc); + add_discard_addrs(sbi, cpc, false); } if (to_journal) { @@ -2263,8 +2468,12 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) f2fs_bug_on(sbi, sit_i->dirty_sentries); out: if (cpc->reason == CP_DISCARD) { + __u64 trim_start = cpc->trim_start; + for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) - add_discard_addrs(sbi, cpc); + add_discard_addrs(sbi, cpc, false); + + cpc->trim_start = trim_start; } mutex_unlock(&sit_i->sentry_lock); @@ -2276,7 +2485,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct sit_info *sit_i; unsigned int sit_segs, start; - char *src_bitmap, *dst_bitmap; + char *src_bitmap; unsigned int bitmap_size; /* allocate memory for SIT information */ @@ -2305,6 +2514,13 @@ static int build_sit_info(struct f2fs_sb_info *sbi) !sit_i->sentries[start].ckpt_valid_map) return -ENOMEM; +#ifdef CONFIG_F2FS_CHECK_FS + sit_i->sentries[start].cur_valid_map_mir + = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + if (!sit_i->sentries[start].cur_valid_map_mir) + return -ENOMEM; +#endif + if (f2fs_discard_en(sbi)) { sit_i->sentries[start].discard_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); @@ -2331,17 +2547,22 @@ static int build_sit_info(struct f2fs_sb_info *sbi) bitmap_size = __bitmap_size(sbi, SIT_BITMAP); src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); - dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); - if (!dst_bitmap) + sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); + if (!sit_i->sit_bitmap) return -ENOMEM; +#ifdef CONFIG_F2FS_CHECK_FS + sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); + if (!sit_i->sit_bitmap_mir) + return -ENOMEM; +#endif + /* init SIT information */ sit_i->s_ops = &default_salloc_ops; sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; sit_i->written_valid_blocks = 0; - sit_i->sit_bitmap = dst_bitmap; sit_i->bitmap_size = bitmap_size; sit_i->dirty_sentries = 0; sit_i->sents_per_block = 
SIT_ENTRY_PER_BLOCK; @@ -2626,11 +2847,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; - INIT_LIST_HEAD(&sm_info->discard_list); - INIT_LIST_HEAD(&sm_info->wait_list); - sm_info->nr_discards = 0; - sm_info->max_discards = 0; - sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); @@ -2641,6 +2857,10 @@ int build_segment_manager(struct f2fs_sb_info *sbi) return err; } + err = create_discard_cmd_control(sbi); + if (err) + return err; + err = build_sit_info(sbi); if (err) return err; @@ -2734,6 +2954,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi) if (sit_i->sentries) { for (start = 0; start < MAIN_SEGS(sbi); start++) { kfree(sit_i->sentries[start].cur_valid_map); +#ifdef CONFIG_F2FS_CHECK_FS + kfree(sit_i->sentries[start].cur_valid_map_mir); +#endif kfree(sit_i->sentries[start].ckpt_valid_map); kfree(sit_i->sentries[start].discard_map); } @@ -2746,6 +2969,9 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi) SM_I(sbi)->sit_info = NULL; kfree(sit_i->sit_bitmap); +#ifdef CONFIG_F2FS_CHECK_FS + kfree(sit_i->sit_bitmap_mir); +#endif kfree(sit_i); } @@ -2756,6 +2982,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi) if (!sm_info) return; destroy_flush_cmd_control(sbi, true); + destroy_discard_cmd_control(sbi, true); destroy_dirty_segmap(sbi); destroy_curseg(sbi); destroy_free_segmap(sbi); @@ -2771,15 +2998,15 @@ int __init create_segment_manager_caches(void) if (!discard_entry_slab) goto fail; - bio_entry_slab = f2fs_kmem_cache_create("bio_entry", - sizeof(struct bio_entry)); - if (!bio_entry_slab) + discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd", + sizeof(struct discard_cmd)); + if (!discard_cmd_slab) goto destroy_discard_entry; sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", sizeof(struct sit_entry_set)); if (!sit_entry_set_slab) - goto destroy_bio_entry; + goto destroy_discard_cmd; inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", sizeof(struct inmem_pages)); @@ -2789,8 +3016,8 @@ int __init create_segment_manager_caches(void) destroy_sit_entry_set: kmem_cache_destroy(sit_entry_set_slab); -destroy_bio_entry: - kmem_cache_destroy(bio_entry_slab); +destroy_discard_cmd: + kmem_cache_destroy(discard_cmd_slab); destroy_discard_entry: kmem_cache_destroy(discard_entry_slab); fail: @@ -2800,7 +3027,7 @@ fail: void destroy_segment_manager_caches(void) { kmem_cache_destroy(sit_entry_set_slab); - kmem_cache_destroy(bio_entry_slab); + kmem_cache_destroy(discard_cmd_slab); kmem_cache_destroy(discard_entry_slab); kmem_cache_destroy(inmem_entry_slab); } diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 9d44ce83acb2..5e8ad4280a50 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -164,6 +164,9 @@ struct seg_entry { unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */ unsigned int padding:6; /* padding */ unsigned char *cur_valid_map; /* validity bitmap of blocks */ +#ifdef CONFIG_F2FS_CHECK_FS + unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */ +#endif /* * # of valid blocks and the validity bitmap stored in the the last * checkpoint pack. This information is used by the SSR mode. @@ -186,9 +189,12 @@ struct segment_allocation { * the page is atomically written, and it is in inmem_pages list. 
*/ #define ATOMIC_WRITTEN_PAGE ((unsigned long)-1) +#define DUMMY_WRITTEN_PAGE ((unsigned long)-2) #define IS_ATOMIC_WRITTEN_PAGE(page) \ (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE) +#define IS_DUMMY_WRITTEN_PAGE(page) \ + (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE) struct inmem_pages { struct list_head list; @@ -203,6 +209,9 @@ struct sit_info { block_t sit_blocks; /* # of blocks used by SIT area */ block_t written_valid_blocks; /* # of valid blocks in main area */ char *sit_bitmap; /* SIT bitmap pointer */ +#ifdef CONFIG_F2FS_CHECK_FS + char *sit_bitmap_mir; /* SIT bitmap mirror */ +#endif unsigned int bitmap_size; /* SIT bitmap size */ unsigned long *tmp_map; /* bitmap for temporal use */ @@ -317,6 +326,9 @@ static inline void seg_info_from_raw_sit(struct seg_entry *se, se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs); memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); +#ifdef CONFIG_F2FS_CHECK_FS + memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE); +#endif se->type = GET_SIT_TYPE(rs); se->mtime = le64_to_cpu(rs->mtime); } @@ -414,6 +426,12 @@ static inline void get_sit_bitmap(struct f2fs_sb_info *sbi, void *dst_addr) { struct sit_info *sit_i = SIT_I(sbi); + +#ifdef CONFIG_F2FS_CHECK_FS + if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir, + sit_i->bitmap_size)) + f2fs_bug_on(sbi, 1); +#endif memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size); } @@ -634,6 +652,12 @@ static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi, check_seg_range(sbi, start); +#ifdef CONFIG_F2FS_CHECK_FS + if (f2fs_test_bit(offset, sit_i->sit_bitmap) != + f2fs_test_bit(offset, sit_i->sit_bitmap_mir)) + f2fs_bug_on(sbi, 1); +#endif + /* calculate sit block address */ if (f2fs_test_bit(offset, sit_i->sit_bitmap)) blk_addr += sit_i->sit_blocks; @@ -659,6 +683,9 @@ static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start) unsigned int block_off = SIT_BLOCK_OFFSET(start); f2fs_change_bit(block_off, sit_i->sit_bitmap); +#ifdef CONFIG_F2FS_CHECK_FS + f2fs_change_bit(block_off, sit_i->sit_bitmap_mir); +#endif } static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi) @@ -689,6 +716,15 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) - (base + 1) + type; } +static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi, + unsigned int secno) +{ + if (get_valid_blocks(sbi, secno, sbi->segs_per_sec) >= + sbi->fggc_threshold) + return true; + return false; +} + static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) { if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) @@ -700,8 +736,8 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) * It is very important to gather dirty pages and write at once, so that we can * submit a big bio without interfering other data writes. * By default, 512 pages for directory data, - * 512 pages (2MB) * 3 for three types of nodes, and - * max_bio_blocks for meta are set. + * 512 pages (2MB) * 8 for nodes, and + * 256 pages * 8 for meta are set. 
*/ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type) { diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a831303bb777..96fe8ed73100 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -89,6 +89,7 @@ enum { Opt_active_logs, Opt_disable_ext_identify, Opt_inline_xattr, + Opt_noinline_xattr, Opt_inline_data, Opt_inline_dentry, Opt_noinline_dentry, @@ -101,6 +102,7 @@ enum { Opt_noinline_data, Opt_data_flush, Opt_mode, + Opt_io_size_bits, Opt_fault_injection, Opt_lazytime, Opt_nolazytime, @@ -121,6 +123,7 @@ static match_table_t f2fs_tokens = { {Opt_active_logs, "active_logs=%u"}, {Opt_disable_ext_identify, "disable_ext_identify"}, {Opt_inline_xattr, "inline_xattr"}, + {Opt_noinline_xattr, "noinline_xattr"}, {Opt_inline_data, "inline_data"}, {Opt_inline_dentry, "inline_dentry"}, {Opt_noinline_dentry, "noinline_dentry"}, @@ -133,6 +136,7 @@ static match_table_t f2fs_tokens = { {Opt_noinline_data, "noinline_data"}, {Opt_data_flush, "data_flush"}, {Opt_mode, "mode=%s"}, + {Opt_io_size_bits, "io_bits=%u"}, {Opt_fault_injection, "fault_injection=%u"}, {Opt_lazytime, "lazytime"}, {Opt_nolazytime, "nolazytime"}, @@ -143,6 +147,7 @@ static match_table_t f2fs_tokens = { enum { GC_THREAD, /* struct f2fs_gc_thread */ SM_INFO, /* struct f2fs_sm_info */ + DCC_INFO, /* struct discard_cmd_control */ NM_INFO, /* struct f2fs_nm_info */ F2FS_SBI, /* struct f2fs_sb_info */ #ifdef CONFIG_F2FS_FAULT_INJECTION @@ -166,6 +171,8 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) return (unsigned char *)sbi->gc_thread; else if (struct_type == SM_INFO) return (unsigned char *)SM_I(sbi); + else if (struct_type == DCC_INFO) + return (unsigned char *)SM_I(sbi)->dcc_info; else if (struct_type == NM_INFO) return (unsigned char *)NM_I(sbi); else if (struct_type == F2FS_SBI) @@ -281,7 +288,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments); -F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards); +F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); @@ -439,6 +446,9 @@ static int parse_options(struct super_block *sb, char *options) case Opt_inline_xattr: set_opt(sbi, INLINE_XATTR); break; + case Opt_noinline_xattr: + clear_opt(sbi, INLINE_XATTR); + break; #else case Opt_user_xattr: f2fs_msg(sb, KERN_INFO, @@ -452,6 +462,10 @@ static int parse_options(struct super_block *sb, char *options) f2fs_msg(sb, KERN_INFO, "inline_xattr options not supported"); break; + case Opt_noinline_xattr: + f2fs_msg(sb, KERN_INFO, + "noinline_xattr options not supported"); + break; #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL case Opt_acl: @@ -535,11 +549,23 @@ static int parse_options(struct super_block *sb, char *options) } kfree(name); break; + case Opt_io_size_bits: + if (args->from && match_int(args, &arg)) + return -EINVAL; + if (arg > __ilog2_u32(BIO_MAX_PAGES)) { + f2fs_msg(sb, KERN_WARNING, + "Not support %d, larger than %d", + 1 << arg, BIO_MAX_PAGES); + return -EINVAL; + } + sbi->write_io_size_bits = arg; + break; case Opt_fault_injection: if (args->from && match_int(args, &arg)) return -EINVAL; #ifdef 
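The new io_bits=%u mount option parsed below takes the log2 of the write-IO unit in pages, rejects anything above ilog2(BIO_MAX_PAGES), and, as the check at the end of parse_options() enforces, is only accepted together with mode=lfs. A sketch of that validation, assuming a 4KB page size and a BIO_MAX_PAGES of 256; both constants and the helper names are illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define BIO_MAX_PAGES	256	/* assumed block-layer limit for this example */
#define PAGE_SIZE	4096	/* assumed page size */

static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int l = 0;

	while (n > 1) {
		n >>= 1;
		l++;
	}
	return l;
}

static bool parse_io_bits(unsigned int arg, bool lfs_mode,
			  unsigned int *io_size_bits)
{
	if (arg > ilog2_u32(BIO_MAX_PAGES)) {
		fprintf(stderr, "Not support %u, larger than %u\n",
			1u << arg, BIO_MAX_PAGES);
		return false;
	}
	if (arg && !lfs_mode) {
		fprintf(stderr, "Should set mode=lfs with %uKB-sized IO\n",
			(1u << arg) * PAGE_SIZE / 1024);
		return false;
	}
	*io_size_bits = arg;
	return true;
}

int main(void)
{
	unsigned int bits;

	if (parse_io_bits(7, true, &bits))	/* 2^7 pages = 512KB unit */
		printf("write IO unit: %uKB\n", (1u << bits) * PAGE_SIZE / 1024);
	return 0;
}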
CONFIG_F2FS_FAULT_INJECTION f2fs_build_fault_attr(sbi, arg); + set_opt(sbi, FAULT_INJECTION); #else f2fs_msg(sb, KERN_INFO, "FAULT_INJECTION was not selected"); @@ -558,6 +584,13 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; } } + + if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { + f2fs_msg(sb, KERN_ERR, + "Should set mode=lfs with %uKB-sized IO", + F2FS_IO_SIZE_KB(sbi)); + return -EINVAL; + } return 0; } @@ -591,6 +624,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) static int f2fs_drop_inode(struct inode *inode) { + int ret; /* * This is to avoid a deadlock condition like below. * writeback_single_inode(inode) @@ -623,10 +657,12 @@ static int f2fs_drop_inode(struct inode *inode) spin_lock(&inode->i_lock); atomic_dec(&inode->i_count); } + trace_f2fs_drop_inode(inode, 0); return 0; } - - return generic_drop_inode(inode); + ret = generic_drop_inode(inode); + trace_f2fs_drop_inode(inode, ret); + return ret; } int f2fs_inode_dirtied(struct inode *inode, bool sync) @@ -750,6 +786,9 @@ static void f2fs_put_super(struct super_block *sb) write_checkpoint(sbi, &cpc); } + /* be sure to wait for any on-going discard commands */ + f2fs_wait_discard_bio(sbi, NULL_ADDR); + /* write_checkpoint can update stat informaion */ f2fs_destroy_stats(sbi); @@ -782,7 +821,7 @@ static void f2fs_put_super(struct super_block *sb) kfree(sbi->raw_super); destroy_device_list(sbi); - + mempool_destroy(sbi->write_io_dummy); destroy_percpu_info(sbi); kfree(sbi); } @@ -882,6 +921,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",nouser_xattr"); if (test_opt(sbi, INLINE_XATTR)) seq_puts(seq, ",inline_xattr"); + else + seq_puts(seq, ",noinline_xattr"); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) @@ -918,6 +959,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) else if (test_opt(sbi, LFS)) seq_puts(seq, "lfs"); seq_printf(seq, ",active_logs=%u", sbi->active_logs); + if (F2FS_IO_SIZE_BITS(sbi)) + seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi)); +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (test_opt(sbi, FAULT_INJECTION)) + seq_puts(seq, ",fault_injection"); +#endif return 0; } @@ -995,6 +1042,7 @@ static void default_options(struct f2fs_sb_info *sbi) sbi->active_logs = NR_CURSEG_TYPE; set_opt(sbi, BG_GC); + set_opt(sbi, INLINE_XATTR); set_opt(sbi, INLINE_DATA); set_opt(sbi, INLINE_DENTRY); set_opt(sbi, EXTENT_CACHE); @@ -1686,36 +1734,55 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) static int f2fs_scan_devices(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + unsigned int max_devices = MAX_DEVICES; int i; - for (i = 0; i < MAX_DEVICES; i++) { - if (!RDEV(i).path[0]) + /* Initialize single device information */ + if (!RDEV(0).path[0]) { + if (!bdev_is_zoned(sbi->sb->s_bdev)) return 0; + max_devices = 1; + } - if (i == 0) { - sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) * - MAX_DEVICES, GFP_KERNEL); - if (!sbi->devs) - return -ENOMEM; - } + /* + * Initialize multiple devices information, or single + * zoned block device information. 
+ */ + sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info), + GFP_KERNEL); + if (!sbi->devs) + return -ENOMEM; - memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN); - FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments); - if (i == 0) { - FDEV(i).start_blk = 0; - FDEV(i).end_blk = FDEV(i).start_blk + - (FDEV(i).total_segments << - sbi->log_blocks_per_seg) - 1 + - le32_to_cpu(raw_super->segment0_blkaddr); - } else { - FDEV(i).start_blk = FDEV(i - 1).end_blk + 1; - FDEV(i).end_blk = FDEV(i).start_blk + - (FDEV(i).total_segments << - sbi->log_blocks_per_seg) - 1; - } + for (i = 0; i < max_devices; i++) { - FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, + if (i > 0 && !RDEV(i).path[0]) + break; + + if (max_devices == 1) { + /* Single zoned block device mount */ + FDEV(0).bdev = + blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, + sbi->sb->s_mode, sbi->sb->s_type); + } else { + /* Multi-device mount */ + memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN); + FDEV(i).total_segments = + le32_to_cpu(RDEV(i).total_segments); + if (i == 0) { + FDEV(i).start_blk = 0; + FDEV(i).end_blk = FDEV(i).start_blk + + (FDEV(i).total_segments << + sbi->log_blocks_per_seg) - 1 + + le32_to_cpu(raw_super->segment0_blkaddr); + } else { + FDEV(i).start_blk = FDEV(i - 1).end_blk + 1; + FDEV(i).end_blk = FDEV(i).start_blk + + (FDEV(i).total_segments << + sbi->log_blocks_per_seg) - 1; + } + FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, sbi->sb->s_mode, sbi->sb->s_type); + } if (IS_ERR(FDEV(i).bdev)) return PTR_ERR(FDEV(i).bdev); @@ -1735,6 +1802,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) "Failed to initialize F2FS blkzone information"); return -EINVAL; } + if (max_devices == 1) + break; f2fs_msg(sbi->sb, KERN_INFO, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", i, FDEV(i).path, @@ -1751,6 +1820,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) FDEV(i).total_segments, FDEV(i).start_blk, FDEV(i).end_blk); } + f2fs_msg(sbi->sb, KERN_INFO, + "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi)); return 0; } @@ -1868,12 +1939,19 @@ try_onemore: if (err) goto free_options; + if (F2FS_IO_SIZE(sbi) > 1) { + sbi->write_io_dummy = + mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); + if (!sbi->write_io_dummy) + goto free_options; + } + /* get an inode for meta space */ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); if (IS_ERR(sbi->meta_inode)) { f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); err = PTR_ERR(sbi->meta_inode); - goto free_options; + goto free_io_dummy; } err = get_valid_checkpoint(sbi); @@ -2048,6 +2126,8 @@ skip_recovery: sbi->valid_super_block ? 
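f2fs_scan_devices() above lays every device out in one contiguous block address space: device 0 also covers the metadata area in front of segment 0 (hence the extra segment0_blkaddr term), and each later device starts right after the previous device's end_blk. A sketch of that layout arithmetic with made-up geometry:

#include <stdio.h>

struct dev_info {
	unsigned int total_segments;
	unsigned int start_blk;
	unsigned int end_blk;
};

static void layout_devices(struct dev_info *dev, int ndevs,
			   unsigned int segment0_blkaddr,
			   unsigned int log_blocks_per_seg)
{
	int i;

	for (i = 0; i < ndevs; i++) {
		unsigned int blocks = dev[i].total_segments << log_blocks_per_seg;

		if (i == 0) {
			dev[i].start_blk = 0;
			dev[i].end_blk = blocks - 1 + segment0_blkaddr;
		} else {
			dev[i].start_blk = dev[i - 1].end_blk + 1;
			dev[i].end_blk = dev[i].start_blk + blocks - 1;
		}
	}
}

int main(void)
{
	struct dev_info dev[2] = { { .total_segments = 512 },
				   { .total_segments = 1024 } };
	int i;

	layout_devices(dev, 2, 512, 9);	/* 512 blocks/seg, segment 0 at 512 */
	for (i = 0; i < 2; i++)
		printf("dev %d: %8x - %8x\n", i, dev[i].start_blk, dev[i].end_blk);
	return 0;
}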
1 : 2, err); } + f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", + cur_cp_version(F2FS_CKPT(sbi))); f2fs_update_time(sbi, CP_TIME); f2fs_update_time(sbi, REQ_TIME); return 0; @@ -2091,6 +2171,8 @@ free_devices: free_meta_inode: make_bad_inode(sbi->meta_inode); iput(sbi->meta_inode); +free_io_dummy: + mempool_destroy(sbi->write_io_dummy); free_options: destroy_percpu_info(sbi); kfree(options); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index c47ce2f330a1..7298a4488f7f 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -217,6 +217,112 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index, return entry; } +static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr, + void **last_addr, int index, + size_t len, const char *name) +{ + struct f2fs_xattr_entry *entry; + unsigned int inline_size = F2FS_INLINE_XATTR_ADDRS << 2; + + list_for_each_xattr(entry, base_addr) { + if ((void *)entry + sizeof(__u32) > base_addr + inline_size || + (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) > + base_addr + inline_size) { + *last_addr = entry; + return NULL; + } + if (entry->e_name_index != index) + continue; + if (entry->e_name_len != len) + continue; + if (!memcmp(entry->e_name, name, len)) + break; + } + return entry; +} + +static int lookup_all_xattrs(struct inode *inode, struct page *ipage, + unsigned int index, unsigned int len, + const char *name, struct f2fs_xattr_entry **xe, + void **base_addr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + void *cur_addr, *txattr_addr, *last_addr = NULL; + nid_t xnid = F2FS_I(inode)->i_xattr_nid; + unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0; + unsigned int inline_size = 0; + int err = 0; + + inline_size = inline_xattr_size(inode); + + if (!size && !inline_size) + return -ENODATA; + + txattr_addr = kzalloc(inline_size + size + sizeof(__u32), + GFP_F2FS_ZERO); + if (!txattr_addr) + return -ENOMEM; + + /* read from inline xattr */ + if (inline_size) { + struct page *page = NULL; + void *inline_addr; + + if (ipage) { + inline_addr = inline_xattr_addr(ipage); + } else { + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto out; + } + inline_addr = inline_xattr_addr(page); + } + memcpy(txattr_addr, inline_addr, inline_size); + f2fs_put_page(page, 1); + + *xe = __find_inline_xattr(txattr_addr, &last_addr, + index, len, name); + if (*xe) + goto check; + } + + /* read from xattr node block */ + if (xnid) { + struct page *xpage; + void *xattr_addr; + + /* The inode already has an extended attribute block. 
*/ + xpage = get_node_page(sbi, xnid); + if (IS_ERR(xpage)) { + err = PTR_ERR(xpage); + goto out; + } + + xattr_addr = page_address(xpage); + memcpy(txattr_addr + inline_size, xattr_addr, size); + f2fs_put_page(xpage, 1); + } + + if (last_addr) + cur_addr = XATTR_HDR(last_addr) - 1; + else + cur_addr = txattr_addr; + + *xe = __find_xattr(cur_addr, index, len, name); +check: + if (IS_XATTR_LAST_ENTRY(*xe)) { + err = -ENODATA; + goto out; + } + + *base_addr = txattr_addr; + return 0; +out: + kzfree(txattr_addr); + return err; +} + static int read_all_xattrs(struct inode *inode, struct page *ipage, void **base_addr) { @@ -348,23 +454,20 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, } xattr_addr = page_address(xpage); - memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE - - sizeof(struct node_footer)); + memcpy(xattr_addr, txattr_addr + inline_size, MAX_XATTR_BLOCK_SIZE); set_page_dirty(xpage); f2fs_put_page(xpage, 1); - /* need to checkpoint during fsync */ - F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi)); return 0; } int f2fs_getxattr(struct inode *inode, int index, const char *name, void *buffer, size_t buffer_size, struct page *ipage) { - struct f2fs_xattr_entry *entry; - void *base_addr; + struct f2fs_xattr_entry *entry = NULL; int error = 0; - size_t size, len; + unsigned int size, len; + void *base_addr = NULL; if (name == NULL) return -EINVAL; @@ -373,21 +476,16 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, if (len > F2FS_NAME_LEN) return -ERANGE; - error = read_all_xattrs(inode, ipage, &base_addr); + error = lookup_all_xattrs(inode, ipage, index, len, name, + &entry, &base_addr); if (error) return error; - entry = __find_xattr(base_addr, index, len, name); - if (IS_XATTR_LAST_ENTRY(entry)) { - error = -ENODATA; - goto cleanup; - } - size = le16_to_cpu(entry->e_value_size); if (buffer && size > buffer_size) { error = -ERANGE; - goto cleanup; + goto out; } if (buffer) { @@ -395,8 +493,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, memcpy(buffer, pval, size); } error = size; - -cleanup: +out: kzfree(base_addr); return error; } @@ -445,6 +542,13 @@ cleanup: return error; } +static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry, + const void *value, size_t size) +{ + void *pval = entry->e_name + entry->e_name_len; + return (entry->e_value_size == size) && !memcmp(pval, value, size); +} + static int __f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *ipage, int flags) @@ -479,12 +583,17 @@ static int __f2fs_setxattr(struct inode *inode, int index, found = IS_XATTR_LAST_ENTRY(here) ? 
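lookup_all_xattrs() above copies the inline xattr area and, if present, the xattr node block into one temporary buffer and then walks the variable-length entries until it finds a matching name or hits the zero terminator. A simplified sketch of that entry walk; the header here is 1-byte packed, unlike the real 4-byte-aligned f2fs_xattr_entry:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xent {			/* simplified entry header, 1-byte packed */
	uint8_t name_index;	/* 0 terminates the list */
	uint8_t name_len;
	uint8_t value_size;
	/* name bytes, then value bytes, follow immediately */
};

static const void *find_xattr(const void *base, uint8_t index,
			      const char *name, uint8_t *value_size)
{
	const uint8_t *p = base;

	for (;;) {
		const struct xent *e = (const struct xent *)p;
		const char *ename = (const char *)(e + 1);

		if (e->name_index == 0)
			return NULL;	/* hit the terminator: not found */
		if (e->name_index == index &&
		    e->name_len == strlen(name) &&
		    !memcmp(ename, name, e->name_len)) {
			*value_size = e->value_size;
			return ename + e->name_len;	/* points at the value */
		}
		p += sizeof(*e) + e->name_len + e->value_size;
	}
}

int main(void)
{
	uint8_t buf[64] = { 0 };	/* zeroed tail acts as the terminator */
	const char *val;
	uint8_t vsize;

	/* one entry: index 1, name "user.tag", value "hi" */
	buf[0] = 1;
	buf[1] = 8;
	buf[2] = 2;
	memcpy(buf + 3, "user.tag", 8);
	memcpy(buf + 11, "hi", 2);

	val = find_xattr(buf, 1, "user.tag", &vsize);
	if (val)
		printf("found %d-byte value: %.*s\n", vsize, vsize, val);
	return 0;
}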
0 : 1; - if ((flags & XATTR_REPLACE) && !found) { + if (found) { + if ((flags & XATTR_CREATE)) { + error = -EEXIST; + goto exit; + } + + if (f2fs_xattr_value_same(here, value, size)) + goto exit; + } else if ((flags & XATTR_REPLACE)) { error = -ENODATA; goto exit; - } else if ((flags & XATTR_CREATE) && found) { - error = -EEXIST; - goto exit; } last = here; diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index f990de20cdcd..d5a94928c116 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -72,9 +72,10 @@ struct f2fs_xattr_entry { for (entry = XATTR_FIRST_ENTRY(addr);\ !IS_XATTR_LAST_ENTRY(entry);\ entry = XATTR_NEXT_ENTRY(entry)) - -#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE - \ - sizeof(struct node_footer) - sizeof(__u32)) +#define MAX_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer)) +#define VALID_XATTR_BLOCK_SIZE (MAX_XATTR_BLOCK_SIZE - sizeof(__u32)) +#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \ + VALID_XATTR_BLOCK_SIZE) #define MAX_VALUE_LEN(i) (MIN_OFFSET(i) - \ sizeof(struct f2fs_xattr_header) - \ diff --git a/fs/fat/fat.h b/fs/fat/fat.h index e6b764a17a9c..051dac1ce3be 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -364,8 +364,8 @@ extern const struct file_operations fat_file_operations; extern const struct inode_operations fat_file_inode_operations; extern int fat_setattr(struct dentry *dentry, struct iattr *attr); extern void fat_truncate_blocks(struct inode *inode, loff_t offset); -extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int fat_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); diff --git a/fs/fat/file.c b/fs/fat/file.c index 3d04b124bce0..4724cc9ad650 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -365,9 +365,10 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset) fat_flush_inodes(inode->i_sb, inode, NULL); } -int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int fat_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size; diff --git a/fs/fcntl.c b/fs/fcntl.c index e1c54f20325c..be8fbe289087 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -7,6 +7,7 @@ #include <linux/syscalls.h> #include <linux/init.h> #include <linux/mm.h> +#include <linux/sched/task.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/fdtable.h> diff --git a/fs/file.c b/fs/file.c index 69d6990e3021..ad6f094f2eff 100644 --- a/fs/file.c +++ b/fs/file.c @@ -12,7 +12,7 @@ #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/time.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/file.h> diff --git a/fs/file_table.c b/fs/file_table.c index 6d982b57de92..954d510b765a 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/security.h> +#include <linux/cred.h> #include <linux/eventpoll.h> #include <linux/rcupdate.h> #include <linux/mount.h> diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 7dca743b2ce1..be0250788b73 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -1,5 +1,6 @@ #include <linux/export.h> -#include <linux/sched.h> +#include 
<linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/fs.h> #include <linux/path.h> #include <linux/slab.h> diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index 5d5ddaa84b21..67f940892ef8 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -329,7 +329,7 @@ static void fscache_objlist_config(struct fscache_objlist_data *data) config = 0; rcu_read_lock(); - confkey = user_key_payload(key); + confkey = user_key_payload_rcu(key); buf = confkey->data; for (len = confkey->datalen - 1; len >= 0; len--) { diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index f11792672977..b681b43c766e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/poll.h> +#include <linux/sched/signal.h> #include <linux/uio.h> #include <linux/miscdevice.h> #include <linux/pagemap.h> diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 811fd8929a18..00800c07ba1c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -473,7 +473,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, if (err) { fuse_sync_release(ff, flags); } else { - file->private_data = fuse_file_get(ff); + file->private_data = ff; fuse_finish_open(inode, file); } return err; @@ -1777,10 +1777,10 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) return ret; } -static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, - struct kstat *stat) +static int fuse_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(entry); + struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); if (!fuse_allow_current_process(fc)) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e80bfd06daf5..ec238fb5a584 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -58,7 +58,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) } INIT_LIST_HEAD(&ff->write_entry); - atomic_set(&ff->count, 0); + atomic_set(&ff->count, 1); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); @@ -75,7 +75,7 @@ void fuse_file_free(struct fuse_file *ff) kfree(ff); } -struct fuse_file *fuse_file_get(struct fuse_file *ff) +static struct fuse_file *fuse_file_get(struct fuse_file *ff) { atomic_inc(&ff->count); return ff; @@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) iput(req->misc.release.inode); fuse_put_request(ff->fc, req); } else if (sync) { + __set_bit(FR_FORCE, &req->flags); __clear_bit(FR_BACKGROUND, &req->flags); fuse_request_send(ff->fc, req); iput(req->misc.release.inode); @@ -146,7 +147,7 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, ff->open_flags &= ~FOPEN_DIRECT_IO; ff->nodeid = nodeid; - file->private_data = fuse_file_get(ff); + file->private_data = ff; return 0; } @@ -245,14 +246,9 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) void fuse_release_common(struct file *file, int opcode) { - struct fuse_file *ff; - struct fuse_req *req; - - ff = file->private_data; - if (unlikely(!ff)) - return; + struct fuse_file *ff = file->private_data; + struct fuse_req *req = ff->reserved_req; - req = ff->reserved_req; fuse_prepare_release(ff, file->f_flags, opcode); if (ff->flock) { @@ -297,13 +293,13 @@ static int fuse_release(struct inode *inode, struct file *file) void fuse_sync_release(struct fuse_file *ff, int flags) { - WARN_ON(atomic_read(&ff->count) > 1); + WARN_ON(atomic_read(&ff->count) != 1); fuse_prepare_release(ff, 
flags, FUSE_RELEASE); - __set_bit(FR_FORCE, &ff->reserved_req->flags); - __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags); - fuse_request_send(ff->fc, ff->reserved_req); - fuse_put_request(ff->fc, ff->reserved_req); - kfree(ff); + /* + * iput(NULL) is a no-op and since the refcount is 1 and everything's + * synchronous, we are fine with not doing igrab() here" + */ + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 052f8d3c41cb..32ac2c9b09c0 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -732,7 +732,6 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, int fuse_open_common(struct inode *inode, struct file *file, bool isdir); struct fuse_file *fuse_file_alloc(struct fuse_conn *fc); -struct fuse_file *fuse_file_get(struct fuse_file *ff); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index eb7724b8578a..e279c3ce27be 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -13,6 +13,7 @@ #include <linux/buffer_head.h> #include <linux/namei.h> #include <linux/mm.h> +#include <linux/cred.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include <linux/gfs2_ondisk.h> @@ -1959,9 +1960,10 @@ out: /** * gfs2_getattr - Read out an inode's attributes - * @mnt: The vfsmount the inode is being accessed from - * @dentry: The dentry to stat + * @path: Object to query * @stat: The inode's stats + * @request_mask: Mask of STATX_xxx flags indicating the caller's interests + * @flags: AT_STATX_xxx setting * * This may be called from the VFS directly, or from within GFS2 with the * inode locked, so we look to see if the glock is already locked and only @@ -1972,10 +1974,10 @@ out: * Returns: errno */ -static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int gfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int error; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 8b907c5cc913..0515f0a68637 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/delay.h> #include <linux/gfs2_ondisk.h> +#include <linux/sched/signal.h> #include "incore.h" #include "glock.h" diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e3ee387a6dfe..361796a84fce 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -10,7 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bio.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index f8d30e41d1d3..7a515345610c 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> +#include <linux/cred.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index f776acf2378a..bfbba799430f 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -14,6 +14,7 @@ #include <linux/pagemap.h> #include <linux/mpage.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/uio.h> #include <linux/xattr.h> diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c index a3ec3ae7d347..482081bcdf70 100644 --- 
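The fuse_file changes above initialize ff->count to 1 at allocation instead of taking an extra reference at open time, so both the regular release path and fuse_sync_release() now funnel into the same fuse_file_put(). A userspace sketch of that create-with-one-reference pattern, with hypothetical names and none of the request plumbing:

#include <stdio.h>
#include <stdlib.h>

struct ff {
	int count;	/* references; creation itself counts as one */
};

static struct ff *ff_alloc(void)
{
	struct ff *f = malloc(sizeof(*f));

	if (f)
		f->count = 1;	/* previously 0, with an extra get at open */
	return f;
}

static void ff_get(struct ff *f)
{
	f->count++;
}

static void ff_put(struct ff *f)
{
	if (--f->count == 0) {
		printf("sending RELEASE and freeing\n");
		free(f);
	}
}

int main(void)
{
	struct ff *f = ff_alloc();	/* open: count == 1 */

	if (!f)
		return 1;
	ff_get(f);			/* e.g. a pending request holds it */
	ff_put(f);			/* request finished */
	ff_put(f);			/* close: count hits 0, release runs */
	return 0;
}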
a/fs/hfs/mdb.c +++ b/fs/hfs/mdb.c @@ -38,7 +38,7 @@ static int hfs_get_last_session(struct super_block *sb, /* default values */ *start = 0; - *size = sb->s_bdev->bd_inode->i_size >> 9; + *size = i_size_read(sb->s_bdev->bd_inode) >> 9; if (HFS_SB(sb)->session >= 0) { te.cdte_track = HFS_SB(sb)->session; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 2e796f8302ff..e8638d528195 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -14,6 +14,7 @@ #include <linux/pagemap.h> #include <linux/mpage.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/uio.h> #include "hfsplus_fs.h" diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index ebb85e5f6549..e254fa0f0697 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -132,7 +132,7 @@ static int hfsplus_get_last_session(struct super_block *sb, /* default values */ *start = 0; - *size = sb->s_bdev->bd_inode->i_size >> 9; + *size = i_size_read(sb->s_bdev->bd_inode) >> 9; if (HFSPLUS_SB(sb)->session >= 0) { te.cdte_track = HFSPLUS_SB(sb)->session; diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index aebb78f9e47f..d352f3a6af7f 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -18,7 +18,7 @@ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/blkdev.h> #include <asm/unaligned.h> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 54de77e78775..8f96461236f6 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -11,7 +11,7 @@ #include <linux/thread_info.h> #include <asm/current.h> -#include <linux/sched.h> /* remove ASAP */ +#include <linux/sched/signal.h> /* remove ASAP */ #include <linux/falloc.h> #include <linux/fs.h> #include <linux/mount.h> diff --git a/fs/ioctl.c b/fs/ioctl.c index cb9b02940805..569db68d02b3 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -15,6 +15,8 @@ #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/falloc.h> +#include <linux/sched/signal.h> + #include "internal.h" #include <asm/ioctls.h> diff --git a/fs/iomap.c b/fs/iomap.c index d209f42cdcb8..3ca1a8e44135 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -26,6 +26,8 @@ #include <linux/buffer_head.h> #include <linux/task_io_accounting_ops.h> #include <linux/dax.h> +#include <linux/sched/signal.h> + #include "internal.h" /* @@ -420,8 +422,8 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { - unsigned blocksize = (1 << inode->i_blkbits); - unsigned off = pos & (blocksize - 1); + unsigned int blocksize = i_blocksize(inode); + unsigned int off = pos & (blocksize - 1); /* Block boundary? 
Nothing to do */ if (!off) @@ -735,9 +737,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct iomap *iomap) { struct iomap_dio *dio = data; - unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); - unsigned fs_block_size = (1 << inode->i_blkbits), pad; - unsigned align = iov_iter_alignment(dio->submit.iter); + unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); + unsigned int fs_block_size = i_blocksize(inode), pad; + unsigned int align = iov_iter_alignment(dio->submit.iter); struct iov_iter iter; struct bio *bio; bool need_zeroout = false; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 871c8b392099..020ba0936146 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/nls.h> #include <linux/ctype.h> #include <linux/statfs.h> diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index e5c1783ab64a..453a6a1fff34 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -16,7 +16,7 @@ #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/completion.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/freezer.h> #include <linux/kthread.h> #include "nodelist.h" diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 567653f7c0ce..76fa814df3d1 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -15,6 +15,7 @@ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/mtd/mtd.h> diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index cda0774c2c9c..a7bbe879cfc3 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -14,7 +14,7 @@ #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> -#include <linux/sched.h> /* For cond_resched() */ +#include <linux/sched/signal.h> #include "nodelist.h" #include "debug.h" diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 2be7c9ce6663..c64c2574a0aa 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; - tmp_bh.b_size = 1 << inode->i_blkbits; + tmp_bh.b_size = i_blocksize(inode); err = jfs_get_block(inode, blk, &tmp_bh, 0); if (err) return err; @@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type, sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; - tmp_bh.b_size = 1 << inode->i_blkbits; + tmp_bh.b_size = i_blocksize(inode); err = jfs_get_block(inode, blk, &tmp_bh, 1); if (err) goto out; diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 439b946c4808..db5900aaa55a 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -478,7 +478,7 @@ static void kernfs_drain(struct kernfs_node *kn) rwsem_release(&kn->dep_map, 1, _RET_IP_); } - kernfs_unmap_bin_file(kn); + kernfs_drain_open_files(kn); mutex_lock(&kernfs_mutex); } diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 4f0535890b30..8e4dc7ab584c 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -13,7 +13,7 @@ #include <linux/slab.h> #include <linux/poll.h> #include <linux/pagemap.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/fsnotify.h> #include "kernfs-internal.h" @@ -515,7 +515,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma) goto out_put; rc = 0; - of->mmapped = 1; + of->mmapped = true; of->vm_ops 
= vma->vm_ops; vma->vm_ops = &kernfs_vm_ops; out_put: @@ -707,7 +707,8 @@ static int kernfs_fop_open(struct inode *inode, struct file *file) if (error) goto err_free; - ((struct seq_file *)file->private_data)->private = of; + of->seq_file = file->private_data; + of->seq_file->private = of; /* seq_file clears PWRITE unconditionally, restore it if WRITE */ if (file->f_mode & FMODE_WRITE) @@ -716,13 +717,22 @@ static int kernfs_fop_open(struct inode *inode, struct file *file) /* make sure we have open node struct */ error = kernfs_get_open_node(kn, of); if (error) - goto err_close; + goto err_seq_release; + + if (ops->open) { + /* nobody has access to @of yet, skip @of->mutex */ + error = ops->open(of); + if (error) + goto err_put_node; + } /* open succeeded, put active references */ kernfs_put_active(kn); return 0; -err_close: +err_put_node: + kernfs_put_open_node(kn, of); +err_seq_release: seq_release(inode, file); err_free: kfree(of->prealloc_buf); @@ -732,11 +742,41 @@ err_out: return error; } +/* used from release/drain to ensure that ->release() is called exactly once */ +static void kernfs_release_file(struct kernfs_node *kn, + struct kernfs_open_file *of) +{ + /* + * @of is guaranteed to have no other file operations in flight and + * we just want to synchronize release and drain paths. + * @kernfs_open_file_mutex is enough. @of->mutex can't be used + * here because drain path may be called from places which can + * cause circular dependency. + */ + lockdep_assert_held(&kernfs_open_file_mutex); + + if (!of->released) { + /* + * A file is never detached without being released and we + * need to be able to release files which are deactivated + * and being drained. Don't use kernfs_ops(). + */ + kn->attr.ops->release(of); + of->released = true; + } +} + static int kernfs_fop_release(struct inode *inode, struct file *filp) { struct kernfs_node *kn = filp->f_path.dentry->d_fsdata; struct kernfs_open_file *of = kernfs_of(filp); + if (kn->flags & KERNFS_HAS_RELEASE) { + mutex_lock(&kernfs_open_file_mutex); + kernfs_release_file(kn, of); + mutex_unlock(&kernfs_open_file_mutex); + } + kernfs_put_open_node(kn, of); seq_release(inode, filp); kfree(of->prealloc_buf); @@ -745,12 +785,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp) return 0; } -void kernfs_unmap_bin_file(struct kernfs_node *kn) +void kernfs_drain_open_files(struct kernfs_node *kn) { struct kernfs_open_node *on; struct kernfs_open_file *of; - if (!(kn->flags & KERNFS_HAS_MMAP)) + if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE))) return; spin_lock_irq(&kernfs_open_node_lock); @@ -762,10 +802,16 @@ void kernfs_unmap_bin_file(struct kernfs_node *kn) return; mutex_lock(&kernfs_open_file_mutex); + list_for_each_entry(of, &on->files, list) { struct inode *inode = file_inode(of->file); - unmap_mapping_range(inode->i_mapping, 0, 0, 1); + + if (kn->flags & KERNFS_HAS_MMAP) + unmap_mapping_range(inode->i_mapping, 0, 0, 1); + + kernfs_release_file(kn, of); } + mutex_unlock(&kernfs_open_file_mutex); kernfs_put_open_node(kn, NULL); @@ -964,6 +1010,8 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, kn->flags |= KERNFS_HAS_SEQ_SHOW; if (ops->mmap) kn->flags |= KERNFS_HAS_MMAP; + if (ops->release) + kn->flags |= KERNFS_HAS_RELEASE; rc = kernfs_add_one(kn); if (rc) { diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index ac9e108ce1ea..fb4b4a79a0d6 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -200,11 +200,11 @@ static void kernfs_refresh_inode(struct kernfs_node *kn, struct 
inode *inode) set_nlink(inode, kn->dir.subdirs + 2); } -int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int kernfs_iop_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct kernfs_node *kn = dentry->d_fsdata; - struct inode *inode = d_inode(dentry); + struct kernfs_node *kn = path->dentry->d_fsdata; + struct inode *inode = d_inode(path->dentry); mutex_lock(&kernfs_mutex); kernfs_refresh_inode(kn, inode); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index bfd551bbf231..2d5144ab4251 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -80,8 +80,8 @@ extern const struct xattr_handler *kernfs_xattr_handlers[]; void kernfs_evict_inode(struct inode *inode); int kernfs_iop_permission(struct inode *inode, int mask); int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr); -int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int kernfs_iop_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); /* @@ -104,7 +104,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, */ extern const struct file_operations kernfs_file_fops; -void kernfs_unmap_bin_file(struct kernfs_node *kn); +void kernfs_drain_open_files(struct kernfs_node *kn); /* * symlink.c diff --git a/fs/libfs.c b/fs/libfs.c index 28d6f35feed6..a8b62e5d43a9 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -7,6 +7,7 @@ #include <linux/export.h> #include <linux/pagemap.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/mount.h> #include <linux/vfs.h> #include <linux/quotaops.h> @@ -20,10 +21,10 @@ #include "internal.h" -int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int simple_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9); return 0; @@ -1143,10 +1144,10 @@ static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(-ENOENT); } -static int empty_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int empty_dir_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); return 0; } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 1c13dd80744f..e7c8b9c76e48 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -17,7 +17,7 @@ #include <linux/sysctl.h> #include <linux/moduleparam.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/uio.h> @@ -322,6 +322,8 @@ static int lockd_inet6addr_event(struct notifier_block *this, dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ifa->addr; + if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) + sin6.sin6_scope_id = ifa->idev->dev->ifindex; svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, (struct sockaddr *)&sin6); } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index e7d9bf86d975..6ac76b0434e9 100644 --- a/fs/minix/inode.c +++ 
b/fs/minix/inode.c @@ -622,11 +622,14 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) return err; } -int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int minix_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct super_block *sb = dentry->d_sb; - generic_fillattr(d_inode(dentry), stat); - if (INODE_VERSION(d_inode(dentry)) == MINIX_V1) + struct super_block *sb = path->dentry->d_sb; + struct inode *inode = d_inode(path->dentry); + + generic_fillattr(inode, stat); + if (INODE_VERSION(inode) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); else stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb); diff --git a/fs/minix/minix.h b/fs/minix/minix.h index 01ad81dcacc5..663d66138d06 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -51,7 +51,7 @@ extern unsigned long minix_count_free_inodes(struct super_block *sb); extern int minix_new_block(struct inode * inode); extern void minix_free_block(struct inode *inode, unsigned long block); extern unsigned long minix_count_free_blocks(struct super_block *sb); -extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int minix_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); extern void V1_minix_truncate(struct inode *); diff --git a/fs/mpage.c b/fs/mpage.c index 28af984a3d96..baff8f820c29 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -115,7 +115,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) SetPageUptodate(page); return; } - create_empty_buffers(page, 1 << inode->i_blkbits, 0); + create_empty_buffers(page, i_blocksize(inode), 0); } head = page_buffers(page); page_bh = head; diff --git a/fs/namei.c b/fs/namei.c index da689c9c005e..d41fab78798b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -672,17 +672,15 @@ static bool legitimize_links(struct nameidata *nd) /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data - * @dentry: child of nd->path.dentry or NULL - * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * - * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry - * for ref-walk mode. @dentry must be a path found by a do_lookup call on - * @nd or NULL. Must be called from rcu-walk context. + * unlazy_walk attempts to legitimize the current nd->path and nd->root + * for ref-walk mode. + * Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). 
*/ -static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq) +static int unlazy_walk(struct nameidata *nd) { struct dentry *parent = nd->path.dentry; @@ -691,33 +689,66 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; + if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) + goto out1; + if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { + if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) + goto out; + } + rcu_read_unlock(); + BUG_ON(nd->inode != parent->d_inode); + return 0; + +out2: + nd->path.mnt = NULL; + nd->path.dentry = NULL; +out1: + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; +out: + rcu_read_unlock(); + return -ECHILD; +} + +/** + * unlazy_child - try to switch to ref-walk mode. + * @nd: nameidata pathwalk data + * @dentry: child of nd->path.dentry + * @seq: seq number to check dentry against + * Returns: 0 on success, -ECHILD on failure + * + * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry + * for ref-walk mode. @dentry must be a path found by a do_lookup call on + * @nd. Must be called from rcu-walk context. + * Nothing should touch nameidata between unlazy_child() failure and + * terminate_walk(). + */ +static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) +{ + BUG_ON(!(nd->flags & LOOKUP_RCU)); + + nd->flags &= ~LOOKUP_RCU; + if (unlikely(!legitimize_links(nd))) + goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; - if (unlikely(!lockref_get_not_dead(&parent->d_lockref))) + if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref))) goto out1; /* - * For a negative lookup, the lookup sequence point is the parents - * sequence point, and it only needs to revalidate the parent dentry. - * - * For a positive lookup, we need to move both the parent and the - * dentry from the RCU domain to be properly refcounted. And the - * sequence number in the dentry validates *both* dentry counters, - * since we checked the sequence number of the parent after we got - * the child sequence number. So we know the parent must still - * be valid if the child sequence number is still valid. + * We need to move both the parent and the dentry from the RCU domain + * to be properly refcounted. And the sequence number in the dentry + * validates *both* dentry counters, since we checked the sequence + * number of the parent after we got the child sequence number. So we + * know the parent must still be valid if the child sequence number is */ - if (!dentry) { - if (read_seqcount_retry(&parent->d_seq, nd->seq)) - goto out; - BUG_ON(nd->inode != parent->d_inode); - } else { - if (!lockref_get_not_dead(&dentry->d_lockref)) - goto out; - if (read_seqcount_retry(&dentry->d_seq, seq)) - goto drop_dentry; + if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) + goto out; + if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) { + rcu_read_unlock(); + dput(dentry); + goto drop_root_mnt; } - /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. 
@@ -733,10 +764,6 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq rcu_read_unlock(); return 0; -drop_dentry: - rcu_read_unlock(); - dput(dentry); - goto drop_root_mnt; out2: nd->path.mnt = NULL; out1: @@ -749,27 +776,12 @@ drop_root_mnt: return -ECHILD; } -static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) -{ - if (unlikely(!legitimize_path(nd, link, seq))) { - drop_links(nd); - nd->depth = 0; - nd->flags &= ~LOOKUP_RCU; - nd->path.mnt = NULL; - nd->path.dentry = NULL; - if (!(nd->flags & LOOKUP_ROOT)) - nd->root.mnt = NULL; - rcu_read_unlock(); - } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) { - return 0; - } - path_put(link); - return -ECHILD; -} - static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { - return dentry->d_op->d_revalidate(dentry, flags); + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) + return dentry->d_op->d_revalidate(dentry, flags); + else + return 1; } /** @@ -790,7 +802,7 @@ static int complete_walk(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return -ECHILD; } @@ -1016,7 +1028,7 @@ const char *get_link(struct nameidata *nd) touch_atime(&last->link); cond_resched(); } else if (atime_needs_update_rcu(&last->link, inode)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } @@ -1035,7 +1047,7 @@ const char *get_link(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { res = get(NULL, inode, &last->done); if (res == ERR_PTR(-ECHILD)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); res = get(dentry, inode, &last->done); } @@ -1469,19 +1481,14 @@ static struct dentry *lookup_dcache(const struct qstr *name, struct dentry *dir, unsigned int flags) { - struct dentry *dentry; - int error; - - dentry = d_lookup(dir, name); + struct dentry *dentry = d_lookup(dir, name); if (dentry) { - if (dentry->d_flags & DCACHE_OP_REVALIDATE) { - error = d_revalidate(dentry, flags); - if (unlikely(error <= 0)) { - if (!error) - d_invalidate(dentry); - dput(dentry); - return ERR_PTR(error); - } + int error = d_revalidate(dentry, flags); + if (unlikely(error <= 0)) { + if (!error) + d_invalidate(dentry); + dput(dentry); + return ERR_PTR(error); } } return dentry; @@ -1546,7 +1553,7 @@ static int lookup_fast(struct nameidata *nd, bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; return 0; } @@ -1571,14 +1578,8 @@ static int lookup_fast(struct nameidata *nd, return -ECHILD; *seqp = seq; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - status = d_revalidate(dentry, nd->flags); - if (unlikely(status <= 0)) { - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; - if (status == -ECHILD) - status = d_revalidate(dentry, nd->flags); - } else { + status = d_revalidate(dentry, nd->flags); + if (likely(status > 0)) { /* * Note: do negative dentry check after revalidation in * case that drops it. 
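(The fs/namei.c hunks in this region fold the DCACHE_OP_REVALIDATE test into d_revalidate() itself and split the old unlazy_walk(nd, dentry, seq) into unlazy_walk(nd), which legitimizes only nd->path and nd->root, and unlazy_child(nd, dentry, seq), which additionally takes a reference on the child dentry. A minimal C sketch of the resulting caller pattern follows; the function name lookup_fast_sketch and the simplified control flow are illustrative assumptions, not the exact kernel code.)

/* Illustrative sketch only: how a lookup_fast()-style caller reads once
 * d_revalidate() hides the DCACHE_OP_REVALIDATE check and unlazy_child()
 * handles the RCU-to-ref-walk transition for a child dentry. */
static int lookup_fast_sketch(struct nameidata *nd, struct dentry *dentry,
			      unsigned seq)
{
	int status = d_revalidate(dentry, nd->flags);	/* returns 1 if no ->d_revalidate */

	if (status > 0)
		return 1;		/* revalidated, stay in RCU walk */

	if (unlazy_child(nd, dentry, seq))
		return -ECHILD;		/* could not legitimize; caller restarts in ref-walk */

	if (status == -ECHILD)		/* filesystem asked for a non-RCU retry */
		status = d_revalidate(dentry, nd->flags);
	return status;
}

(Pushing the flag check into d_revalidate() is what lets the surrounding hunks drop the open-coded DCACHE_OP_REVALIDATE tests from lookup_dcache(), lookup_fast() and lookup_open().)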
@@ -1589,15 +1590,17 @@ static int lookup_fast(struct nameidata *nd, path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; } + if (unlazy_child(nd, dentry, seq)) + return -ECHILD; + if (unlikely(status == -ECHILD)) + /* we'd been told to redo it in non-rcu mode */ + status = d_revalidate(dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) @@ -1636,8 +1639,7 @@ again: if (IS_ERR(dentry)) goto out; if (unlikely(!d_in_lookup(dentry))) { - if ((dentry->d_flags & DCACHE_OP_REVALIDATE) && - !(flags & LOOKUP_NO_REVAL)) { + if (!(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) { @@ -1668,7 +1670,7 @@ static inline int may_lookup(struct nameidata *nd) int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return inode_permission(nd->inode, MAY_EXEC); @@ -1703,9 +1705,17 @@ static int pick_link(struct nameidata *nd, struct path *link, error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { - if (unlikely(unlazy_link(nd, link, seq))) - return -ECHILD; - error = nd_alloc_stack(nd); + if (unlikely(!legitimize_path(nd, link, seq))) { + drop_links(nd); + nd->depth = 0; + nd->flags &= ~LOOKUP_RCU; + nd->path.mnt = NULL; + nd->path.dentry = NULL; + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; + rcu_read_unlock(); + } else if (likely(unlazy_walk(nd)) == 0) + error = nd_alloc_stack(nd); } if (error) { path_put(link); @@ -2122,7 +2132,7 @@ OK: } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return -ENOTDIR; @@ -2579,7 +2589,7 @@ mountpoint_last(struct nameidata *nd) /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } @@ -3072,9 +3082,6 @@ static int lookup_open(struct nameidata *nd, struct path *path, if (d_in_lookup(dentry)) break; - if (!(dentry->d_flags & DCACHE_OP_REVALIDATE)) - break; - error = d_revalidate(dentry, nd->flags); if (likely(error > 0)) break; @@ -3356,13 +3363,50 @@ out: return error; } +struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag) +{ + static const struct qstr name = QSTR_INIT("/", 1); + struct dentry *child = NULL; + struct inode *dir = dentry->d_inode; + struct inode *inode; + int error; + + /* we want directory to be writable */ + error = inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (error) + goto out_err; + error = -EOPNOTSUPP; + if (!dir->i_op->tmpfile) + goto out_err; + error = -ENOMEM; + child = d_alloc(dentry, &name); + if (unlikely(!child)) + goto out_err; + error = dir->i_op->tmpfile(dir, child, mode); + if (error) + goto out_err; + error = -ENOENT; + inode = child->d_inode; + if (unlikely(!inode)) + goto out_err; + if (!(open_flag & O_EXCL)) { + spin_lock(&inode->i_lock); + inode->i_state |= I_LINKABLE; + spin_unlock(&inode->i_lock); + } + return child; + +out_err: + dput(child); + return ERR_PTR(error); +} +EXPORT_SYMBOL(vfs_tmpfile); + static int do_tmpfile(struct nameidata *nd, unsigned flags, const 
struct open_flags *op, struct file *file, int *opened) { - static const struct qstr name = QSTR_INIT("/", 1); struct dentry *child; - struct inode *dir; struct path path; int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); if (unlikely(error)) @@ -3370,25 +3414,12 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, error = mnt_want_write(path.mnt); if (unlikely(error)) goto out; - dir = path.dentry->d_inode; - /* we want directory to be writable */ - error = inode_permission(dir, MAY_WRITE | MAY_EXEC); - if (error) + child = vfs_tmpfile(path.dentry, op->mode, op->open_flag); + error = PTR_ERR(child); + if (unlikely(IS_ERR(child))) goto out2; - if (!dir->i_op->tmpfile) { - error = -EOPNOTSUPP; - goto out2; - } - child = d_alloc(path.dentry, &name); - if (unlikely(!child)) { - error = -ENOMEM; - goto out2; - } dput(path.dentry); path.dentry = child; - error = dir->i_op->tmpfile(dir, child, op->mode); - if (error) - goto out2; audit_inode(nd->name, child, 0); /* Don't check for other permissions, the inode was just created */ error = may_open(&path, 0, op->open_flag); @@ -3399,14 +3430,8 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, if (error) goto out2; error = open_check_o_direct(file); - if (error) { + if (error) fput(file); - } else if (!(op->open_flag & O_EXCL)) { - struct inode *inode = file_inode(file); - spin_lock(&inode->i_lock); - inode->i_state |= I_LINKABLE; - spin_unlock(&inode->i_lock); - } out2: mnt_drop_write(path.mnt); out: diff --git a/fs/namespace.c b/fs/namespace.c index 8bfad42c1ccf..cc1375eff88c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -15,6 +15,7 @@ #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> +#include <linux/cred.h> #include <linux/idr.h> #include <linux/init.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. 
*/ @@ -24,6 +25,8 @@ #include <linux/magic.h> #include <linux/bootmem.h> #include <linux/task_work.h> +#include <linux/sched/task.h> + #include "pnode.h" #include "internal.h" diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 7eb89c23c847..d5606099712a 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -30,6 +30,7 @@ #include <linux/vfs.h> #include <linux/mount.h> #include <linux/seq_file.h> +#include <linux/sched/signal.h> #include <linux/namei.h> #include <net/sock.h> diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 4434e4977cf3..12550c2320cc 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -19,6 +19,7 @@ #include <linux/highuid.h> #include <linux/vmalloc.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/uaccess.h> diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index f32f272ee501..98b6db0ed63e 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -16,6 +16,7 @@ #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/in.h> #include <linux/net.h> @@ -40,19 +41,12 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags) return kernel_recvmsg(sock, &msg, &iov, 1, size, flags); } -static inline int do_send(struct socket *sock, struct kvec *vec, int count, - int len, unsigned flags) -{ - struct msghdr msg = { .msg_flags = flags }; - return kernel_sendmsg(sock, &msg, vec, count, len); -} - static int _send(struct socket *sock, const void *buff, int len) { - struct kvec vec; - vec.iov_base = (void *) buff; - vec.iov_len = len; - return do_send(sock, &vec, 1, len, 0); + struct msghdr msg = { .msg_flags = 0 }; + struct kvec vec = {.iov_base = (void *)buff, .iov_len = len}; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len); + return sock_sendmsg(sock, &msg); } struct ncp_request_reply { @@ -63,9 +57,7 @@ struct ncp_request_reply { size_t datalen; int result; enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status; - struct kvec* tx_ciov; - size_t tx_totallen; - size_t tx_iovlen; + struct iov_iter from; struct kvec tx_iov[3]; u_int16_t tx_type; u_int32_t sign[6]; @@ -205,28 +197,22 @@ static inline void __ncptcp_abort(struct ncp_server *server) static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) { - struct kvec vec[3]; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0])); - return do_send(sock, vec, req->tx_iovlen, - req->tx_totallen, MSG_DONTWAIT); + struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT }; + return sock_sendmsg(sock, &msg); } static void __ncptcp_try_send(struct ncp_server *server) { struct ncp_request_reply *rq; - struct kvec *iov; - struct kvec iovc[3]; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT }; int result; rq = server->tx.creq; if (!rq) return; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0])); - result = do_send(server->ncp_sock, iovc, rq->tx_iovlen, - rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT); + msg.msg_iter = rq->from; + result = sock_sendmsg(server->ncp_sock, &msg); if (result == -EAGAIN) return; @@ -236,21 +222,12 @@ static void __ncptcp_try_send(struct ncp_server *server) __ncp_abort_request(server, rq, result); return; } - if (result >= rq->tx_totallen) { + if (!msg_data_left(&msg)) { server->rcv.creq = rq; server->tx.creq = NULL; return; } - rq->tx_totallen -= result; - iov = rq->tx_ciov; - 
while (iov->iov_len <= result) { - result -= iov->iov_len; - iov++; - rq->tx_iovlen--; - } - iov->iov_base += result; - iov->iov_len -= result; - rq->tx_ciov = iov; + rq->from = msg.msg_iter; } static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) @@ -263,22 +240,21 @@ static inline void ncp_init_header(struct ncp_server *server, struct ncp_request static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov + 1; - - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); - signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_le32(req->tx_totallen), req->sign); + signlen = sign_packet(server, + req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, + len - sizeof(struct ncp_request_header) + 1, + cpu_to_le32(len), req->sign); if (signlen) { - req->tx_ciov[1].iov_base = req->sign; - req->tx_ciov[1].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + /* NCP over UDP appends signature */ + req->tx_iov[2].iov_base = req->sign; + req->tx_iov[2].iov_len = signlen; } + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov + 1, signlen ? 2 : 1, len + signlen); server->rcv.creq = req; server->timeout_last = server->m.time_out; server->timeout_retries = server->m.retry_count; @@ -292,24 +268,23 @@ static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov; - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16; + len - sizeof(struct ncp_request_header) + 1, + cpu_to_be32(len + 24), req->sign + 4) + 16; req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC); - req->sign[1] = htonl(req->tx_totallen + signlen); + req->sign[1] = htonl(len + signlen); req->sign[2] = htonl(NCP_TCP_XMIT_VERSION); req->sign[3] = htonl(req->datalen + 8); + /* NCP over TCP prepends signature */ req->tx_iov[0].iov_base = req->sign; req->tx_iov[0].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov, 2, len + signlen); server->tx.creq = req; __ncptcp_try_send(server); @@ -364,18 +339,17 @@ static void __ncp_next_request(struct ncp_server *server) static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) { if (server->info_sock) { - struct kvec iov[2]; - __be32 hdr[2]; - - hdr[0] = cpu_to_be32(len + 8); - hdr[1] = cpu_to_be32(id); - - iov[0].iov_base = hdr; - iov[0].iov_len = 8; - iov[1].iov_base = (void *) data; - iov[1].iov_len = len; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL }; + __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)}; + struct kvec iov[2] = { + {.iov_base = hdr, .iov_len = 8}, + {.iov_base = (void *)data, .iov_len = len}, + }; + + 
iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, + iov, 2, len + 8); - do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL); + sock_sendmsg(server->info_sock, &msg); } } @@ -525,7 +499,7 @@ static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) return result; } if (result > len) { - pr_err("tcp: bug in recvmsg (%u > %Zu)\n", result, len); + pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len); return -EIO; } return result; @@ -619,7 +593,7 @@ skipdata:; goto skipdata2; } if (datalen > req->datalen + 8) { - pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8); + pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8); server->rcv.state = 3; goto skipdata; } @@ -711,8 +685,6 @@ static int do_ncp_rpc_call(struct ncp_server *server, int size, req->datalen = max_reply_size; req->tx_iov[1].iov_base = server->packet; req->tx_iov[1].iov_len = size; - req->tx_iovlen = 1; - req->tx_totallen = size; req->tx_type = *(u_int16_t*)server->packet; result = ncp_add_request(server, req); diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 2905479f214a..0ca370d23ddb 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -381,7 +381,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) struct blk_plug plug; int i; - dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); + dprintk("%s enter, %zu@%lld\n", __func__, count, offset); /* At this point, header->page_aray is a (sequential) list of nfs_pages. * We want to write each, and if there is an error set pnfs_error diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index 6de15709d024..2ae676f93e6b 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -141,8 +141,7 @@ int nfs_cache_register_net(struct net *net, struct cache_detail *cd) void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd) { - if (cd->u.pipefs.dir) - sunrpc_cache_unregister_pipefs(cd); + sunrpc_cache_unregister_pipefs(cd); } void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd) diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 484bebc20bca..bb79972dc638 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -9,6 +9,7 @@ #include <linux/completion.h> #include <linux/ip.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/nfs_fs.h> diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index eb094c6011d8..d051fc3583a9 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -83,23 +83,15 @@ static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes) return p; } -static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str) +static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, + const char **str, size_t maxlen) { - __be32 *p; - - p = read_buf(xdr, 4); - if (unlikely(p == NULL)) - return htonl(NFS4ERR_RESOURCE); - *len = ntohl(*p); - - if (*len != 0) { - p = read_buf(xdr, *len); - if (unlikely(p == NULL)) - return htonl(NFS4ERR_RESOURCE); - *str = (const char *)p; - } else - *str = NULL; + ssize_t err; + err = xdr_stream_decode_opaque_inline(xdr, (void **)str, maxlen); + if (err < 0) + return cpu_to_be32(NFS4ERR_RESOURCE); + *len = err; return 0; } @@ -162,15 +154,9 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound __be32 *p; __be32 status; - status = decode_string(xdr, 
&hdr->taglen, &hdr->tag); + status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ); if (unlikely(status != 0)) return status; - /* We do not like overly long tags! */ - if (hdr->taglen > CB_OP_TAGLEN_MAXSZ) { - printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n", - __func__, hdr->taglen); - return htonl(NFS4ERR_RESOURCE); - } p = read_buf(xdr, 12); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); @@ -582,12 +568,8 @@ out: static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) { - __be32 *p; - - p = xdr_reserve_space(xdr, 4 + len); - if (unlikely(p == NULL)) - return htonl(NFS4ERR_RESOURCE); - xdr_encode_opaque(p, str, len); + if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0)) + return cpu_to_be32(NFS4ERR_RESOURCE); return 0; } @@ -1083,7 +1065,8 @@ struct svc_version nfs4_callback_version1 = { .vs_proc = nfs4_callback_procedures1, .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, .vs_dispatch = NULL, - .vs_hidden = 1, + .vs_hidden = true, + .vs_need_cong_ctrl = true, }; struct svc_version nfs4_callback_version4 = { @@ -1092,5 +1075,6 @@ struct svc_version nfs4_callback_version4 = { .vs_proc = nfs4_callback_procedures1, .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, .vs_dispatch = NULL, - .vs_hidden = 1, + .vs_hidden = true, + .vs_need_cong_ctrl = true, }; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fad81041f5ab..fb499a3f21b5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2002,6 +2002,29 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) } EXPORT_SYMBOL_GPL(nfs_link); +static void +nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data) +{ + struct dentry *old_dentry = data->old_dentry; + struct dentry *new_dentry = data->new_dentry; + struct inode *old_inode = d_inode(old_dentry); + struct inode *new_inode = d_inode(new_dentry); + + nfs_mark_for_revalidate(old_inode); + + switch (task->tk_status) { + case 0: + if (new_inode != NULL) + nfs_drop_nlink(new_inode); + d_move(old_dentry, new_dentry); + nfs_set_verifier(new_dentry, + nfs_save_change_attribute(data->new_dir)); + break; + case -ENOENT: + nfs_dentry_handle_enoent(old_dentry); + } +} + /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a @@ -2084,7 +2107,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode != NULL) NFS_PROTO(new_inode)->return_delegation(new_inode); - task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, + nfs_complete_rename); if (IS_ERR(task)) { error = PTR_ERR(task); goto out; @@ -2094,21 +2118,11 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error == 0) error = task->tk_status; rpc_put_task(task); - nfs_mark_for_revalidate(old_inode); out: if (rehash) d_rehash(rehash); trace_nfs_rename_exit(old_dir, old_dentry, new_dir, new_dentry, error); - if (!error) { - if (new_inode != NULL) - nfs_drop_nlink(new_inode); - d_move(old_dentry, new_dentry); - nfs_set_verifier(new_dentry, - nfs_save_change_attribute(new_dir)); - } else if (error == -ENOENT) - nfs_dentry_handle_enoent(old_dentry); - /* new dentry created? 
*/ if (dentry) dput(dentry); diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index a3fc48ba4931..44347f4bdc15 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -305,7 +305,7 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data) } hdr->pgio_done_cb = filelayout_read_done_cb; - if (nfs41_setup_sequence(hdr->ds_clp->cl_session, + if (nfs4_setup_sequence(hdr->ds_clp, &hdr->args.seq_args, &hdr->res.seq_res, task)) @@ -403,7 +403,7 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data) rpc_exit(task, 0); return; } - if (nfs41_setup_sequence(hdr->ds_clp->cl_session, + if (nfs4_setup_sequence(hdr->ds_clp, &hdr->args.seq_args, &hdr->res.seq_res, task)) @@ -438,7 +438,7 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data) { struct nfs_commit_data *wdata = data; - nfs41_setup_sequence(wdata->ds_clp->cl_session, + nfs4_setup_sequence(wdata->ds_clp, &wdata->args.seq_args, &wdata->res.seq_res, task); @@ -482,7 +482,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr) u32 j, idx; struct nfs_fh *fh; - dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", + dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n", __func__, hdr->inode->i_ino, hdr->args.pgbase, (size_t)hdr->args.count, offset); @@ -540,7 +540,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) if (IS_ERR(ds_clnt)) return PNFS_NOT_ATTEMPTED; - dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n", + dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d\n", __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count)); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 0ca4af8cca5d..42dedf2d625f 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1053,9 +1053,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, struct nfs_client *mds_client = mds_server->nfs_client; struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; - if (task->tk_status >= 0) - return 0; - switch (task->tk_status) { /* MDS state errors */ case -NFS4ERR_DELEG_REVOKED: @@ -1157,9 +1154,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); - if (task->tk_status >= 0) - return 0; - switch (task->tk_status) { /* File access problems. 
Don't mark the device as unavailable */ case -EACCES: @@ -1195,6 +1189,13 @@ static int ff_layout_async_handle_error(struct rpc_task *task, { int vers = clp->cl_nfs_mod->rpc_vers->number; + if (task->tk_status >= 0) + return 0; + + /* Handle the case of an invalid layout segment */ + if (!pnfs_is_valid_lseg(lseg)) + return -NFS4ERR_RESET_TO_PNFS; + switch (vers) { case 3: return ff_layout_async_handle_error_v3(task, lseg, idx); @@ -1384,30 +1385,14 @@ static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) rpc_call_start(task); } -static int ff_layout_setup_sequence(struct nfs_client *ds_clp, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) -{ - if (ds_clp->cl_session) - return nfs41_setup_sequence(ds_clp->cl_session, - args, - res, - task); - return nfs40_setup_sequence(ds_clp->cl_slot_tbl, - args, - res, - task); -} - static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; - if (ff_layout_setup_sequence(hdr->ds_clp, - &hdr->args.seq_args, - &hdr->res.seq_res, - task)) + if (nfs4_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) return; if (ff_layout_read_prepare_common(task, hdr)) @@ -1578,10 +1563,10 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; - if (ff_layout_setup_sequence(hdr->ds_clp, - &hdr->args.seq_args, - &hdr->res.seq_res, - task)) + if (nfs4_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) return; if (ff_layout_write_prepare_common(task, hdr)) @@ -1667,10 +1652,10 @@ static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) { struct nfs_commit_data *wdata = data; - if (ff_layout_setup_sequence(wdata->ds_clp, - &wdata->args.seq_args, - &wdata->res.seq_res, - task)) + if (nfs4_setup_sequence(wdata->ds_clp, + &wdata->args.seq_args, + &wdata->res.seq_res, + task)) return; ff_layout_commit_prepare_common(task, data); } @@ -1751,7 +1736,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr) int vers; struct nfs_fh *fh; - dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", + dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n", __func__, hdr->inode->i_ino, hdr->args.pgbase, (size_t)hdr->args.count, offset); @@ -1828,7 +1813,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) vers = nfs4_ff_layout_ds_version(lseg, idx); - dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n", + dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n", __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers); @@ -1965,10 +1950,7 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr, static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) { - __be32 *p; - - p = xdr_reserve_space(xdr, len); - xdr_encode_opaque_fixed(p, buf, len); + WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); } static void @@ -2092,7 +2074,7 @@ ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args) kfree(ff_args); } -const struct nfs4_xdr_opaque_ops layoutreturn_ops = { +static const struct nfs4_xdr_opaque_ops layoutreturn_ops = { .encode = ff_layout_encode_layoutreturn, .free = ff_layout_free_layoutreturn, }; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 5ca4d96b1942..f489a5a71bd5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include 
<linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -703,9 +703,10 @@ static bool nfs_need_revalidate_inode(struct inode *inode) return false; } -int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int nfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; int err = 0; @@ -726,17 +727,17 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is * no point in checking those. */ - if ((mnt->mnt_flags & MNT_NOATIME) || - ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) + if ((path->mnt->mnt_flags & MNT_NOATIME) || + ((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) need_atime = 0; if (need_atime || nfs_need_revalidate_inode(inode)) { struct nfs_server *server = NFS_SERVER(inode); - nfs_readdirplus_parent_cache_miss(dentry); + nfs_readdirplus_parent_cache_miss(path->dentry); err = __nfs_revalidate_inode(server, inode); } else - nfs_readdirplus_parent_cache_hit(dentry); + nfs_readdirplus_parent_cache_hit(path->dentry); if (!err) { generic_fillattr(inode, stat); stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index e49d831c4e85..786f17580582 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -178,11 +178,12 @@ out_nofree: } static int -nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +nfs_namespace_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - if (NFS_FH(d_inode(dentry))->size != 0) - return nfs_getattr(mnt, dentry, stat); - generic_fillattr(d_inode(dentry), stat); + if (NFS_FH(d_inode(path->dentry))->size != 0) + return nfs_getattr(path, stat, request_mask, query_flags); + generic_fillattr(d_inode(path->dentry), stat); return 0; } diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index d12ff9385f49..1e486c73ec94 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -12,6 +12,7 @@ #include "nfs42.h" #include "iostat.h" #include "pnfs.h" +#include "nfs4session.h" #include "internal.h" #define NFSDBG_FACILITY NFSDBG_PROC @@ -128,30 +129,26 @@ out_unlock: return err; } -static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, +static ssize_t _nfs42_proc_copy(struct file *src, struct nfs_lock_context *src_lock, - struct file *dst, loff_t pos_dst, + struct file *dst, struct nfs_lock_context *dst_lock, - size_t count) + struct nfs42_copy_args *args, + struct nfs42_copy_res *res) { - struct nfs42_copy_args args = { - .src_fh = NFS_FH(file_inode(src)), - .src_pos = pos_src, - .dst_fh = NFS_FH(file_inode(dst)), - .dst_pos = pos_dst, - .count = count, - }; - struct nfs42_copy_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY], - .rpc_argp = &args, - .rpc_resp = &res, + .rpc_argp = args, + .rpc_resp = res, }; struct inode *dst_inode = file_inode(dst); struct nfs_server *server = NFS_SERVER(dst_inode); + loff_t pos_src = args->src_pos; + loff_t pos_dst = args->dst_pos; + size_t count = args->count; int status; - status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context, + status = nfs4_set_rw_stateid(&args->src_stateid, 
src_lock->open_context, src_lock, FMODE_READ); if (status) return status; @@ -161,7 +158,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, if (status) return status; - status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context, + status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context, dst_lock, FMODE_WRITE); if (status) return status; @@ -171,22 +168,22 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, return status; status = nfs4_call_sync(server->client, server, &msg, - &args.seq_args, &res.seq_res, 0); + &args->seq_args, &res->seq_res, 0); if (status == -ENOTSUPP) server->caps &= ~NFS_CAP_COPY; if (status) return status; - if (res.write_res.verifier.committed != NFS_FILE_SYNC) { - status = nfs_commit_file(dst, &res.write_res.verifier.verifier); + if (res->write_res.verifier.committed != NFS_FILE_SYNC) { + status = nfs_commit_file(dst, &res->write_res.verifier.verifier); if (status) return status; } truncate_pagecache_range(dst_inode, pos_dst, - pos_dst + res.write_res.count); + pos_dst + res->write_res.count); - return res.write_res.count; + return res->write_res.count; } ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, @@ -196,8 +193,22 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, struct nfs_server *server = NFS_SERVER(file_inode(dst)); struct nfs_lock_context *src_lock; struct nfs_lock_context *dst_lock; - struct nfs4_exception src_exception = { }; - struct nfs4_exception dst_exception = { }; + struct nfs42_copy_args args = { + .src_fh = NFS_FH(file_inode(src)), + .src_pos = pos_src, + .dst_fh = NFS_FH(file_inode(dst)), + .dst_pos = pos_dst, + .count = count, + }; + struct nfs42_copy_res res; + struct nfs4_exception src_exception = { + .inode = file_inode(src), + .stateid = &args.src_stateid, + }; + struct nfs4_exception dst_exception = { + .inode = file_inode(dst), + .stateid = &args.dst_stateid, + }; ssize_t err, err2; if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY)) @@ -207,7 +218,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, if (IS_ERR(src_lock)) return PTR_ERR(src_lock); - src_exception.inode = file_inode(src); src_exception.state = src_lock->open_context->state; dst_lock = nfs_get_lock_context(nfs_file_open_context(dst)); @@ -216,15 +226,17 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, goto out_put_src_lock; } - dst_exception.inode = file_inode(dst); dst_exception.state = dst_lock->open_context->state; do { inode_lock(file_inode(dst)); - err = _nfs42_proc_copy(src, pos_src, src_lock, - dst, pos_dst, dst_lock, count); + err = _nfs42_proc_copy(src, src_lock, + dst, dst_lock, + &args, &res); inode_unlock(file_inode(dst)); + if (err >= 0) + break; if (err == -ENOTSUPP) { err = -EOPNOTSUPP; break; @@ -331,9 +343,8 @@ nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) } nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid); spin_unlock(&inode->i_lock); - nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args, - &data->res.seq_res, task); - + nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, + &data->res.seq_res, task); } static void diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 665165833660..af285cc27ccf 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -273,14 +273,6 @@ extern int nfs4_set_rw_stateid(nfs4_stateid *stateid, fmode_t fmode); #if defined(CONFIG_NFS_V4_1) -static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) -{ - return server->nfs_client->cl_session; -} 
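(The NFSv4 changes in this part of the series collapse the separate nfs40_setup_sequence()/nfs41_setup_sequence() entry points into a single nfs4_setup_sequence() that takes a struct nfs_client and selects the v4.0 slot table or the v4.1 session's fore-channel table internally. A minimal C sketch of a converted call site follows; the function name example_rpc_prepare is an illustrative assumption, while the call itself mirrors the converted nfs41_call_sync_prepare() later in the diff.)

/* Illustrative sketch only: with the consolidated helper, an rpc_call_prepare
 * callback no longer needs nfs4_get_session() or an explicit slot table;
 * the nfs_client is enough for both NFSv4.0 and NFSv4.1+. */
static void example_rpc_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

(The slot allocation, slot-table draining and privileged-task handling that used to be duplicated in nfs40_setup_sequence() and nfs41_setup_sequence() now lives once in the nfs4_setup_sequence() added in fs/nfs/nfs4proc.c below.)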
- -extern int nfs41_setup_sequence(struct nfs4_session *session, - struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, - struct rpc_task *task); extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *); extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *); extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *); @@ -357,11 +349,6 @@ nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp, hdr->args.stable = NFS_FILE_SYNC; } #else /* CONFIG_NFS_v4_1 */ -static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) -{ - return NULL; -} - static inline bool is_ds_only_client(struct nfs_client *clp) { @@ -466,7 +453,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); extern void nfs_release_seqid(struct nfs_seqid *seqid); extern void nfs_free_seqid(struct nfs_seqid *seqid); -extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl, +extern int nfs4_setup_sequence(const struct nfs_client *client, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index c444285bb1b1..835c163f61af 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -316,7 +316,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, if (ret < 0) goto out_up; - payload = user_key_payload(rkey); + payload = user_key_payload_rcu(rkey); if (IS_ERR_OR_NULL(payload)) { ret = PTR_ERR(payload); goto out_up; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0a0eaecf9676..1b183686c6d4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -577,12 +577,7 @@ nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, static bool _nfs4_is_integrity_protected(struct nfs_client *clp) { rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; - - if (flavor == RPC_AUTH_GSS_KRB5I || - flavor == RPC_AUTH_GSS_KRB5P) - return true; - - return false; + return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P); } static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) @@ -622,48 +617,6 @@ static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) args->sa_privileged = 1; } -int nfs40_setup_sequence(struct nfs4_slot_table *tbl, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) -{ - struct nfs4_slot *slot; - - /* slot already allocated? */ - if (res->sr_slot != NULL) - goto out_start; - - spin_lock(&tbl->slot_tbl_lock); - if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) - goto out_sleep; - - slot = nfs4_alloc_slot(tbl); - if (IS_ERR(slot)) { - if (slot == ERR_PTR(-ENOMEM)) - task->tk_timeout = HZ >> 2; - goto out_sleep; - } - spin_unlock(&tbl->slot_tbl_lock); - - slot->privileged = args->sa_privileged ? 
1 : 0; - args->sa_slot = slot; - res->sr_slot = slot; - -out_start: - rpc_call_start(task); - return 0; - -out_sleep: - if (args->sa_privileged) - rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, - NULL, RPC_PRIORITY_PRIVILEGED); - else - rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); - spin_unlock(&tbl->slot_tbl_lock); - return -EAGAIN; -} -EXPORT_SYMBOL_GPL(nfs40_setup_sequence); - static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res) { struct nfs4_slot *slot = res->sr_slot; @@ -815,10 +768,6 @@ static int nfs41_sequence_process(struct rpc_task *task, case -NFS4ERR_SEQ_FALSE_RETRY: ++slot->seq_nr; goto retry_nowait; - case -NFS4ERR_DEADSESSION: - case -NFS4ERR_BADSESSION: - nfs4_schedule_session_recovery(session, res->sr_status); - goto retry_nowait; default: /* Just update the slot sequence no. */ slot->seq_done = 1; @@ -882,101 +831,14 @@ int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) } EXPORT_SYMBOL_GPL(nfs4_sequence_done); -int nfs41_setup_sequence(struct nfs4_session *session, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) -{ - struct nfs4_slot *slot; - struct nfs4_slot_table *tbl; - - dprintk("--> %s\n", __func__); - /* slot already allocated? */ - if (res->sr_slot != NULL) - goto out_success; - - tbl = &session->fc_slot_table; - - task->tk_timeout = 0; - - spin_lock(&tbl->slot_tbl_lock); - if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) && - !args->sa_privileged) { - /* The state manager will wait until the slot table is empty */ - dprintk("%s session is draining\n", __func__); - goto out_sleep; - } - - slot = nfs4_alloc_slot(tbl); - if (IS_ERR(slot)) { - /* If out of memory, try again in 1/4 second */ - if (slot == ERR_PTR(-ENOMEM)) - task->tk_timeout = HZ >> 2; - dprintk("<-- %s: no free slots\n", __func__); - goto out_sleep; - } - spin_unlock(&tbl->slot_tbl_lock); - - slot->privileged = args->sa_privileged ? 1 : 0; - args->sa_slot = slot; - - dprintk("<-- %s slotid=%u seqid=%u\n", __func__, - slot->slot_nr, slot->seq_nr); - - res->sr_slot = slot; - res->sr_timestamp = jiffies; - res->sr_status_flags = 0; - /* - * sr_status is only set in decode_sequence, and so will remain - * set to 1 if an rpc level failure occurs. - */ - res->sr_status = 1; - trace_nfs4_setup_sequence(session, args); -out_success: - rpc_call_start(task); - return 0; -out_sleep: - /* Privileged tasks are queued with top priority */ - if (args->sa_privileged) - rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, - NULL, RPC_PRIORITY_PRIVILEGED); - else - rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); - spin_unlock(&tbl->slot_tbl_lock); - return -EAGAIN; -} -EXPORT_SYMBOL_GPL(nfs41_setup_sequence); - -static int nfs4_setup_sequence(const struct nfs_server *server, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) -{ - struct nfs4_session *session = nfs4_get_session(server); - int ret = 0; - - if (!session) - return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, - args, res, task); - - dprintk("--> %s clp %p session %p sr_slot %u\n", - __func__, session->clp, session, res->sr_slot ? 
- res->sr_slot->slot_nr : NFS4_NO_SLOT); - - ret = nfs41_setup_sequence(session, args, res, task); - - dprintk("<-- %s status=%d\n", __func__, ret); - return ret; -} - static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) { struct nfs4_call_sync_data *data = calldata; - struct nfs4_session *session = nfs4_get_session(data->seq_server); dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); - nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); + nfs4_setup_sequence(data->seq_server->nfs_client, + data->seq_args, data->seq_res, task); } static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) @@ -993,15 +855,6 @@ static const struct rpc_call_ops nfs41_call_sync_ops = { #else /* !CONFIG_NFS_V4_1 */ -static int nfs4_setup_sequence(const struct nfs_server *server, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) -{ - return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, - args, res, task); -} - static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) { return nfs40_sequence_done(task, res); @@ -1022,10 +875,68 @@ EXPORT_SYMBOL_GPL(nfs4_sequence_done); #endif /* !CONFIG_NFS_V4_1 */ +int nfs4_setup_sequence(const struct nfs_client *client, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + struct rpc_task *task) +{ + struct nfs4_session *session = nfs4_get_session(client); + struct nfs4_slot_table *tbl = client->cl_slot_tbl; + struct nfs4_slot *slot; + + /* slot already allocated? */ + if (res->sr_slot != NULL) + goto out_start; + + if (session) { + tbl = &session->fc_slot_table; + task->tk_timeout = 0; + } + + spin_lock(&tbl->slot_tbl_lock); + /* The state manager will wait until the slot table is empty */ + if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) + goto out_sleep; + + slot = nfs4_alloc_slot(tbl); + if (IS_ERR(slot)) { + /* Try again in 1/4 second */ + if (slot == ERR_PTR(-ENOMEM)) + task->tk_timeout = HZ >> 2; + goto out_sleep; + } + spin_unlock(&tbl->slot_tbl_lock); + + slot->privileged = args->sa_privileged ? 
1 : 0; + args->sa_slot = slot; + + res->sr_slot = slot; + if (session) { + res->sr_timestamp = jiffies; + res->sr_status_flags = 0; + res->sr_status = 1; + } + + trace_nfs4_setup_sequence(session, args); +out_start: + rpc_call_start(task); + return 0; + +out_sleep: + if (args->sa_privileged) + rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, + NULL, RPC_PRIORITY_PRIVILEGED); + else + rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); + spin_unlock(&tbl->slot_tbl_lock); + return -EAGAIN; +} +EXPORT_SYMBOL_GPL(nfs4_setup_sequence); + static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) { struct nfs4_call_sync_data *data = calldata; - nfs4_setup_sequence(data->seq_server, + nfs4_setup_sequence(data->seq_server->nfs_client, data->seq_args, data->seq_res, task); } @@ -1330,14 +1241,6 @@ static void nfs4_opendata_put(struct nfs4_opendata *p) kref_put(&p->kref, nfs4_opendata_free); } -static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) -{ - int ret; - - ret = rpc_wait_for_completion_task(task); - return ret; -} - static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, fmode_t fmode) { @@ -1732,17 +1635,15 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) int ret; if (!data->rpc_done) { - if (data->rpc_status) { - ret = data->rpc_status; - goto err; - } + if (data->rpc_status) + return ERR_PTR(data->rpc_status); /* cached opens have already been processed */ goto update; } ret = nfs_refresh_inode(inode, &data->f_attr); if (ret) - goto err; + return ERR_PTR(ret); if (data->o_res.delegation_type != 0) nfs4_opendata_check_deleg(data, state); @@ -1752,9 +1653,6 @@ update: atomic_inc(&state->count); return state; -err: - return ERR_PTR(ret); - } static struct nfs4_state * @@ -2048,8 +1946,8 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; - nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, - &data->c_arg.seq_args, &data->c_res.seq_res, task); + nfs4_setup_sequence(data->o_arg.server->nfs_client, + &data->c_arg.seq_args, &data->c_res.seq_res, task); } static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) @@ -2124,7 +2022,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); - status = nfs4_wait_for_completion_rpc_task(task); + status = rpc_wait_for_completion_task(task); if (status != 0) { data->cancelled = 1; smp_wmb(); @@ -2172,7 +2070,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); } data->timestamp = jiffies; - if (nfs4_setup_sequence(data->o_arg.server, + if (nfs4_setup_sequence(data->o_arg.server->nfs_client, &data->o_arg.seq_args, &data->o_res.seq_res, task) != 0) @@ -2289,15 +2187,15 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) data->is_recover = 1; } task = rpc_run_task(&task_setup_data); - if (IS_ERR(task)) - return PTR_ERR(task); - status = nfs4_wait_for_completion_rpc_task(task); - if (status != 0) { - data->cancelled = 1; - smp_wmb(); - } else - status = data->rpc_status; - rpc_put_task(task); + if (IS_ERR(task)) + return PTR_ERR(task); + status = rpc_wait_for_completion_task(task); + if (status != 0) { + data->cancelled = 1; + smp_wmb(); + } else + status = data->rpc_status; + rpc_put_task(task); return status; } @@ -2306,7 +2204,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data) { struct inode *dir = 
d_inode(data->dir); struct nfs_openres *o_res = &data->o_res; - int status; + int status; status = nfs4_run_open_task(data, 1); if (status != 0 || !data->rpc_done) @@ -2314,11 +2212,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data) nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); - if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { + if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) status = _nfs4_proc_open_confirm(data); - if (status != 0) - return status; - } return status; } @@ -2412,11 +2307,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) return 0; } -static int nfs4_recover_expired_lease(struct nfs_server *server) -{ - return nfs4_client_recover_expired_lease(server->nfs_client); -} - /* * OPEN_EXPIRED: * reclaim state on the server after a network partition. @@ -2730,6 +2620,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, ret = PTR_ERR(state); if (IS_ERR(state)) goto out; + ctx->state = state; if (server->caps & NFS_CAP_POSIX_LOCK) set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) @@ -2755,7 +2646,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (ret != 0) goto out; - ctx->state = state; if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) @@ -2794,7 +2684,7 @@ static int _nfs4_do_open(struct inode *dir, dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); goto out_err; } - status = nfs4_recover_expired_lease(server); + status = nfs4_client_recover_expired_lease(server->nfs_client); if (status != 0) goto err_put_state_owner; if (d_really_is_positive(dentry)) @@ -2940,12 +2830,12 @@ static int _nfs4_do_setattr(struct inode *inode, struct nfs_open_context *ctx) { struct nfs_server *server = NFS_SERVER(inode); - struct rpc_message msg = { + struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], .rpc_argp = arg, .rpc_resp = res, .rpc_cred = cred, - }; + }; struct rpc_cred *delegation_cred = NULL; unsigned long timestamp = jiffies; fmode_t fmode; @@ -2993,18 +2883,18 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_state *state = ctx ? ctx->state : NULL; - struct nfs_setattrargs arg = { - .fh = NFS_FH(inode), - .iap = sattr, + struct nfs_setattrargs arg = { + .fh = NFS_FH(inode), + .iap = sattr, .server = server, .bitmask = server->attr_bitmask, .label = ilabel, - }; - struct nfs_setattrres res = { + }; + struct nfs_setattrres res = { .fattr = fattr, .label = olabel, .server = server, - }; + }; struct nfs4_exception exception = { .state = state, .inode = inode, @@ -3118,7 +3008,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) } } - /* hmm. we are done with the inode, and in the process of freeing + /* hmm. we are done with the inode, and in the process of freeing * the state_owner. 
we keep this around to process errors */ switch (task->tk_status) { @@ -3234,7 +3124,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) else if (calldata->arg.bitmask == NULL) calldata->res.fattr = NULL; calldata->timestamp = jiffies; - if (nfs4_setup_sequence(NFS_SERVER(inode), + if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, &calldata->arg.seq_args, &calldata->res.seq_res, task) != 0) @@ -3522,16 +3412,11 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl .pseudoflavor = flavor, }; struct rpc_auth *auth; - int ret; auth = rpcauth_create(&auth_args, server->client); - if (IS_ERR(auth)) { - ret = -EACCES; - goto out; - } - ret = nfs4_lookup_root(server, fhandle, info); -out: - return ret; + if (IS_ERR(auth)) + return -EACCES; + return nfs4_lookup_root(server, fhandle, info); } /* @@ -4114,7 +3999,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) { - nfs4_setup_sequence(NFS_SB(data->dentry->d_sb), + nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, &data->args.seq_args, &data->res.seq_res, task); @@ -4148,7 +4033,7 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) { - nfs4_setup_sequence(NFS_SERVER(data->old_dir), + nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, &data->args.seq_args, &data->res.seq_res, task); @@ -4723,7 +4608,7 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, struct nfs_pgio_header *hdr) { - if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), + if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, &hdr->args.seq_args, &hdr->res.seq_res, task)) @@ -4822,7 +4707,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) { - nfs4_setup_sequence(NFS_SERVER(data->inode), + nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, &data->args.seq_args, &data->res.seq_res, task); @@ -4975,8 +4860,8 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen, if (newpage == NULL) goto unwind; memcpy(page_address(newpage), buf, len); - buf += len; - buflen -= len; + buf += len; + buflen -= len; *pages++ = newpage; rc++; } while (buflen != 0); @@ -5069,7 +4954,7 @@ out: */ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { - struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; + struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; struct nfs_getaclargs args = { .fh = NFS_FH(inode), .acl_pages = pages, @@ -5083,13 +4968,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu .rpc_argp = &args, .rpc_resp = &res, }; - unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; int ret = -ENOMEM, i; - /* As long as we're doing a round trip to the server anyway, - * let's be prepared for a page of acl data. 
*/ - if (npages == 0) - npages = 1; if (npages > ARRAY_SIZE(pages)) return -ERANGE; @@ -5299,8 +5180,8 @@ static int _nfs4_do_set_security_label(struct inode *inode, struct nfs_server *server = NFS_SERVER(inode); const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; struct nfs_setattrargs arg = { - .fh = NFS_FH(inode), - .iap = &sattr, + .fh = NFS_FH(inode), + .iap = &sattr, .server = server, .bitmask = bitmask, .label = ilabel, @@ -5311,9 +5192,9 @@ static int _nfs4_do_set_security_label(struct inode *inode, .server = server, }; struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], - .rpc_argp = &arg, - .rpc_resp = &res, + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], + .rpc_argp = &arg, + .rpc_resp = &res, }; int status; @@ -5747,7 +5628,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) return; - nfs4_setup_sequence(d_data->res.server, + nfs4_setup_sequence(d_data->res.server->nfs_client, &d_data->args.seq_args, &d_data->res.seq_res, task); @@ -5817,7 +5698,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co return PTR_ERR(task); if (!issync) goto out; - status = nfs4_wait_for_completion_rpc_task(task); + status = rpc_wait_for_completion_task(task); if (status != 0) goto out; status = data->rpc_status; @@ -5859,8 +5740,8 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], - .rpc_argp = &arg, - .rpc_resp = &res, + .rpc_argp = &arg, + .rpc_resp = &res, .rpc_cred = state->owner->so_cred, }; struct nfs4_lock_state *lsp; @@ -5989,7 +5870,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) goto out_no_action; } calldata->timestamp = jiffies; - if (nfs4_setup_sequence(calldata->server, + if (nfs4_setup_sequence(calldata->server->nfs_client, &calldata->arg.seq_args, &calldata->res.seq_res, task) != 0) @@ -6087,7 +5968,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * status = PTR_ERR(task); if (IS_ERR(task)) goto out; - status = nfs4_wait_for_completion_rpc_task(task); + status = rpc_wait_for_completion_task(task); rpc_put_task(task); out: request->fl_flags = fl_flags; @@ -6174,7 +6055,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) goto out_release_open_seqid; } data->timestamp = jiffies; - if (nfs4_setup_sequence(data->server, + if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args, &data->res.seq_res, task) == 0) @@ -6314,7 +6195,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); - ret = nfs4_wait_for_completion_rpc_task(task); + ret = rpc_wait_for_completion_task(task); if (ret == 0) { ret = data->rpc_status; if (ret) @@ -6393,8 +6274,7 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) return 0; - status = nfs4_lock_expired(state, request); - return status; + return nfs4_lock_expired(state, request); } #endif @@ -6640,8 +6520,8 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata { struct nfs_release_lockowner_data *data = calldata; struct nfs_server *server = data->server; - nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, - 
&data->args.seq_args, &data->res.seq_res, task); + nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, + &data->res.seq_res, task); data->args.lock_owner.clientid = server->nfs_client->cl_clientid; data->timestamp = jiffies; } @@ -7232,11 +7112,9 @@ static bool nfs41_same_server_scope(struct nfs41_server_scope *a, struct nfs41_server_scope *b) { - if (a->server_scope_sz == b->server_scope_sz && - memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) - return true; - - return false; + if (a->server_scope_sz != b->server_scope_sz) + return false; + return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; } static void @@ -7831,7 +7709,7 @@ static void nfs4_get_lease_time_prepare(struct rpc_task *task, dprintk("--> %s\n", __func__); /* just setup sequence, do not trigger session recovery since we're invoked within one */ - nfs41_setup_sequence(data->clp->cl_session, + nfs4_setup_sequence(data->clp, &data->args->la_seq_args, &data->res->lr_seq_res, task); @@ -8202,7 +8080,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; - nfs41_setup_sequence(clp->cl_session, args, res, task); + nfs4_setup_sequence(clp, args, res, task); } static const struct rpc_call_ops nfs41_sequence_ops = { @@ -8290,7 +8168,7 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) { struct nfs4_reclaim_complete_data *calldata = data; - nfs41_setup_sequence(calldata->clp->cl_session, + nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args, &calldata->res.seq_res, task); @@ -8382,7 +8260,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp, status = PTR_ERR(task); goto out; } - status = nfs4_wait_for_completion_rpc_task(task); + status = rpc_wait_for_completion_task(task); if (status == 0) status = task->tk_status; rpc_put_task(task); @@ -8397,10 +8275,9 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) { struct nfs4_layoutget *lgp = calldata; struct nfs_server *server = NFS_SERVER(lgp->args.inode); - struct nfs4_session *session = nfs4_get_session(server); dprintk("--> %s\n", __func__); - nfs41_setup_sequence(session, &lgp->args.seq_args, + nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, &lgp->res.seq_res, task); dprintk("<-- %s\n", __func__); } @@ -8615,7 +8492,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return ERR_CAST(task); - status = nfs4_wait_for_completion_rpc_task(task); + status = rpc_wait_for_completion_task(task); if (status == 0) { status = nfs4_layoutget_handle_exception(task, lgp, &exception); *timeout = exception.timeout; @@ -8644,7 +8521,7 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) struct nfs4_layoutreturn *lrp = calldata; dprintk("--> %s\n", __func__); - nfs41_setup_sequence(lrp->clp->cl_session, + nfs4_setup_sequence(lrp->clp, &lrp->args.seq_args, &lrp->res.seq_res, task); @@ -8794,9 +8671,8 @@ static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) { struct nfs4_layoutcommit_data *data = calldata; struct nfs_server *server = NFS_SERVER(data->args.inode); - struct nfs4_session *session = nfs4_get_session(server); - nfs41_setup_sequence(session, + nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, &data->res.seq_res, task); @@ -9120,7 +8996,7 @@ struct nfs_free_stateid_data { static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) { struct 
nfs_free_stateid_data *data = calldata; - nfs41_setup_sequence(nfs4_get_session(data->server), + nfs4_setup_sequence(data->server->nfs_client, &data->args.seq_args, &data->res.seq_res, task); @@ -9232,10 +9108,8 @@ static bool nfs41_match_stateid(const nfs4_stateid *s1, if (s1->seqid == s2->seqid) return true; - if (s1->seqid == 0 || s2->seqid == 0) - return true; - return false; + return s1->seqid == 0 || s2->seqid == 0; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 82e77198d17e..1f8c2ae43a8d 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -153,7 +153,7 @@ void nfs4_set_lease_period(struct nfs_client *clp, spin_unlock(&clp->cl_lock); /* Cap maximum reconnect timeout at 1/2 lease period */ - rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1); + rpc_set_connect_timeout(clp->cl_rpcclient, lease, lease >> 1); } /* diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index dae385500005..dfae4880eacb 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -103,6 +103,11 @@ static inline bool nfs4_test_locked_slot(const struct nfs4_slot_table *tbl, return !!test_bit(slotid, tbl->used_slots); } +static inline struct nfs4_session *nfs4_get_session(const struct nfs_client *clp) +{ + return clp->cl_session; +} + #if defined(CONFIG_NFS_V4_1) extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, u32 target_highest_slotid); @@ -170,6 +175,8 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp) return 0; } +#define nfs_session_id_hash(session) (0) + #endif /* defined(CONFIG_NFS_V4_1) */ #endif /* IS_ENABLED(CONFIG_NFS_V4) */ #endif /* __LINUX_FS_NFS_NFS4SESSION_H */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index daeb94e3acd4..8156bad6b441 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -868,7 +868,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ for(;;) { spin_lock(&state->state_lock); - lsp = __nfs4_find_lock_state(state, owner, 0); + lsp = __nfs4_find_lock_state(state, owner, NULL); if (lsp != NULL) break; if (new != NULL) { diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index cfb8f7ce5cf6..845d0eadefc9 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -241,38 +241,6 @@ DEFINE_NFS4_CLIENTID_EVENT(nfs4_bind_conn_to_session); DEFINE_NFS4_CLIENTID_EVENT(nfs4_sequence); DEFINE_NFS4_CLIENTID_EVENT(nfs4_reclaim_complete); -TRACE_EVENT(nfs4_setup_sequence, - TP_PROTO( - const struct nfs4_session *session, - const struct nfs4_sequence_args *args - ), - TP_ARGS(session, args), - - TP_STRUCT__entry( - __field(unsigned int, session) - __field(unsigned int, slot_nr) - __field(unsigned int, seq_nr) - __field(unsigned int, highest_used_slotid) - ), - - TP_fast_assign( - const struct nfs4_slot *sa_slot = args->sa_slot; - __entry->session = nfs_session_id_hash(&session->sess_id); - __entry->slot_nr = sa_slot->slot_nr; - __entry->seq_nr = sa_slot->seq_nr; - __entry->highest_used_slotid = - sa_slot->table->highest_used_slotid; - ), - TP_printk( - "session=0x%08x slot_nr=%u seq_nr=%u " - "highest_used_slotid=%u", - __entry->session, - __entry->slot_nr, - __entry->seq_nr, - __entry->highest_used_slotid - ) -); - #define show_nfs4_sequence_status_flags(status) \ __print_flags((unsigned long)status, "|", \ { SEQ4_STATUS_CB_PATH_DOWN, "CB_PATH_DOWN" }, \ @@ -382,6 +350,38 @@ TRACE_EVENT(nfs4_cb_sequence, ); #endif /* CONFIG_NFS_V4_1 */ +TRACE_EVENT(nfs4_setup_sequence, + TP_PROTO( + const struct nfs4_session *session, + const struct 
nfs4_sequence_args *args + ), + TP_ARGS(session, args), + + TP_STRUCT__entry( + __field(unsigned int, session) + __field(unsigned int, slot_nr) + __field(unsigned int, seq_nr) + __field(unsigned int, highest_used_slotid) + ), + + TP_fast_assign( + const struct nfs4_slot *sa_slot = args->sa_slot; + __entry->session = session ? nfs_session_id_hash(&session->sess_id) : 0; + __entry->slot_nr = sa_slot->slot_nr; + __entry->seq_nr = sa_slot->seq_nr; + __entry->highest_used_slotid = + sa_slot->table->highest_used_slotid; + ), + TP_printk( + "session=0x%08x slot_nr=%u seq_nr=%u " + "highest_used_slotid=%u", + __entry->session, + __entry->slot_nr, + __entry->seq_nr, + __entry->highest_used_slotid + ) +); + DECLARE_EVENT_CLASS(nfs4_open_event, TP_PROTO( const struct nfs_open_context *ctx, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e9255cb453e6..f0369e362753 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -169,8 +169,10 @@ static int nfs4_stat_to_errno(int); open_owner_id_maxsz + \ encode_opentype_maxsz + \ encode_claim_null_maxsz) +#define decode_space_limit_maxsz (3) #define decode_ace_maxsz (3 + nfs4_owner_maxsz) #define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \ + decode_space_limit_maxsz + \ decode_ace_maxsz) #define decode_change_info_maxsz (5) #define decode_open_maxsz (op_decode_hdr_maxsz + \ @@ -924,34 +926,22 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes) static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) { - __be32 *p; - - p = xdr_reserve_space(xdr, len); - xdr_encode_opaque_fixed(p, buf, len); + WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); } static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) { - __be32 *p; - - p = reserve_space(xdr, 4 + len); - xdr_encode_opaque(p, str, len); + WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0); } static void encode_uint32(struct xdr_stream *xdr, u32 n) { - __be32 *p; - - p = reserve_space(xdr, 4); - *p = cpu_to_be32(n); + WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0); } static void encode_uint64(struct xdr_stream *xdr, u64 n) { - __be32 *p; - - p = reserve_space(xdr, 8); - xdr_encode_hyper(p, n); + WARN_ON_ONCE(xdr_stream_encode_u64(xdr, n) < 0); } static void encode_nfs4_seqid(struct xdr_stream *xdr, @@ -2524,7 +2514,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); - replen = hdr.replen + op_decode_hdr_maxsz + 1; + replen = hdr.replen + op_decode_hdr_maxsz; encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, @@ -3062,20 +3052,15 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string) { - __be32 *p; - - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - *len = be32_to_cpup(p); - p = xdr_inline_decode(xdr, *len); - if (unlikely(!p)) - goto out_overflow; - *string = (char *)p; + ssize_t ret = xdr_stream_decode_opaque_inline(xdr, (void **)string, + NFS4_OPAQUE_LIMIT); + if (unlikely(ret < 0)) { + if (ret == -EBADMSG) + print_overflow_msg(__func__, xdr); + return -EIO; + } + *len = ret; return 0; -out_overflow: - print_overflow_msg(__func__, xdr); - return -EIO; } static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) @@ -3142,7 +3127,7 @@ static 
int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) } /* Dummy routine */ -static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp) +static int decode_ace(struct xdr_stream *xdr, void *ace) { __be32 *p; unsigned int strlen; @@ -3890,45 +3875,50 @@ out_overflow: return -EIO; } +static ssize_t decode_nfs4_string(struct xdr_stream *xdr, + struct nfs4_string *name, gfp_t gfp_flags) +{ + ssize_t ret; + + ret = xdr_stream_decode_string_dup(xdr, &name->data, + XDR_MAX_NETOBJ, gfp_flags); + name->len = 0; + if (ret > 0) + name->len = ret; + return ret; +} + static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, kuid_t *uid, struct nfs4_string *owner_name) { - uint32_t len; - __be32 *p; - int ret = 0; + ssize_t len; + char *p; *uid = make_kuid(&init_user_ns, -2); if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) return -EIO; - if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) { - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - len = be32_to_cpup(p); - p = xdr_inline_decode(xdr, len); - if (unlikely(!p)) - goto out_overflow; - if (owner_name != NULL) { - owner_name->data = kmemdup(p, len, GFP_NOWAIT); - if (owner_name->data != NULL) { - owner_name->len = len; - ret = NFS_ATTR_FATTR_OWNER_NAME; - } - } else if (len < XDR_MAX_NETOBJ) { - if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0) - ret = NFS_ATTR_FATTR_OWNER; - else - dprintk("%s: nfs_map_name_to_uid failed!\n", - __func__); - } else - dprintk("%s: name too long (%u)!\n", - __func__, len); - bitmap[1] &= ~FATTR4_WORD1_OWNER; + if (!(bitmap[1] & FATTR4_WORD1_OWNER)) + return 0; + bitmap[1] &= ~FATTR4_WORD1_OWNER; + + if (owner_name != NULL) { + len = decode_nfs4_string(xdr, owner_name, GFP_NOWAIT); + if (len <= 0) + goto out; + dprintk("%s: name=%s\n", __func__, owner_name->data); + return NFS_ATTR_FATTR_OWNER_NAME; + } else { + len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, + XDR_MAX_NETOBJ); + if (len <= 0 || nfs_map_name_to_uid(server, p, len, uid) != 0) + goto out; + dprintk("%s: uid=%d\n", __func__, (int)from_kuid(&init_user_ns, *uid)); + return NFS_ATTR_FATTR_OWNER; } - dprintk("%s: uid=%d\n", __func__, (int)from_kuid(&init_user_ns, *uid)); - return ret; -out_overflow: +out: + if (len != -EBADMSG) + return 0; print_overflow_msg(__func__, xdr); return -EIO; } @@ -3937,41 +3927,33 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, kgid_t *gid, struct nfs4_string *group_name) { - uint32_t len; - __be32 *p; - int ret = 0; + ssize_t len; + char *p; *gid = make_kgid(&init_user_ns, -2); if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) return -EIO; - if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - len = be32_to_cpup(p); - p = xdr_inline_decode(xdr, len); - if (unlikely(!p)) - goto out_overflow; - if (group_name != NULL) { - group_name->data = kmemdup(p, len, GFP_NOWAIT); - if (group_name->data != NULL) { - group_name->len = len; - ret = NFS_ATTR_FATTR_GROUP_NAME; - } - } else if (len < XDR_MAX_NETOBJ) { - if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0) - ret = NFS_ATTR_FATTR_GROUP; - else - dprintk("%s: nfs_map_group_to_gid failed!\n", - __func__); - } else - dprintk("%s: name too long (%u)!\n", - __func__, len); - bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; + if (!(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) + return 0; + bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; + + if 
(group_name != NULL) { + len = decode_nfs4_string(xdr, group_name, GFP_NOWAIT); + if (len <= 0) + goto out; + dprintk("%s: name=%s\n", __func__, group_name->data); + return NFS_ATTR_FATTR_OWNER_NAME; + } else { + len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, + XDR_MAX_NETOBJ); + if (len <= 0 || nfs_map_group_to_gid(server, p, len, gid) != 0) + goto out; + dprintk("%s: gid=%d\n", __func__, (int)from_kgid(&init_user_ns, *gid)); + return NFS_ATTR_FATTR_GROUP; } - dprintk("%s: gid=%d\n", __func__, (int)from_kgid(&init_user_ns, *gid)); - return ret; -out_overflow: +out: + if (len != -EBADMSG) + return 0; print_overflow_msg(__func__, xdr); return -EIO; } @@ -4294,15 +4276,12 @@ out_overflow: static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) { - __be32 *p; - - p = xdr_inline_decode(xdr, len); - if (likely(p)) { - memcpy(buf, p, len); - return 0; + ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len); + if (unlikely(ret < 0)) { + print_overflow_msg(__func__, xdr); + return -EIO; } - print_overflow_msg(__func__, xdr); - return -EIO; + return 0; } static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) @@ -5093,7 +5072,7 @@ static int decode_rw_delegation(struct xdr_stream *xdr, if (decode_space_limit(xdr, &res->pagemod_limit) < 0) return -EIO; } - return decode_ace(xdr, NULL, res->server->nfs_client); + return decode_ace(xdr, NULL); out_overflow: print_overflow_msg(__func__, xdr); return -EIO; @@ -5660,8 +5639,6 @@ static int decode_exchange_id(struct xdr_stream *xdr, status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; - if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) - return -EIO; memcpy(res->server_owner->major_id, dummy_str, dummy); res->server_owner->major_id_sz = dummy; @@ -5669,8 +5646,6 @@ static int decode_exchange_id(struct xdr_stream *xdr, status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; - if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) - return -EIO; memcpy(res->server_scope->server_scope, dummy_str, dummy); res->server_scope->server_scope_sz = dummy; @@ -5685,16 +5660,12 @@ static int decode_exchange_id(struct xdr_stream *xdr, status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; - if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) - return -EIO; memcpy(res->impl_id->domain, dummy_str, dummy); /* nii_name */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; - if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) - return -EIO; memcpy(res->impl_id->name, dummy_str, dummy); /* nii_date */ diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index 2a4cdce939a0..8f3d2acb81c3 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -291,7 +291,7 @@ objlayout_read_pagelist(struct nfs_pgio_header *hdr) &hdr->args.pgbase, hdr->args.offset, hdr->args.count); - dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n", + dprintk("%s: inode(%lx) offset 0x%llx count 0x%zx eof=%d\n", __func__, inode->i_ino, offset, count, hdr->res.eof); err = objio_read_pagelist(hdr); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 6bca17883b93..54e0f9f2dd94 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -531,39 +531,32 @@ static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss, int showdefaults) { struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address; + char *proto = NULL; - seq_printf(m, ",mountproto="); switch (sap->sa_family) { case 
AF_INET: switch (nfss->mountd_protocol) { case IPPROTO_UDP: - seq_printf(m, RPCBIND_NETID_UDP); + proto = RPCBIND_NETID_UDP; break; case IPPROTO_TCP: - seq_printf(m, RPCBIND_NETID_TCP); + proto = RPCBIND_NETID_TCP; break; - default: - if (showdefaults) - seq_printf(m, "auto"); } break; case AF_INET6: switch (nfss->mountd_protocol) { case IPPROTO_UDP: - seq_printf(m, RPCBIND_NETID_UDP6); + proto = RPCBIND_NETID_UDP6; break; case IPPROTO_TCP: - seq_printf(m, RPCBIND_NETID_TCP6); + proto = RPCBIND_NETID_TCP6; break; - default: - if (showdefaults) - seq_printf(m, "auto"); } break; - default: - if (showdefaults) - seq_printf(m, "auto"); } + if (proto || showdefaults) + seq_printf(m, ",mountproto=%s", proto ?: "auto"); } static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 006068526542..e75b056f46f4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1785,7 +1785,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) if (status < 0) { nfs_context_set_write_error(req->wb_context, status); nfs_inode_remove_request(req); - dprintk(", error = %d\n", status); + dprintk_cont(", error = %d\n", status); goto next; } @@ -1794,11 +1794,11 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { /* We have a match */ nfs_inode_remove_request(req); - dprintk(" OK\n"); + dprintk_cont(" OK\n"); goto next; } /* We have a mismatch. Write the page again */ - dprintk(" mismatch\n"); + dprintk_cont(" mismatch\n"); nfs_mark_request_dirty(req); set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); next: diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index a06115e31612..92b4b41d19d2 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -24,7 +24,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, { struct nfsd4_layout_seg *seg = &args->lg_seg; struct super_block *sb = inode->i_sb; - u32 block_size = (1 << inode->i_blkbits); + u32 block_size = i_blocksize(inode); struct pnfs_block_extent *bex; struct iomap iomap; u32 device_generation = 0; @@ -181,7 +181,7 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, int nr_iomaps; nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); + lcp->lc_up_len, &iomaps, i_blocksize(inode)); if (nr_iomaps < 0) return nfserrno(nr_iomaps); @@ -375,7 +375,7 @@ nfsd4_scsi_proc_layoutcommit(struct inode *inode, int nr_iomaps; nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); + lcp->lc_up_len, &iomaps, i_blocksize(inode)); if (nr_iomaps < 0) return nfserrno(nr_iomaps); diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 43e109cc0ccc..e71f11b1a180 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1102,6 +1102,7 @@ static struct flags { { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, { NFSEXP_PNFS, {"pnfs", ""}}, + { NFSEXP_SECURITY_LABEL, {"security_label", ""}}, { 0, {"", ""}} }; diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index d08cd88155c7..838f90f3f890 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -376,5 +376,4 @@ struct svc_version nfsd_acl_version2 = { .vs_proc = nfsd_acl_procedures2, .vs_dispatch = nfsd_dispatch, .vs_xdrsize = NFS3_SVC_XDRSIZE, - .vs_hidden = 0, }; diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 0c890347cde3..dcb5f79076c0 100644 --- 
a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -266,6 +266,5 @@ struct svc_version nfsd_acl_version3 = { .vs_proc = nfsd_acl_procedures3, .vs_dispatch = nfsd_dispatch, .vs_xdrsize = NFS3_SVC_XDRSIZE, - .vs_hidden = 0, }; diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index d818e4ffd79f..045c9081eabe 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -193,11 +193,9 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp, fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; - nfserr = nfsd_write(rqstp, &resp->fh, NULL, - argp->offset, - rqstp->rq_vec, argp->vlen, - &cnt, - &resp->committed); + nfserr = nfsd_write(rqstp, &resp->fh, argp->offset, + rqstp->rq_vec, argp->vlen, + &cnt, resp->committed); resp->count = cnt; RETURN_STATUS(nfserr); } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index eb78109d666c..0274db6e65d0 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -303,6 +303,7 @@ static int decode_cb_compound4res(struct xdr_stream *xdr, p = xdr_inline_decode(xdr, length + 4); if (unlikely(p == NULL)) goto out_overflow; + p += XDR_QUADLEN(length); hdr->nops = be32_to_cpup(p); return 0; out_overflow: @@ -396,13 +397,10 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr, struct nfsd4_callback *cb) { struct nfsd4_session *session = cb->cb_clp->cl_cb_session; - struct nfs4_sessionid id; - int status; + int status = -ESERVERFAULT; __be32 *p; u32 dummy; - status = -ESERVERFAULT; - /* * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. @@ -410,9 +408,8 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr, p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); if (unlikely(p == NULL)) goto out_overflow; - memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); - if (memcmp(id.data, session->se_sessionid.data, - NFS4_MAX_SESSIONID_LEN) != 0) { + + if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("NFS: %s Invalid session id\n", __func__); goto out; } @@ -753,6 +750,14 @@ int set_callback_cred(void) return 0; } +void cleanup_callback_cred(void) +{ + if (callback_cred) { + put_rpccred(callback_cred); + callback_cred = NULL; + } +} + static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses) { if (clp->cl_minorversion == 0) { diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 5b20577dcdd2..6b9b6cca469f 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -628,6 +628,10 @@ nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, { __be32 status; u32 id = -1; + + if (name == NULL || namelen == 0) + return nfserr_inval; + status = do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, &id); *uid = make_kuid(&init_user_ns, id); if (!uid_valid(*uid)) @@ -641,6 +645,10 @@ nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, { __be32 status; u32 id = -1; + + if (name == NULL || namelen == 0) + return nfserr_inval; + status = do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, &id); *gid = make_kgid(&init_user_ns, id); if (!gid_valid(*gid)) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 74a6e573e061..cbeeda1e94a2 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -95,11 +95,15 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, u32 *bmval, u32 *writable) { struct dentry *dentry = cstate->current_fh.fh_dentry; + struct svc_export *exp = 
cstate->current_fh.fh_export; if (!nfsd_attrs_supported(cstate->minorversion, bmval)) return nfserr_attrnotsupp; if ((bmval[0] & FATTR4_WORD0_ACL) && !IS_POSIXACL(d_inode(dentry))) return nfserr_attrnotsupp; + if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) && + !(exp->ex_flags & NFSEXP_SECURITY_LABEL)) + return nfserr_attrnotsupp; if (writable && !bmval_is_subset(bmval, writable)) return nfserr_inval; if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) && @@ -983,7 +987,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp, write->wr_offset, rqstp->rq_vec, nvecs, &cnt, - &write->wr_how_written); + write->wr_how_written); fput(filp); write->wr_bytes_written = cnt; @@ -1838,6 +1842,12 @@ static inline u32 nfsd4_status_stateid_rsize(struct svc_rqst *rqstp, struct nfsd return (op_encode_hdr_size + op_encode_stateid_maxsz)* sizeof(__be32); } +static inline u32 nfsd4_access_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + /* ac_supported, ac_resp_access */ + return (op_encode_hdr_size + 2)* sizeof(__be32); +} + static inline u32 nfsd4_commit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + op_encode_verifier_maxsz) * sizeof(__be32); @@ -1892,6 +1902,11 @@ static inline u32 nfsd4_getattr_rsize(struct svc_rqst *rqstp, return ret; } +static inline u32 nfsd4_getfh_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + 1) * sizeof(__be32) + NFS4_FHSIZE; +} + static inline u32 nfsd4_link_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + op_encode_change_info_maxsz) @@ -1933,6 +1948,11 @@ static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *o XDR_QUADLEN(rlen)) * sizeof(__be32); } +static inline u32 nfsd4_readlink_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + 1) * sizeof(__be32) + PAGE_SIZE; +} + static inline u32 nfsd4_remove_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + op_encode_change_info_maxsz) @@ -1952,11 +1972,23 @@ static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp, + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32); } +static inline u32 nfsd4_test_stateid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + 1 + op->u.test_stateid.ts_num_ids) + * sizeof(__be32); +} + static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + nfs4_fattr_bitmap_maxsz) * sizeof(__be32); } +static inline u32 nfsd4_secinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + RPC_AUTH_MAXFLAVOR * + (4 + XDR_QUADLEN(GSS_OID_MAX_LEN))) * sizeof(__be32); +} + static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) * @@ -2011,6 +2043,19 @@ static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) } #ifdef CONFIG_NFSD_PNFS +static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + u32 maxcount = 0, rlen = 0; + + maxcount = svc_max_payload(rqstp); + rlen = min(op->u.getdeviceinfo.gd_maxcount, maxcount); + + return (op_encode_hdr_size + + 1 /* gd_layout_type*/ + + XDR_QUADLEN(rlen) + + 2 /* gd_notify_types */) * sizeof(__be32); +} + /* * At this stage we don't really know what layout driver will handle the request, * so we need to define an arbitrary 
upper bound here. @@ -2040,10 +2085,17 @@ static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_ } #endif /* CONFIG_NFSD_PNFS */ + +static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + 3) * sizeof(__be32); +} + static struct nfsd4_operation nfsd4_ops[] = { [OP_ACCESS] = { .op_func = (nfsd4op_func)nfsd4_access, .op_name = "OP_ACCESS", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_access_rsize, }, [OP_CLOSE] = { .op_func = (nfsd4op_func)nfsd4_close, @@ -2081,6 +2133,7 @@ static struct nfsd4_operation nfsd4_ops[] = { [OP_GETFH] = { .op_func = (nfsd4op_func)nfsd4_getfh, .op_name = "OP_GETFH", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_getfh_rsize, }, [OP_LINK] = { .op_func = (nfsd4op_func)nfsd4_link, @@ -2099,6 +2152,7 @@ static struct nfsd4_operation nfsd4_ops[] = { [OP_LOCKT] = { .op_func = (nfsd4op_func)nfsd4_lockt, .op_name = "OP_LOCKT", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize, }, [OP_LOCKU] = { .op_func = (nfsd4op_func)nfsd4_locku, @@ -2111,15 +2165,18 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_lookup, .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID, .op_name = "OP_LOOKUP", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_LOOKUPP] = { .op_func = (nfsd4op_func)nfsd4_lookupp, .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID, .op_name = "OP_LOOKUPP", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_NVERIFY] = { .op_func = (nfsd4op_func)nfsd4_nverify, .op_name = "OP_NVERIFY", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_OPEN] = { .op_func = (nfsd4op_func)nfsd4_open, @@ -2177,6 +2234,7 @@ static struct nfsd4_operation nfsd4_ops[] = { [OP_READLINK] = { .op_func = (nfsd4op_func)nfsd4_readlink, .op_name = "OP_READLINK", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_readlink_rsize, }, [OP_REMOVE] = { .op_func = (nfsd4op_func)nfsd4_remove, @@ -2215,6 +2273,7 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_secinfo, .op_flags = OP_HANDLES_WRONGSEC, .op_name = "OP_SECINFO", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize, }, [OP_SETATTR] = { .op_func = (nfsd4op_func)nfsd4_setattr, @@ -2240,6 +2299,7 @@ static struct nfsd4_operation nfsd4_ops[] = { [OP_VERIFY] = { .op_func = (nfsd4op_func)nfsd4_verify, .op_name = "OP_VERIFY", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, [OP_WRITE] = { .op_func = (nfsd4op_func)nfsd4_write, @@ -2314,11 +2374,13 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_secinfo_no_name, .op_flags = OP_HANDLES_WRONGSEC, .op_name = "OP_SECINFO_NO_NAME", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_secinfo_rsize, }, [OP_TEST_STATEID] = { .op_func = (nfsd4op_func)nfsd4_test_stateid, .op_flags = ALLOWED_WITHOUT_FH, .op_name = "OP_TEST_STATEID", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_test_stateid_rsize, }, [OP_FREE_STATEID] = { .op_func = (nfsd4op_func)nfsd4_free_stateid, @@ -2332,6 +2394,7 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_getdeviceinfo, .op_flags = ALLOWED_WITHOUT_FH, .op_name = "OP_GETDEVICEINFO", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_getdeviceinfo_rsize, }, [OP_LAYOUTGET] = { .op_func = (nfsd4op_func)nfsd4_layoutget, @@ -2381,6 +2444,7 @@ static struct nfsd4_operation nfsd4_ops[] = { [OP_SEEK] = { .op_func = (nfsd4op_func)nfsd4_seek, .op_name = "OP_SEEK", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_seek_rsize, }, }; @@ -2425,14 +2489,11 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp) int 
nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op) { - struct nfsd4_operation *opdesc; - nfsd4op_rsize estimator; - if (op->opnum == OP_ILLEGAL) return op_encode_hdr_size * sizeof(__be32); - opdesc = OPDESC(op); - estimator = opdesc->op_rsize_bop; - return estimator ? estimator(rqstp, op) : PAGE_SIZE; + + BUG_ON(OPDESC(op)->op_rsize_bop == NULL); + return OPDESC(op)->op_rsize_bop(rqstp, op); } void warn_on_nonidempotent_op(struct nfsd4_op *op) @@ -2476,12 +2537,13 @@ static struct svc_procedure nfsd_procedures4[2] = { }; struct svc_version nfsd_version4 = { - .vs_vers = 4, - .vs_nproc = 2, - .vs_proc = nfsd_procedures4, - .vs_dispatch = nfsd_dispatch, - .vs_xdrsize = NFS4_SVC_XDRSIZE, - .vs_rpcb_optnl = 1, + .vs_vers = 4, + .vs_nproc = 2, + .vs_proc = nfsd_procedures4, + .vs_dispatch = nfsd_dispatch, + .vs_xdrsize = NFS4_SVC_XDRSIZE, + .vs_rpcb_optnl = true, + .vs_need_cong_ctrl = true, }; /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index a0dee8ae9f97..e9ef50addddb 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2281,7 +2281,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; - dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " + dprintk("NFSD: this client (clientid %08x/%08x) " "will not receive delegations\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); @@ -7012,23 +7012,24 @@ nfs4_state_start(void) ret = set_callback_cred(); if (ret) - return -ENOMEM; + return ret; + laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); if (laundry_wq == NULL) { ret = -ENOMEM; - goto out_recovery; + goto out_cleanup_cred; } ret = nfsd4_create_callback_queue(); if (ret) goto out_free_laundry; set_max_delegations(); - return 0; out_free_laundry: destroy_workqueue(laundry_wq); -out_recovery: +out_cleanup_cred: + cleanup_callback_cred(); return ret; } @@ -7086,6 +7087,7 @@ nfs4_state_shutdown(void) { destroy_workqueue(laundry_wq); nfsd4_destroy_callback_queue(); + cleanup_callback_cred(); } static void diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 8fae53ce21d1..33017d652b1d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -58,7 +58,7 @@ #define NFSDDBG_FACILITY NFSDDBG_XDR -u32 nfsd_suppattrs[3][3] = { +const u32 nfsd_suppattrs[3][3] = { {NFSD4_SUPPORTED_ATTRS_WORD0, NFSD4_SUPPORTED_ATTRS_WORD1, NFSD4_SUPPORTED_ATTRS_WORD2}, @@ -1250,7 +1250,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) READ_BUF(16); p = xdr_decode_hyper(p, &write->wr_offset); write->wr_stable_how = be32_to_cpup(p++); - if (write->wr_stable_how > 2) + if (write->wr_stable_how > NFS_FILE_SYNC) goto xdr_error; write->wr_buflen = be32_to_cpup(p++); @@ -1941,12 +1941,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) } else max_reply += nfsd4_max_reply(argp->rqstp, op); /* - * OP_LOCK may return a conflicting lock. (Special case - * because it will just skip encoding this if it runs - * out of xdr buffer space, and it is the only operation - * that behaves this way.) + * OP_LOCK and OP_LOCKT may return a conflicting lock. + * (Special case because it will just skip encoding this + * if it runs out of xdr buffer space, and it is the only + * operation that behaves this way.) 
*/ - if (op->opnum == OP_LOCK) + if (op->opnum == OP_LOCK || op->opnum == OP_LOCKT) max_reply += NFS4_OPAQUE_LIMIT; if (op->status) { @@ -1966,9 +1966,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) DECODE_TAIL; } -static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode) +static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode, + struct svc_export *exp) { - if (IS_I_VERSION(inode)) { + if (exp->ex_flags & NFSEXP_V4ROOT) { + *p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time)); + *p++ = 0; + } else if (IS_I_VERSION(inode)) { p = xdr_encode_hyper(p, inode->i_version); } else { *p++ = cpu_to_be32(stat->ctime.tv_sec); @@ -2297,7 +2301,7 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat) if (path.dentry != path.mnt->mnt_root) break; } - err = vfs_getattr(&path, stat); + err = vfs_getattr(&path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); path_put(&path); return err; } @@ -2381,7 +2385,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, goto out; } - err = vfs_getattr(&path, &stat); + err = vfs_getattr(&path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_nfserr; if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | @@ -2417,8 +2421,11 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, #ifdef CONFIG_NFSD_V4_SECURITY_LABEL if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) || bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { - err = security_inode_getsecctx(d_inode(dentry), + if (exp->ex_flags & NFSEXP_SECURITY_LABEL) + err = security_inode_getsecctx(d_inode(dentry), &context, &contextlen); + else + err = -EOPNOTSUPP; contextsupport = (err == 0); if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) { if (err == -EOPNOTSUPP) @@ -2490,7 +2497,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, p = xdr_reserve_space(xdr, 8); if (!p) goto out_resource; - p = encode_change(p, &stat, d_inode(dentry)); + p = encode_change(p, &stat, d_inode(dentry), exp); } if (bmval0 & FATTR4_WORD0_SIZE) { p = xdr_reserve_space(xdr, 8); diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index d6b97b424ad1..96fd15979cbd 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -578,7 +578,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) struct kvec *vec = &rqstp->rq_res.head[0]; if (vec->iov_len + data->iov_len > PAGE_SIZE) { - printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n", + printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n", data->iov_len); return 0; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index f3b2f34b10a3..73e75ac90525 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -536,6 +536,19 @@ out_free: return rv; } +static ssize_t +nfsd_print_version_support(char *buf, int remaining, const char *sep, + unsigned vers, unsigned minor) +{ + const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; + bool supported = !!nfsd_vers(vers, NFSD_TEST); + + if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) + supported = false; + return snprintf(buf, remaining, format, sep, + supported ? 
'+' : '-', vers, minor); +} + static ssize_t __write_versions(struct file *file, char *buf, size_t size) { char *mesg = buf; @@ -561,6 +574,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) len = qword_get(&mesg, vers, size); if (len <= 0) return -EINVAL; do { + enum vers_op cmd; sign = *vers; if (sign == '+' || sign == '-') num = simple_strtol((vers+1), &minorp, 0); @@ -569,24 +583,22 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) if (*minorp == '.') { if (num != 4) return -EINVAL; - minor = simple_strtoul(minorp+1, NULL, 0); - if (minor == 0) - return -EINVAL; - if (nfsd_minorversion(minor, sign == '-' ? - NFSD_CLEAR : NFSD_SET) < 0) + if (kstrtouint(minorp+1, 0, &minor) < 0) return -EINVAL; - goto next; - } + } else + minor = 0; + cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; switch(num) { case 2: case 3: - case 4: - nfsd_vers(num, sign == '-' ? NFSD_CLEAR : NFSD_SET); + nfsd_vers(num, cmd); break; + case 4: + if (nfsd_minorversion(minor, cmd) >= 0) + break; default: return -EINVAL; } - next: vers += len + 1; } while ((len = qword_get(&mesg, vers, size)) > 0); /* If all get turned off, turn them back on, as @@ -599,35 +611,23 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) len = 0; sep = ""; remaining = SIMPLE_TRANSACTION_LIMIT; - for (num=2 ; num <= 4 ; num++) - if (nfsd_vers(num, NFSD_AVAIL)) { - len = snprintf(buf, remaining, "%s%c%d", sep, - nfsd_vers(num, NFSD_TEST)?'+':'-', - num); - sep = " "; - - if (len >= remaining) - break; - remaining -= len; - buf += len; - tlen += len; - } - if (nfsd_vers(4, NFSD_AVAIL)) - for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION; - minor++) { - len = snprintf(buf, remaining, " %c4.%u", - (nfsd_vers(4, NFSD_TEST) && - nfsd_minorversion(minor, NFSD_TEST)) ? 
- '+' : '-', - minor); - + for (num=2 ; num <= 4 ; num++) { + if (!nfsd_vers(num, NFSD_AVAIL)) + continue; + minor = 0; + do { + len = nfsd_print_version_support(buf, remaining, + sep, num, minor); if (len >= remaining) - break; + goto out; remaining -= len; buf += len; tlen += len; - } - + minor++; + sep = " "; + } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); + } +out: len = snprintf(buf, remaining, "\n"); if (len >= remaining) return -EINVAL; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index d74c8c44dc35..d96606801d47 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -362,16 +362,16 @@ void nfsd_lockd_shutdown(void); FATTR4_WORD2_MODE_UMASK | \ NFSD4_2_SECURITY_ATTRS) -extern u32 nfsd_suppattrs[3][3]; +extern const u32 nfsd_suppattrs[3][3]; -static inline bool bmval_is_subset(u32 *bm1, u32 *bm2) +static inline bool bmval_is_subset(const u32 *bm1, const u32 *bm2) { return !((bm1[0] & ~bm2[0]) || (bm1[1] & ~bm2[1]) || (bm1[2] & ~bm2[2])); } -static inline bool nfsd_attrs_supported(u32 minorversion, u32 *bmval) +static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval) { return bmval_is_subset(bmval, nfsd_suppattrs[minorversion]); } diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 010aff5c5a79..fa82b7707e85 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -204,18 +204,14 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp, struct nfsd_attrstat *resp) { __be32 nfserr; - int stable = 1; unsigned long cnt = argp->len; dprintk("nfsd: WRITE %s %d bytes at %d\n", SVCFH_fmt(&argp->fh), argp->len, argp->offset); - nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), NULL, - argp->offset, - rqstp->rq_vec, argp->vlen, - &cnt, - &stable); + nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), argp->offset, + rqstp->rq_vec, argp->vlen, &cnt, NFS_DATA_SYNC); return nfsd_return_attrs(nfserr, resp); } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index e6bfd96734c0..786a4a2cb2d7 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -6,7 +6,7 @@ * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/freezer.h> #include <linux/module.h> #include <linux/fs_struct.h> @@ -153,6 +153,18 @@ int nfsd_vers(int vers, enum vers_op change) return 0; } +static void +nfsd_adjust_nfsd_versions4(void) +{ + unsigned i; + + for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) { + if (nfsd_supported_minorversions[i]) + return; + } + nfsd_vers(4, NFSD_CLEAR); +} + int nfsd_minorversion(u32 minorversion, enum vers_op change) { if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) @@ -160,9 +172,11 @@ int nfsd_minorversion(u32 minorversion, enum vers_op change) switch(change) { case NFSD_SET: nfsd_supported_minorversions[minorversion] = true; + nfsd_vers(4, NFSD_SET); break; case NFSD_CLEAR: nfsd_supported_minorversions[minorversion] = false; + nfsd_adjust_nfsd_versions4(); break; case NFSD_TEST: return nfsd_supported_minorversions[minorversion]; @@ -354,6 +368,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this, dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ifa->addr; + if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) + sin6.sin6_scope_id = ifa->idev->dev->ifindex; svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6); } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 4516e8b7d776..005c911b34ac 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -615,6 +615,7 @@ 
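The rewritten __write_versions() above renders the support string with one token per protocol version: a sign character followed by the version number, NFSv4 minors printed as "4.N", all joined by single spaces, and a v4 minor only shows '+' when both NFSv4 itself and that minor are enabled. A small standalone sketch of the formatting rule, using a hypothetical support table in place of nfsd_vers()/nfsd_minorversion():

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical support table: v4_minor[0] is 4.0, [1] is 4.1, [2] is 4.2. */
static bool v2, v3 = true, v4_minor[3] = { true, true, false };

int main(void)
{
	const char *sep = "";
	unsigned vers, minor;

	for (vers = 2; vers <= 4; vers++) {
		minor = 0;
		do {
			bool on = (vers == 2) ? v2 :
				  (vers == 3) ? v3 : v4_minor[minor];

			if (minor == 0)
				printf("%s%c%u", sep, on ? '+' : '-', vers);
			else
				printf("%s%c%u.%u", sep, on ? '+' : '-',
				       vers, minor);
			sep = " ";
			minor++;
		} while (vers == 4 && minor <= 2);
	}
	printf("\n");	/* prints: -2 +3 +4 +4.1 -4.2 */
	return 0;
}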
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir, extern __be32 nfs4_check_open_reclaim(clientid_t *clid, struct nfsd4_compound_state *cstate, struct nfsd_net *nn); extern int set_callback_cred(void); +extern void cleanup_callback_cred(void); extern void nfsd4_probe_callback(struct nfs4_client *clp); extern void nfsd4_probe_callback_sync(struct nfs4_client *clp); extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 26c6fdb4bf67..19d50f600e8d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -377,7 +377,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, __be32 err; int host_err; bool get_write_count; - int size_change = 0; + bool size_change = (iap->ia_valid & ATTR_SIZE); if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; @@ -390,11 +390,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, /* Get inode */ err = fh_verify(rqstp, fhp, ftype, accmode); if (err) - goto out; + return err; if (get_write_count) { host_err = fh_want_write(fhp); if (host_err) - return nfserrno(host_err); + goto out; } dentry = fhp->fh_dentry; @@ -405,20 +405,28 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, iap->ia_valid &= ~ATTR_MODE; if (!iap->ia_valid) - goto out; + return 0; nfsd_sanitize_attrs(inode, iap); + if (check_guard && guardtime != inode->i_ctime.tv_sec) + return nfserr_notsync; + /* * The size case is special, it changes the file in addition to the - * attributes. + * attributes, and file systems don't expect it to be mixed with + * "random" attribute changes. We thus split out the size change + * into a separate call to ->setattr, and do the rest as a separate + * setattr call. */ - if (iap->ia_valid & ATTR_SIZE) { + if (size_change) { err = nfsd_get_write_access(rqstp, fhp, iap); if (err) - goto out; - size_change = 1; + return err; + } + fh_lock(fhp); + if (size_change) { /* * RFC5661, Section 18.30.4: * Changing the size of a file with SETATTR indirectly @@ -426,29 +434,36 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, * * (and similar for the older RFCs) */ - if (iap->ia_size != i_size_read(inode)) - iap->ia_valid |= ATTR_MTIME; - } + struct iattr size_attr = { + .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME, + .ia_size = iap->ia_size, + }; - iap->ia_valid |= ATTR_CTIME; + host_err = notify_change(dentry, &size_attr, NULL); + if (host_err) + goto out_unlock; + iap->ia_valid &= ~ATTR_SIZE; - if (check_guard && guardtime != inode->i_ctime.tv_sec) { - err = nfserr_notsync; - goto out_put_write_access; + /* + * Avoid the additional setattr call below if the only other + * attribute that the client sends is the mtime, as we update + * it as part of the size change above. 
+ */ + if ((iap->ia_valid & ~ATTR_MTIME) == 0) + goto out_unlock; } - fh_lock(fhp); + iap->ia_valid |= ATTR_CTIME; host_err = notify_change(dentry, iap, NULL); - fh_unlock(fhp); - err = nfserrno(host_err); -out_put_write_access: +out_unlock: + fh_unlock(fhp); if (size_change) put_write_access(inode); - if (!err) - err = nfserrno(commit_metadata(fhp)); out: - return err; + if (!host_err) + host_err = commit_metadata(fhp); + return nfserrno(host_err); } #if defined(CONFIG_NFSD_V4) @@ -940,14 +955,12 @@ static int wait_for_concurrent_writes(struct file *file) __be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, - unsigned long *cnt, int *stablep) + unsigned long *cnt, int stable) { struct svc_export *exp; - struct inode *inode; mm_segment_t oldfs; __be32 err = 0; int host_err; - int stable = *stablep; int use_wgather; loff_t pos = offset; unsigned int pflags = current->flags; @@ -962,13 +975,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, */ current->flags |= PF_LESS_THROTTLE; - inode = file_inode(file); - exp = fhp->fh_export; - + exp = fhp->fh_export; use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp); if (!EX_ISSYNC(exp)) - stable = 0; + stable = NFS_UNSTABLE; if (stable && !use_wgather) flags |= RWF_SYNC; @@ -1035,35 +1046,22 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, * N.B. After this call fhp needs an fh_put */ __be32 -nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, - loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt, - int *stablep) +nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, + struct kvec *vec, int vlen, unsigned long *cnt, int stable) { - __be32 err = 0; + struct file *file = NULL; + __be32 err = 0; trace_write_start(rqstp, fhp, offset, vlen); - if (file) { - err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, - NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE); - if (err) - goto out; - trace_write_opened(rqstp, fhp, offset, vlen); - err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt, - stablep); - trace_write_io_done(rqstp, fhp, offset, vlen); - } else { - err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file); - if (err) - goto out; + err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file); + if (err) + goto out; - trace_write_opened(rqstp, fhp, offset, vlen); - if (cnt) - err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, - cnt, stablep); - trace_write_io_done(rqstp, fhp, offset, vlen); - fput(file); - } + trace_write_opened(rqstp, fhp, offset, vlen); + err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt, stable); + trace_write_io_done(rqstp, fhp, offset, vlen); + fput(file); out: trace_write_done(rqstp, fhp, offset, vlen); return err; diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index 0bf9e7bf5800..1bbdccecbf3d 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -83,12 +83,12 @@ __be32 nfsd_readv(struct file *, loff_t, struct kvec *, int, unsigned long *); __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, loff_t, struct kvec *, int, unsigned long *); -__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, - loff_t, struct kvec *,int, unsigned long *, int *); +__be32 nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t, + struct kvec *, int, unsigned long *, int); __be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt, - int *stablep); + int 
stable); __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, char *, int *); __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, @@ -135,7 +135,8 @@ static inline __be32 fh_getattr(struct svc_fh *fh, struct kstat *stat) { struct path p = {.mnt = fh->fh_export->ex_path.mnt, .dentry = fh->fh_dentry}; - return nfserrno(vfs_getattr(&p, stat)); + return nfserrno(vfs_getattr(&p, stat, STATX_BASIC_STATS, + AT_STATX_SYNC_AS_STAT)); } static inline int nfsd_create_is_exclusive(int createmode) diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 2c90e285d7c6..03b8ba933eb2 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -34,7 +34,7 @@ static inline unsigned long nilfs_palloc_groups_per_desc_block(const struct inode *inode) { - return (1UL << inode->i_blkbits) / + return i_blocksize(inode) / sizeof(struct nilfs_palloc_group_desc); } diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index d5c23da43513..c21e0b4454a6 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -50,7 +50,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) brelse(bh); BUG(); } - memset(bh->b_data, 0, 1 << inode->i_blkbits); + memset(bh->b_data, 0, i_blocksize(inode)); bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = blocknr; set_buffer_mapped(bh); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 2e315f9f2e51..06ffa135dfa6 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -119,7 +119,7 @@ nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren) static int nilfs_btree_node_size(const struct nilfs_bmap *btree) { - return 1 << btree->b_inode->i_blkbits; + return i_blocksize(btree->b_inode); } static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) @@ -1870,7 +1870,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree, di = &dreq; ni = NULL; } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX( - 1 << btree->b_inode->i_blkbits)) { + nilfs_btree_node_size(btree))) { di = &dreq; ni = &nreq; } else { diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index c7f4fef9ebf5..7ffe71a8dfb9 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -51,7 +51,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n) { struct nilfs_root *root = NILFS_I(inode)->i_root; - inode_add_bytes(inode, (1 << inode->i_blkbits) * n); + inode_add_bytes(inode, i_blocksize(inode) * n); if (root) atomic64_add(n, &root->blocks_count); } @@ -60,7 +60,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) { struct nilfs_root *root = NILFS_I(inode)->i_root; - inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); + inode_sub_bytes(inode, i_blocksize(inode) * n); if (root) atomic64_sub(n, &root->blocks_count); } diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index d56d3a5bea88..98835ed6bef4 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -57,7 +57,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, set_buffer_mapped(bh); kaddr = kmap_atomic(bh->b_page); - memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); + memset(kaddr + bh_offset(bh), 0, i_blocksize(inode)); if (init_block) init_block(inode, bh, kaddr); flush_dcache_page(bh->b_page); @@ -501,7 +501,7 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, struct nilfs_mdt_info *mi = NILFS_MDT(inode); mi->mi_entry_size = entry_size; - mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size; + mi->mi_entries_per_block = i_blocksize(inode) / entry_size; mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); } diff 
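The nilfs2 hunks above (and the ocfs2/orangefs hunks further down) replace the open-coded "1 << inode->i_blkbits" with the i_blocksize() helper. As used here it is purely a readability change; roughly, the helper amounts to the kernel-context sketch below, not the verbatim definition:

/* Rough equivalent of the i_blocksize() helper these hunks switch to. */
static inline unsigned int example_i_blocksize(const struct inode *inode)
{
	return 1U << inode->i_blkbits;
}

/* e.g.:  memset(bh->b_data, 0, example_i_blocksize(inode)); */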
--git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index bedcae2c28e6..febed1217b3f 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -30,6 +30,8 @@ #include <linux/crc32.h> #include <linux/pagevec.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "nilfs.h" #include "btnode.h" #include "page.h" @@ -723,7 +725,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, lock_page(page); if (!page_has_buffers(page)) - create_empty_buffers(page, 1 << inode->i_blkbits, 0); + create_empty_buffers(page, i_blocksize(inode), 0); unlock_page(page); bh = head = page_buffers(page); diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index a4c46221755e..e5f7e47de68e 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> /* UINT_MAX */ #include <linux/mount.h> #include <linux/sched.h> +#include <linux/sched/user.h> #include <linux/types.h> #include <linux/wait.h> diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 7ebfca6a1427..2b37f2785834 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/uaccess.h> #include <linux/compat.h> +#include <linux/sched/signal.h> #include <asm/ioctls.h> diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index f36c29398de3..1aeb837ae414 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -30,6 +30,7 @@ #include <linux/slab.h> /* kmem_* */ #include <linux/types.h> #include <linux/sched.h> +#include <linux/sched/user.h> #include "inotify.h" diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 1cf41c623be1..498d609b26c7 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -30,7 +30,7 @@ #include <linux/inotify.h> #include <linux/kernel.h> /* roundup() */ #include <linux/namei.h> /* LOOKUP_FOLLOW */ -#include <linux/sched.h> /* struct user */ +#include <linux/sched/signal.h> #include <linux/slab.h> /* struct kmem_cache */ #include <linux/syscalls.h> #include <linux/types.h> diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 358ed7e1195a..c4f68c338735 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -24,7 +24,7 @@ #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/pagevec.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/swap.h> #include <linux/uio.h> #include <linux/writeback.h> diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d4ec0d8961a6..fb15a96df0b6 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -30,6 +30,7 @@ #include <linux/swap.h> #include <linux/quotaops.h> #include <linux/blkdev.h> +#include <linux/sched/signal.h> #include <cluster/masklog.h> diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 11556b7d93ec..88a31e9340a0 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -608,7 +608,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; - unsigned int bsize = 1 << inode->i_blkbits; + unsigned int bsize = i_blocksize(inode); if (!page_has_buffers(page)) create_empty_buffers(page, bsize, 0); diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ec000575e863..4348027384f5 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -54,6 +54,7 @@ */ #include 
<linux/kernel.h> +#include <linux/sched/mm.h> #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/idr.h> diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 32fd261ae13d..a2b19fbdcf46 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -33,6 +33,7 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/debugfs.h> +#include <linux/sched/signal.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 7025d8c27999..3e04279446e8 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2924,7 +2924,7 @@ again: /* * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for * another try; otherwise, we are sure the MIGRATING state is there, - * drop the unneded state which blocked threads trying to DIRTY + * drop the unneeded state which blocked threads trying to DIRTY */ spin_lock(&res->spinlock); BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index f70cda2f090d..9cecf4857195 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -28,6 +28,7 @@ */ #include <linux/signal.h> +#include <linux/sched/signal.h> #include <linux/module.h> #include <linux/fs.h> diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8dce4099a6ca..3b7c937a36b5 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -33,6 +33,7 @@ #include <linux/seq_file.h> #include <linux/time.h> #include <linux/quotaops.h> +#include <linux/sched/signal.h> #define MLOG_MASK_PREFIX ML_DLM_GLUE #include <cluster/masklog.h> diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7b6a146327d7..bfeb647459d9 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, /* We know that zero_from is block aligned */ for (block_start = zero_from; block_start < zero_to; block_start = block_end) { - block_end = block_start + (1 << inode->i_blkbits); + block_end = block_start + i_blocksize(inode); /* * block_start is block-aligned. 
Bump it by one to force @@ -1306,16 +1306,15 @@ bail: return status; } -int ocfs2_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *stat) +int ocfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); - struct super_block *sb = dentry->d_sb; + struct inode *inode = d_inode(path->dentry); + struct super_block *sb = path->dentry->d_sb; struct ocfs2_super *osb = sb->s_fs_info; int err; - err = ocfs2_inode_revalidate(dentry); + err = ocfs2_inode_revalidate(path->dentry); if (err) { if (err != -ENOENT) mlog_errno(err); diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 897fd9a2e51d..1fdc9839cd93 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -68,8 +68,8 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, int ocfs2_extend_allocation(struct inode *inode, u32 logical_start, u32 clusters_to_add, int mark_unwritten); int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); -int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int ocfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int ocfs2_permission(struct inode *inode, int mask); int ocfs2_should_update_atime(struct inode *inode, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index a24e42f95341..ca1646fbcaef 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -42,6 +42,7 @@ #include <linux/seq_file.h> #include <linux/quotaops.h> #include <linux/cleancache.h> +#include <linux/signal.h> #define CREATE_TRACE_POINTS #include "ocfs2_trace.h" diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index df7ea8543a2e..8c9034ee7383 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/fs.h> #include <linux/vfs.h> +#include <linux/cred.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vmalloc.h> diff --git a/fs/open.c b/fs/open.c index 9921f70bc5ca..949cef29c3bb 100644 --- a/fs/open.c +++ b/fs/open.c @@ -301,12 +301,10 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (S_ISFIFO(inode->i_mode)) return -ESPIPE; - /* - * Let individual file system decide if it supports preallocation - * for directories or not. 
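ocfs2_getattr() above follows the tree-wide conversion visible throughout this diff: ->getattr() now takes the path being queried, a request_mask of STATX_* bits the caller wants, and query flags such as AT_STATX_SYNC_AS_STAT, and vfs_getattr() gains the same two arguments. A minimal method under the new prototype might look like the kernel-context sketch below; generic_fillattr() still fills the common fields, and the extra arguments let a filesystem skip attributes it was not asked for:

/* Minimal ->getattr() under the new prototype (sketch, hypothetical fs). */
static int example_getattr(const struct path *path, struct kstat *stat,
			   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(inode, stat);

	/* Only pay for optional attributes the caller actually asked for. */
	if (request_mask & STATX_BTIME) {
		/* e.g. look up a creation time and flag it in stat->result_mask */
	}
	return 0;
}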
- */ - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && - !S_ISBLK(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) + return -EISDIR; + + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) return -ENODEV; /* Check for wrap through zero too */ @@ -316,7 +314,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!file->f_op->fallocate) return -EOPNOTSUPP; - sb_start_write(inode->i_sb); + file_start_write(file); ret = file->f_op->fallocate(file, mode, offset, len); /* @@ -329,7 +327,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret == 0) fsnotify_modify(file); - sb_end_write(inode->i_sb); + file_end_write(file); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c index b0ced669427e..c4ab6fdf17a0 100644 --- a/fs/orangefs/devorangefs-req.c +++ b/fs/orangefs/devorangefs-req.c @@ -400,8 +400,9 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb, /* remove the op from the in progress hash table */ op = orangefs_devreq_remove_op(head.tag); if (!op) { - gossip_err("WARNING: No one's waiting for tag %llu\n", - llu(head.tag)); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: No one's waiting for tag %llu\n", + __func__, llu(head.tag)); return ret; } diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 551bc74ed2b8..a304bf34b212 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -136,12 +136,6 @@ static ssize_t orangefs_direct_IO(struct kiocb *iocb, return -EINVAL; } -struct backing_dev_info orangefs_backing_dev_info = { - .name = "orangefs", - .ra_pages = 0, - .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, -}; - /** ORANGEFS2 implementation of address space operations */ const struct address_space_operations orangefs_address_operations = { .readpage = orangefs_readpage, @@ -251,25 +245,24 @@ out: /* * Obtain attributes of an object given a dentry */ -int orangefs_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *kstat) +int orangefs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { int ret = -ENOENT; - struct inode *inode = dentry->d_inode; + struct inode *inode = path->dentry->d_inode; struct orangefs_inode_s *orangefs_inode = NULL; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_getattr: called on %pd\n", - dentry); + path->dentry); ret = orangefs_inode_getattr(inode, 0, 0); if (ret == 0) { - generic_fillattr(inode, kstat); + generic_fillattr(inode, stat); /* override block size reported to stat */ orangefs_inode = ORANGEFS_I(inode); - kstat->blksize = orangefs_inode->blksize; + stat->blksize = orangefs_inode->blksize; } return ret; } diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 75375e90a63f..6333cbbdfef7 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -344,6 +344,11 @@ int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc) user_desc->size, user_desc->count); + if (user_desc->total_size < 0 || + user_desc->size < 0 || + user_desc->count < 0) + goto out; + /* * sanity check alignment and size of buffer that caller wants to * work with diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 27e75cf28b3a..791912da97d7 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -967,13 +967,13 @@ int orangefs_debugfs_new_client_string(void __user *arg) int ret; ret = copy_from_user(&client_debug_array_string, - (void __user 
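In the vfs_fallocate() hunk above, directories are now rejected up front with -EISDIR rather than being left to each filesystem's ->fallocate() (which is what the removed comment described), other non-regular, non-block files still get -ENODEV, and the freeze protection switches from sb_start_write()/sb_end_write() to file_start_write()/file_end_write(). The practical effect of that last swap is that only regular files take superblock write protection; a rough sketch of the helper, not the exact kernel definition:

/* Rough shape of file_start_write(): no freeze protection for non-regular files. */
static inline void example_file_start_write(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return;
	sb_start_write(file_inode(file)->i_sb);
}

The matching end helper would call sb_end_write() under the same S_ISREG test.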
*)arg, - ORANGEFS_MAX_DEBUG_STRING_LEN); + (void __user *)arg, + ORANGEFS_MAX_DEBUG_STRING_LEN); if (ret != 0) { pr_info("%s: CLIENT_STRING: copy_from_user failed\n", __func__); - return -EIO; + return -EFAULT; } /* @@ -988,17 +988,18 @@ int orangefs_debugfs_new_client_string(void __user *arg) */ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] = '\0'; - + pr_info("%s: client debug array string has been received.\n", __func__); if (!help_string_initialized) { /* Build a proper debug help string. */ - if (orangefs_prepare_debugfs_help_string(0)) { + ret = orangefs_prepare_debugfs_help_string(0); + if (ret) { gossip_err("%s: no debug help string \n", __func__); - return -EIO; + return ret; } } @@ -1011,7 +1012,7 @@ int orangefs_debugfs_new_client_string(void __user *arg) help_string_initialized++; - return ret; + return 0; } int orangefs_debugfs_new_debug(void __user *arg) diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h index a3d84ffee905..f380f9ed1b28 100644 --- a/fs/orangefs/orangefs-dev-proto.h +++ b/fs/orangefs/orangefs-dev-proto.h @@ -50,8 +50,7 @@ * Misc constants. Please retain them as multiples of 8! * Otherwise 32-64 bit interactions will be messed up :) */ -#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400 -#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800 +#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800 /* * The maximum number of directory entries in a single request is 96. diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 3bf803d732c5..5e48a0be9761 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -41,7 +41,7 @@ #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/uio.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/wait.h> #include <linux/dcache.h> @@ -439,9 +439,8 @@ struct inode *orangefs_new_inode(struct super_block *sb, int orangefs_setattr(struct dentry *dentry, struct iattr *iattr); -int orangefs_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *kstat); +int orangefs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int orangefs_permission(struct inode *inode, int mask); @@ -529,7 +528,6 @@ extern spinlock_t orangefs_htable_ops_in_progress_lock; extern int hash_table_size; extern const struct address_space_operations orangefs_address_operations; -extern struct backing_dev_info orangefs_backing_dev_info; extern const struct inode_operations orangefs_file_inode_operations; extern const struct file_operations orangefs_file_operations; extern const struct inode_operations orangefs_symlink_inode_operations; diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c index 4113eb0495bf..c1b5174cb5a9 100644 --- a/fs/orangefs/orangefs-mod.c +++ b/fs/orangefs/orangefs-mod.c @@ -80,11 +80,6 @@ static int __init orangefs_init(void) int ret = -1; __u32 i = 0; - ret = bdi_init(&orangefs_backing_dev_info); - - if (ret) - return ret; - if (op_timeout_secs < 0) op_timeout_secs = 0; @@ -94,7 +89,7 @@ static int __init orangefs_init(void) /* initialize global book keeping data structures */ ret = op_cache_initialize(); if (ret < 0) - goto err; + goto out; ret = orangefs_inode_cache_initialize(); if (ret < 0) @@ -181,9 +176,6 @@ cleanup_inode: cleanup_op: op_cache_finalize(); -err: - bdi_destroy(&orangefs_backing_dev_info); - out: return ret; } @@ -207,8 +199,6 @@ static void __exit orangefs_exit(void) kfree(orangefs_htable_ops_in_progress); - 
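The orangefs_debugfs_new_client_string() fix above follows the standard convention for copying data in from userspace: copy_from_user() returns the number of bytes it could not copy, a faulting user pointer is reported as -EFAULT rather than -EIO, and whatever arrived is explicitly NUL-terminated before use. A minimal kernel-context sketch of that pattern, with a hypothetical fixed-size destination:

/* Hypothetical helper showing the copy_from_user() error convention. */
static int example_copy_string(char *dst, size_t dst_len,
			       const void __user *src)
{
	if (copy_from_user(dst, src, dst_len))
		return -EFAULT;		/* partial copy means a faulting pointer */
	dst[dst_len - 1] = '\0';	/* never trust userspace to terminate */
	return 0;
}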
bdi_destroy(&orangefs_backing_dev_info); - pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION); } diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c index 084954448f18..afd2f523b283 100644 --- a/fs/orangefs/orangefs-sysfs.c +++ b/fs/orangefs/orangefs-sysfs.c @@ -91,6 +91,13 @@ * Description: * Readahead cache buffer count and size. * + * What: /sys/fs/orangefs/readahead_readcnt + * Date: Jan 2017 + * Contact: Martin Brandenburg <martin@omnibond.com> + * Description: + * Number of buffers (in multiples of readahead_size) + * which can be read ahead for a single file at once. + * * What: /sys/fs/orangefs/acache/... * Date: Jun 2015 * Contact: Martin Brandenburg <martin@omnibond.com> @@ -329,7 +336,8 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj, if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || - !strcmp(attr->attr.name, "readahead_count_size"))) { + !strcmp(attr->attr.name, "readahead_count_size") || + !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } @@ -360,6 +368,11 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj, "readahead_count_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE; + + else if (!strcmp(attr->attr.name, + "readahead_readcnt")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_msecs")) new_op->upcall.req.param.op = @@ -542,7 +555,8 @@ static ssize_t sysfs_service_op_store(struct kobject *kobj, if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || - !strcmp(attr->attr.name, "readahead_count_size"))) { + !strcmp(attr->attr.name, "readahead_count_size") || + !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } @@ -609,6 +623,15 @@ static ssize_t sysfs_service_op_store(struct kobject *kobj, new_op->upcall.req.param.u.value32[0] = val1; new_op->upcall.req.param.u.value32[1] = val2; goto value_set; + } else if (!strcmp(attr->attr.name, + "readahead_readcnt")) { + if ((val >= 0)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; + } else { + rc = 0; + goto out; + } } } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { @@ -812,6 +835,10 @@ static struct orangefs_attribute readahead_count_size_attribute = __ATTR(readahead_count_size, 0664, sysfs_service_op_show, sysfs_service_op_store); +static struct orangefs_attribute readahead_readcnt_attribute = + __ATTR(readahead_readcnt, 0664, sysfs_service_op_show, + sysfs_service_op_store); + static struct orangefs_attribute perf_counter_reset_attribute = __ATTR(perf_counter_reset, 0664, @@ -838,6 +865,7 @@ static struct attribute *orangefs_default_attrs[] = { &readahead_count_attribute.attr, &readahead_size_attribute.attr, &readahead_count_size_attribute.attr, + &readahead_readcnt_attribute.attr, &perf_counter_reset_attribute.attr, &perf_history_size_attribute.attr, &perf_time_interval_secs_attribute.attr, diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c index 06af81f71e10..9b96b99539d6 100644 --- a/fs/orangefs/orangefs-utils.c +++ b/fs/orangefs/orangefs-utils.c @@ -306,7 +306,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass) break; case S_IFDIR: inode->i_size = PAGE_SIZE; - orangefs_inode->blksize = (1 << 
inode->i_blkbits); + orangefs_inode->blksize = i_blocksize(inode); spin_lock(&inode->i_lock); inode_set_bytes(inode, inode->i_size); spin_unlock(&inode->i_lock); @@ -316,7 +316,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass) if (new) { inode->i_size = (loff_t)strlen(new_op-> downcall.resp.getattr.link_target); - orangefs_inode->blksize = (1 << inode->i_blkbits); + orangefs_inode->blksize = i_blocksize(inode); ret = strscpy(orangefs_inode->link_target, new_op->downcall.resp.getattr.link_target, ORANGEFS_NAME_MAX); diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index c48859f16e7b..67c24351a67f 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb) return &orangefs_inode->vfs_inode; } +static void orangefs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + kmem_cache_free(orangefs_inode_cache, orangefs_inode); +} + static void orangefs_destroy_inode(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); @@ -123,7 +130,7 @@ static void orangefs_destroy_inode(struct inode *inode) "%s: deallocated %p destroying inode %pU\n", __func__, orangefs_inode, get_khandle_from_ino(inode)); - kmem_cache_free(orangefs_inode_cache, orangefs_inode); + call_rcu(&inode->i_rcu, orangefs_i_callback); } /* diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h index af0b0e36d559..b8249f8fdd80 100644 --- a/fs/orangefs/upcall.h +++ b/fs/orangefs/upcall.h @@ -182,6 +182,7 @@ enum orangefs_param_request_op { ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE = 26, ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT = 27, ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE = 28, + ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT = 29, }; struct orangefs_param_request_s { diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index f57043dace62..906ea6c93260 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -15,11 +15,13 @@ #include <linux/xattr.h> #include <linux/security.h> #include <linux/uaccess.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/cred.h> #include <linux/namei.h> #include <linux/fdtable.h> #include <linux/ratelimit.h> #include "overlayfs.h" +#include "ovl_entry.h" #define OVL_COPY_UP_CHUNK_SIZE (1 << 20) @@ -232,12 +234,14 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, struct dentry *dentry, struct path *lowerpath, - struct kstat *stat, const char *link) + struct kstat *stat, const char *link, + struct kstat *pstat, bool tmpfile) { struct inode *wdir = workdir->d_inode; struct inode *udir = upperdir->d_inode; struct dentry *newdentry = NULL; struct dentry *upper = NULL; + struct dentry *temp = NULL; int err; const struct cred *old_creds = NULL; struct cred *new_creds = NULL; @@ -248,25 +252,30 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, .link = link }; - newdentry = ovl_lookup_temp(workdir, dentry); - err = PTR_ERR(newdentry); - if (IS_ERR(newdentry)) - goto out; - upper = lookup_one_len(dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) - goto out1; + goto out; err = security_inode_copy_up(dentry, &new_creds); if (err < 0) - goto out2; + goto out1; if (new_creds) old_creds = override_creds(new_creds); - err = ovl_create_real(wdir, 
newdentry, &cattr, NULL, true); + if (tmpfile) + temp = ovl_do_tmpfile(upperdir, stat->mode); + else + temp = ovl_lookup_temp(workdir, dentry); + err = PTR_ERR(temp); + if (IS_ERR(temp)) + goto out1; + + err = 0; + if (!tmpfile) + err = ovl_create_real(wdir, temp, &cattr, NULL, true); if (new_creds) { revert_creds(old_creds); @@ -281,39 +290,55 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, ovl_path_upper(dentry, &upperpath); BUG_ON(upperpath.dentry != NULL); - upperpath.dentry = newdentry; + upperpath.dentry = temp; + + if (tmpfile) { + inode_unlock(udir); + err = ovl_copy_up_data(lowerpath, &upperpath, + stat->size); + inode_lock_nested(udir, I_MUTEX_PARENT); + } else { + err = ovl_copy_up_data(lowerpath, &upperpath, + stat->size); + } - err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); if (err) goto out_cleanup; } - err = ovl_copy_xattr(lowerpath->dentry, newdentry); + err = ovl_copy_xattr(lowerpath->dentry, temp); if (err) goto out_cleanup; - inode_lock(newdentry->d_inode); - err = ovl_set_attr(newdentry, stat); - inode_unlock(newdentry->d_inode); + inode_lock(temp->d_inode); + err = ovl_set_attr(temp, stat); + inode_unlock(temp->d_inode); if (err) goto out_cleanup; - err = ovl_do_rename(wdir, newdentry, udir, upper, 0); + if (tmpfile) + err = ovl_do_link(temp, udir, upper, true); + else + err = ovl_do_rename(wdir, temp, udir, upper, 0); if (err) goto out_cleanup; + newdentry = dget(tmpfile ? upper : temp); ovl_dentry_update(dentry, newdentry); ovl_inode_update(d_inode(dentry), d_inode(newdentry)); - newdentry = NULL; + + /* Restore timestamps on parent (best effort) */ + ovl_set_timestamps(upperdir, pstat); out2: - dput(upper); + dput(temp); out1: - dput(newdentry); + dput(upper); out: return err; out_cleanup: - ovl_cleanup(wdir, newdentry); + if (!tmpfile) + ovl_cleanup(wdir, temp); goto out2; } @@ -337,6 +362,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, struct dentry *lowerdentry = lowerpath->dentry; struct dentry *upperdir; const char *link = NULL; + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; if (WARN_ON(!workdir)) return -EROFS; @@ -346,7 +372,8 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, ovl_path_upper(parent, &parentpath); upperdir = parentpath.dentry; - err = vfs_getattr(&parentpath, &pstat); + err = vfs_getattr(&parentpath, &pstat, + STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); if (err) return err; @@ -356,6 +383,25 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, return PTR_ERR(link); } + /* Should we copyup with O_TMPFILE or with workdir? 
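The reworked ovl_copy_up_locked() above adds a tmpfile mode: the upper copy is created as an unnamed temporary file, the data is copied without holding the upper directory lock, and only the final link gives the file a name, so other users never see a half-copied file. A userspace analogue of the same idea, using O_TMPFILE plus linkat() on /proc/self/fd (paths are hypothetical, error handling kept minimal):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char proc_path[64];
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0644);

	if (fd < 0)
		return 1;
	if (write(fd, "data\n", 5) != 5)	/* fully populate before exposing */
		return 1;
	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/tmp/copied-up",
		   AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");
	close(fd);
	return 0;
}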
*/ + if (S_ISREG(stat->mode) && ofs->tmpfile) { + err = ovl_copy_up_start(dentry); + /* err < 0: interrupted, err > 0: raced with another copy-up */ + if (unlikely(err)) { + pr_debug("ovl_copy_up_start(%pd2) = %i\n", dentry, err); + if (err > 0) + err = 0; + goto out_done; + } + + inode_lock_nested(upperdir->d_inode, I_MUTEX_PARENT); + err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, + stat, link, &pstat, true); + inode_unlock(upperdir->d_inode); + ovl_copy_up_end(dentry); + goto out_done; + } + err = -EIO; if (lock_rename(workdir, upperdir) != NULL) { pr_err("overlayfs: failed to lock workdir+upperdir\n"); @@ -368,13 +414,10 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, } err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, - stat, link); - if (!err) { - /* Restore timestamps on parent (best effort) */ - ovl_set_timestamps(upperdir, &pstat); - } + stat, link, &pstat, false); out_unlock: unlock_rename(workdir, upperdir); +out_done: do_delayed_call(&done); return err; @@ -409,7 +452,8 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags) } ovl_path_lower(next, &lowerpath); - err = vfs_getattr(&lowerpath, &stat); + err = vfs_getattr(&lowerpath, &stat, + STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); /* maybe truncate regular file. this has no effect on dirs */ if (flags & O_TRUNC) stat.size = 0; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 16e06dd89457..6515796460df 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -138,9 +138,10 @@ static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) return err; } -static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ovl_dir_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; int err; enum ovl_path_type type; struct path realpath; @@ -148,7 +149,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, type = ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); - err = vfs_getattr(&realpath, stat); + err = vfs_getattr(&realpath, stat, request_mask, flags); revert_creds(old_cred); if (err) return err; @@ -264,7 +265,8 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, goto out; ovl_path_upper(dentry, &upperpath); - err = vfs_getattr(&upperpath, &stat); + err = vfs_getattr(&upperpath, &stat, + STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_unlock; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 08643ac44a02..f8fe6bf2036d 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -9,6 +9,7 @@ #include <linux/fs.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include "overlayfs.h" @@ -56,16 +57,17 @@ out: return err; } -static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ovl_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct path realpath; const struct cred *old_cred; int err; ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); - err = vfs_getattr(&realpath, stat); + err = vfs_getattr(&realpath, stat, request_mask, flags); revert_creds(old_cred); return err; } diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 023bb0b03352..b8b077821fb0 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ 
-8,6 +8,7 @@ */ #include <linux/fs.h> +#include <linux/cred.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/ratelimit.h> diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 8af450b0e57a..741dc0b6931f 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -127,6 +127,15 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) return err; } +static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) +{ + struct dentry *ret = vfs_tmpfile(dentry, mode, 0); + int err = IS_ERR(ret) ? PTR_ERR(ret) : 0; + + pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); + return ret; +} + static inline struct inode *ovl_inode_real(struct inode *inode, bool *is_upper) { unsigned long x = (unsigned long) READ_ONCE(inode->i_private); @@ -169,6 +178,8 @@ void ovl_dentry_version_inc(struct dentry *dentry); u64 ovl_dentry_version_get(struct dentry *dentry); bool ovl_is_whiteout(struct dentry *dentry); struct file *ovl_path_open(struct path *path, int flags); +int ovl_copy_up_start(struct dentry *dentry); +void ovl_copy_up_end(struct dentry *dentry); /* namei.c */ int ovl_path_next(int idx, struct dentry *dentry, struct path *path); diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index d14bca1850d9..59614faa14c3 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -27,6 +27,8 @@ struct ovl_fs { struct ovl_config config; /* creds of process who forced instantiation of super block */ const struct cred *creator_cred; + bool tmpfile; + wait_queue_head_t copyup_wq; }; /* private information held for every overlayfs dentry */ @@ -38,6 +40,7 @@ struct ovl_entry { u64 version; const char *redirect; bool opaque; + bool copying; }; struct rcu_head rcu; }; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 20f48abbb82f..c9e70d39c1ea 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -7,6 +7,7 @@ * the Free Software Foundation. 
*/ +#include <uapi/linux/magic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> @@ -160,6 +161,25 @@ static void ovl_put_super(struct super_block *sb) kfree(ufs); } +static int ovl_sync_fs(struct super_block *sb, int wait) +{ + struct ovl_fs *ufs = sb->s_fs_info; + struct super_block *upper_sb; + int ret; + + if (!ufs->upper_mnt) + return 0; + upper_sb = ufs->upper_mnt->mnt_sb; + if (!upper_sb->s_op->sync_fs) + return 0; + + /* real inodes have already been synced by sync_filesystem(ovl_sb) */ + down_read(&upper_sb->s_umount); + ret = upper_sb->s_op->sync_fs(upper_sb, wait); + up_read(&upper_sb->s_umount); + return ret; +} + /** * ovl_statfs * @sb: The overlayfs super block @@ -222,6 +242,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) static const struct super_operations ovl_super_operations = { .put_super = ovl_put_super, + .sync_fs = ovl_sync_fs, .statfs = ovl_statfs, .show_options = ovl_show_options, .remount_fs = ovl_remount, @@ -701,6 +722,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) unsigned int stacklen = 0; unsigned int i; bool remote = false; + struct cred *cred; int err; err = -ENOMEM; @@ -708,6 +730,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!ufs) goto out; + init_waitqueue_head(&ufs->copyup_wq); ufs->config.redirect_dir = ovl_redirect_dir_def; err = ovl_parse_opt((char *) data, &ufs->config); if (err) @@ -825,6 +848,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) * creation of workdir in previous step. */ if (ufs->workdir) { + struct dentry *temp; + err = ovl_check_d_type_supported(&workpath); if (err < 0) goto out_put_workdir; @@ -836,6 +861,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) */ if (!err) pr_warn("overlayfs: upper fs needs to support d_type.\n"); + + /* Check if upper/work fs supports O_TMPFILE */ + temp = ovl_do_tmpfile(ufs->workdir, S_IFREG | 0); + ufs->tmpfile = !IS_ERR(temp); + if (ufs->tmpfile) + dput(temp); + else + pr_warn("overlayfs: upper fs does not support tmpfile.\n"); } } @@ -870,10 +903,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) else sb->s_d_op = &ovl_dentry_operations; - ufs->creator_cred = prepare_creds(); - if (!ufs->creator_cred) + ufs->creator_cred = cred = prepare_creds(); + if (!cred) goto out_put_lower_mnt; + /* Never override disk quota limits or use reserved space */ + cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); + err = -ENOMEM; oe = ovl_alloc_entry(numlower); if (!oe) diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 952286f4826c..1953986ee6bc 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -10,7 +10,9 @@ #include <linux/fs.h> #include <linux/mount.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/xattr.h> +#include <linux/sched/signal.h> #include "overlayfs.h" #include "ovl_entry.h" @@ -263,3 +265,33 @@ struct file *ovl_path_open(struct path *path, int flags) { return dentry_open(path, flags | O_NOATIME, current_cred()); } + +int ovl_copy_up_start(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + int err; + + spin_lock(&ofs->copyup_wq.lock); + err = wait_event_interruptible_locked(ofs->copyup_wq, !oe->copying); + if (!err) { + if (oe->__upperdentry) + err = 1; /* Already copied up */ + else + oe->copying = true; + } + spin_unlock(&ofs->copyup_wq.lock); + + return err; +} + +void 
ovl_copy_up_end(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + + spin_lock(&ofs->copyup_wq.lock); + oe->copying = false; + wake_up_locked(&ofs->copyup_wq); + spin_unlock(&ofs->copyup_wq.lock); +} diff --git a/fs/posix_acl.c b/fs/posix_acl.c index c9d48dc78495..eebf5f6cf6d5 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -15,6 +15,7 @@ #include <linux/atomic.h> #include <linux/fs.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> diff --git a/fs/proc/array.c b/fs/proc/array.c index fe12b519d09b..88c355574aa0 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -60,6 +60,10 @@ #include <linux/tty.h> #include <linux/string.h> #include <linux/mman.h> +#include <linux/sched/mm.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/task.h> +#include <linux/sched/cputime.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/uaccess.h> diff --git a/fs/proc/base.c b/fs/proc/base.c index b8f06273353e..c87b6b9a8a76 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -85,6 +85,11 @@ #include <linux/user_namespace.h> #include <linux/fs_struct.h> #include <linux/slab.h> +#include <linux/sched/autogroup.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/debug.h> +#include <linux/sched/stat.h> #include <linux/flex_array.h> #include <linux/posix-timers.h> #ifdef CONFIG_HARDWALL @@ -766,7 +771,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode) if (!IS_ERR_OR_NULL(mm)) { /* ensure this mm_struct can't be freed */ - atomic_inc(&mm->mm_count); + mmgrab(mm); /* but do not pin its memory */ mmput(mm); } @@ -813,7 +818,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, return -ENOMEM; copied = 0; - if (!atomic_inc_not_zero(&mm->mm_users)) + if (!mmget_not_zero(mm)) goto free; /* Maybe we should limit FOLL_FORCE to actual ptrace users? 
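ovl_copy_up_start() and ovl_copy_up_end() above form a small "one worker, everyone else waits" gate built on the wait queue's own lock: the flag is claimed under spin_lock(&wq.lock), waiters sleep interruptibly until it clears, and the finisher wakes them with wake_up_locked(). The same shape on a hypothetical object:

struct gated_object {
	wait_queue_head_t wq;
	bool busy;	/* someone is doing the work right now */
	bool done;	/* the work has already been completed */
};

/* <0 on signal, 1 if the work was already done, 0 if the caller owns it. */
static int gate_enter(struct gated_object *obj)
{
	int err;

	spin_lock(&obj->wq.lock);
	err = wait_event_interruptible_locked(obj->wq, !obj->busy);
	if (!err) {
		if (obj->done)
			err = 1;
		else
			obj->busy = true;
	}
	spin_unlock(&obj->wq.lock);
	return err;
}

static void gate_leave(struct gated_object *obj)
{
	spin_lock(&obj->wq.lock);
	obj->busy = false;
	wake_up_locked(&obj->wq);
	spin_unlock(&obj->wq.lock);
}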
*/ @@ -921,7 +926,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, return -ENOMEM; ret = 0; - if (!atomic_inc_not_zero(&mm->mm_users)) + if (!mmget_not_zero(mm)) goto free; down_read(&mm->mmap_sem); @@ -1064,7 +1069,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) if (p) { if (atomic_read(&p->mm->mm_users) > 1) { mm = p->mm; - atomic_inc(&mm->mm_count); + mmgrab(mm); } task_unlock(p); } @@ -1724,11 +1729,12 @@ out_unlock: return NULL; } -int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int pid_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct task_struct *task; - struct pid_namespace *pid = dentry->d_sb->s_fs_info; + struct pid_namespace *pid = path->dentry->d_sb->s_fs_info; generic_fillattr(inode, stat); @@ -3511,9 +3517,10 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) return 0; } -static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +static int proc_task_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct task_struct *p = get_proc_task(inode); generic_fillattr(inode, stat); diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 00ce1531b2f5..c330495c3115 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -1,4 +1,4 @@ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/dcache.h> #include <linux/path.h> diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 06c73904d497..ee27feb34cf4 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -118,10 +118,10 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) return 0; } -static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int proc_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct proc_dir_entry *de = PDE(inode); if (de && de->nlink) set_nlink(inode, de->nlink); diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5d6960f5f1c0..c5ae09b6c726 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -14,6 +14,8 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/binfmts.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task.h> struct ctl_table_header; struct mempolicy; @@ -149,7 +151,7 @@ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, * base.c */ extern const struct dentry_operations pid_dentry_operations; -extern int pid_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int pid_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int proc_setattr(struct dentry *, struct iattr *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t); extern int pid_revalidate(struct dentry *, unsigned int); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 0b80ad87b4d6..4ee55274f155 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -28,6 +28,7 @@ #include <linux/list.h> #include <linux/ioport.h> #include <linux/memory.h> +#include <linux/sched/task.h> #include <asm/sections.h> #include "internal.h" @@ -373,7 +374,10 @@ static void 
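The fs/proc hunks above swap raw refcount operations for named helpers: mmgrab() replaces atomic_inc(&mm->mm_count) where the existing comment says "ensure this mm_struct can't be freed ... but do not pin its memory", and mmget_not_zero() replaces atomic_inc_not_zero(&mm->mm_users) where the address space itself must stay alive. A kernel-context sketch of the usual pairing, assuming the caller already holds a pointer to the mm:

/* Take a users reference, walk the mappings, drop it again. */
static int example_walk_mm(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))	/* address space already torn down */
		return -ESRCH;

	down_read(&mm->mmap_sem);
	/* ... inspect the VMAs, as m_start()/pagemap_read() do above ... */
	up_read(&mm->mmap_sem);

	mmput(mm);			/* pairs with mmget_not_zero() */
	return 0;
}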
elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) phdr->p_flags = PF_R|PF_W|PF_X; phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff; phdr->p_vaddr = (size_t)m->addr; - phdr->p_paddr = 0; + if (m->type == KCORE_RAM || m->type == KCORE_TEXT) + phdr->p_paddr = __pa(m->addr); + else + phdr->p_paddr = (elf_addr_t)-1; phdr->p_filesz = phdr->p_memsz = m->size; phdr->p_align = PAGE_SIZE; } diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index aec66e6c2060..983fce5c2418 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -3,6 +3,8 @@ #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> #include <linux/seq_file.h> #include <linux/seqlock.h> #include <linux/time.h> diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index ffd72a6c6e04..d72fc40241d9 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> @@ -140,10 +141,10 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir, return de; } -static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int proc_tgid_net_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct net *net; net = get_proc_task_net(inode); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 3e64c6502dc8..8f91ec66baa3 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -8,6 +8,7 @@ #include <linux/printk.h> #include <linux/security.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/module.h> @@ -801,9 +802,10 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) return 0; } -static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +static int proc_sys_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; diff --git a/fs/proc/root.c b/fs/proc/root.c index b90da888b81a..deecb397daa3 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -14,12 +14,14 @@ #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/stat.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/user_namespace.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include <linux/parser.h> +#include <linux/cred.h> #include "internal.h" @@ -149,10 +151,10 @@ void __init proc_root_init(void) proc_sys_init(); } -static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat -) +static int proc_root_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - generic_fillattr(d_inode(dentry), stat); + generic_fillattr(d_inode(path->dentry), stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index e47c3e8c4dfe..bd4e55f4aa20 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -5,11 +5,12 @@ #include <linux/kernel_stat.h> #include 
<linux/proc_fs.h> #include <linux/sched.h> +#include <linux/sched/stat.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/irqnr.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include <linux/tick.h> #ifndef arch_irq_stat_cpu diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 8f96a49178d0..f08bd31c1081 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -11,6 +11,7 @@ #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/swap.h> +#include <linux/sched/mm.h> #include <linux/swapops.h> #include <linux/mmu_notifier.h> #include <linux/page_idle.h> @@ -167,7 +168,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos) return ERR_PTR(-ESRCH); mm = priv->mm; - if (!mm || !atomic_inc_not_zero(&mm->mm_users)) + if (!mm || !mmget_not_zero(mm)) return NULL; down_read(&mm->mmap_sem); @@ -1352,7 +1353,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, unsigned long end_vaddr; int ret = 0, copied = 0; - if (!mm || !atomic_inc_not_zero(&mm->mm_users)) + if (!mm || !mmget_not_zero(mm)) goto out; ret = -EINVAL; diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 37175621e890..23266694db11 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -7,6 +7,8 @@ #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/seq_file.h> +#include <linux/sched/mm.h> + #include "internal.h" /* @@ -219,7 +221,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-ESRCH); mm = priv->mm; - if (!mm || !atomic_inc_not_zero(&mm->mm_users)) + if (!mm || !mmget_not_zero(mm)) return NULL; down_read(&mm->mmap_sem); diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 3f1190d18991..b5713fefb4c1 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -10,6 +10,8 @@ #include <linux/nsproxy.h> #include <linux/security.h> #include <linux/fs_struct.h> +#include <linux/sched/task.h> + #include "proc/internal.h" /* only for get_proc_task() in ->open() */ #include "pnode.h" diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 406fed92362a..74b489e3714d 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -72,6 +72,7 @@ #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/kmod.h> #include <linux/namei.h> #include <linux/capability.h> diff --git a/fs/read_write.c b/fs/read_write.c index 5816d4c4cab0..c4f88afbc67f 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -4,8 +4,9 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ -#include <linux/slab.h> +#include <linux/slab.h> #include <linux/stat.h> +#include <linux/sched/xacct.h> #include <linux/fcntl.h> #include <linux/file.h> #include <linux/uio.h> @@ -23,9 +24,6 @@ #include <linux/uaccess.h> #include <asm/unistd.h> -typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *); -typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *); - const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, @@ -370,7 +368,7 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= READ; - ret = file->f_op->read_iter(&kiocb, iter); + ret = call_read_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -390,7 +388,7 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= WRITE; - ret = file->f_op->write_iter(&kiocb, iter); + 
ret = call_write_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -439,7 +437,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo kiocb.ki_pos = *ppos; iov_iter_init(&iter, READ, &iov, 1, len); - ret = filp->f_op->read_iter(&kiocb, &iter); + ret = call_read_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -496,7 +494,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t kiocb.ki_pos = *ppos; iov_iter_init(&iter, WRITE, &iov, 1, len); - ret = filp->f_op->write_iter(&kiocb, &iter); + ret = call_write_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -675,7 +673,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to) EXPORT_SYMBOL(iov_shorten); static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, - loff_t *ppos, iter_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { struct kiocb kiocb; ssize_t ret; @@ -692,7 +690,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC); kiocb.ki_pos = *ppos; - ret = fn(&kiocb, iter); + if (type == READ) + ret = call_read_iter(filp, &kiocb, iter); + else + ret = call_write_iter(filp, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -700,7 +701,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, /* Do it by hand, with file-ops */ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, - loff_t *ppos, io_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { ssize_t ret = 0; @@ -711,7 +712,13 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, struct iovec iovec = iov_iter_iovec(iter); ssize_t nr; - nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos); + if (type == READ) { + nr = filp->f_op->read(filp, iovec.iov_base, + iovec.iov_len, ppos); + } else { + nr = filp->f_op->write(filp, iovec.iov_base, + iovec.iov_len, ppos); + } if (nr < 0) { if (!ret) @@ -834,50 +841,32 @@ out: return ret; } -static ssize_t do_readv_writev(int type, struct file *file, - const struct iovec __user * uvector, - unsigned long nr_segs, loff_t *pos, - int flags) +static ssize_t __do_readv_writev(int type, struct file *file, + struct iov_iter *iter, loff_t *pos, int flags) { size_t tot_len; - struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov = iovstack; - struct iov_iter iter; - ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; - - ret = import_iovec(type, uvector, nr_segs, - ARRAY_SIZE(iovstack), &iov, &iter); - if (ret < 0) - return ret; + ssize_t ret = 0; - tot_len = iov_iter_count(&iter); + tot_len = iov_iter_count(iter); if (!tot_len) goto out; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; + if (type != READ) file_start_write(file); - } - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); + if ((type == READ && file->f_op->read_iter) || + (type == WRITE && file->f_op->write_iter)) + ret = do_iter_readv_writev(file, iter, pos, type, flags); else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); + ret = do_loop_readv_writev(file, iter, pos, type, flags); if (type != READ) file_end_write(file); out: - kfree(iov); if ((ret + 
(type == READ)) > 0) { if (type == READ) fsnotify_access(file); @@ -887,6 +876,27 @@ out: return ret; } +static ssize_t do_readv_writev(int type, struct file *file, + const struct iovec __user *uvector, + unsigned long nr_segs, loff_t *pos, + int flags) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + + ret = import_iovec(type, uvector, nr_segs, + ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) + return ret; + + ret = __do_readv_writev(type, file, &iter, pos, flags); + kfree(iov); + + return ret; +} + ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, int flags) { @@ -1064,51 +1074,19 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, unsigned long nr_segs, loff_t *pos, int flags) { - compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; ret = compat_import_iovec(type, uvector, nr_segs, UIO_FASTIOV, &iov, &iter); if (ret < 0) return ret; - tot_len = iov_iter_count(&iter); - if (!tot_len) - goto out; - ret = rw_verify_area(type, file, pos, tot_len); - if (ret < 0) - goto out; - - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; - file_start_write(file); - } - - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); - else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); - - if (type != READ) - file_end_write(file); - -out: + ret = __do_readv_writev(type, file, &iter, pos, flags); kfree(iov); - if ((ret + (type == READ)) > 0) { - if (type == READ) - fsnotify_access(file); - else - fsnotify_modify(file); - } + return ret; } @@ -1518,6 +1496,11 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (flags != 0) return -EINVAL; + if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) + return -EISDIR; + if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) + return -EINVAL; + ret = rw_verify_area(READ, file_in, &pos_in, len); if (unlikely(ret)) return ret; @@ -1538,7 +1521,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (len == 0) return 0; - sb_start_write(inode_out->i_sb); + file_start_write(file_out); /* * Try cloning first, this is supported by more file systems, and @@ -1574,7 +1557,7 @@ done: inc_syscr(current); inc_syscw(current); - sb_end_write(inode_out->i_sb); + file_end_write(file_out); return ret; } diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 2f8c5c9bdaf6..b396eb09f288 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page, int ret = 0; th.t_trans_id = 0; - blocksize = 1 << inode->i_blkbits; + blocksize = i_blocksize(inode); if (logit) { reiserfs_write_lock(s); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index cfeae9b0a2b7..a6ab9d64ea1b 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -525,7 +525,7 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode, * referenced in convert_tail_for_hole() that may be called from * reiserfs_get_block() */ - bh_result->b_size = (1 << inode->i_blkbits); + bh_result->b_size = i_blocksize(inode); ret = reiserfs_get_block(inode, iblock, bh_result, create | GET_BLOCK_NO_DANGLE); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index e314cb30a181..feabcde0290d 100644 --- 
a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1166,7 +1166,7 @@ static int reiserfs_parse_options(struct super_block *s, if (!strcmp(arg, "auto")) { /* From JFS code, to auto-get the size. */ *blocks = - s->s_bdev->bd_inode->i_size >> s-> + i_size_read(s->s_bdev->bd_inode) >> s-> s_blocksize_bits; } else { *blocks = simple_strtoul(arg, &p, 0); diff --git a/fs/select.c b/fs/select.c index 305c0daf5d67..e2112270d75a 100644 --- a/fs/select.c +++ b/fs/select.c @@ -15,7 +15,8 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/rt.h> #include <linux/syscalls.h> #include <linux/export.h> #include <linux/slab.h> @@ -26,7 +27,6 @@ #include <linux/fs.h> #include <linux/rcupdate.h> #include <linux/hrtimer.h> -#include <linux/sched/rt.h> #include <linux/freezer.h> #include <net/busy_poll.h> #include <linux/vmalloc.h> diff --git a/fs/splice.c b/fs/splice.c index 4ef78aa8ef61..006ba50f4ece 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -33,6 +33,8 @@ #include <linux/gfp.h> #include <linux/socket.h> #include <linux/compat.h> +#include <linux/sched/signal.h> + #include "internal.h" /* @@ -307,7 +309,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, idx = to.idx; init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; - ret = in->f_op->read_iter(&kiocb, &to); + ret = call_read_iter(in, &kiocb, &to); if (ret > 0) { *ppos = kiocb.ki_pos; file_accessed(in); diff --git a/fs/stat.c b/fs/stat.c index a268b7f27adf..fa0be59340cc 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -12,12 +12,22 @@ #include <linux/fs.h> #include <linux/namei.h> #include <linux/security.h> +#include <linux/cred.h> #include <linux/syscalls.h> #include <linux/pagemap.h> #include <linux/uaccess.h> #include <asm/unistd.h> +/** + * generic_fillattr - Fill in the basic attributes from the inode struct + * @inode: Inode to use as the source + * @stat: Where to fill in the attributes + * + * Fill in the basic attributes in the kstat structure from data that's to be + * found on the VFS inode structure. This is the default if no getattr inode + * operation is supplied. + */ void generic_fillattr(struct inode *inode, struct kstat *stat) { stat->dev = inode->i_sb->s_dev; @@ -31,83 +41,149 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; - stat->blksize = (1 << inode->i_blkbits); + stat->blksize = i_blocksize(inode); stat->blocks = inode->i_blocks; -} + if (IS_NOATIME(inode)) + stat->result_mask &= ~STATX_ATIME; + if (IS_AUTOMOUNT(inode)) + stat->attributes |= STATX_ATTR_AUTOMOUNT; +} EXPORT_SYMBOL(generic_fillattr); /** * vfs_getattr_nosec - getattr without security checks * @path: file to get attributes from * @stat: structure to return attributes in + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) * * Get attributes without calling security_inode_getattr. * * Currently the only caller other than vfs_getattr is internal to the - * filehandle lookup code, which uses only the inode number and returns - * no attributes to any user. Any other code probably wants - * vfs_getattr. + * filehandle lookup code, which uses only the inode number and returns no + * attributes to any user. Any other code probably wants vfs_getattr. 
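All of the getattr conversions in this series follow one pattern: the old ->getattr(struct vfsmount *, struct dentry *, struct kstat *) prototype becomes ->getattr(const struct path *, struct kstat *, u32 request_mask, unsigned int query_flags), and the inode is taken from path->dentry. A minimal sketch of a converted implementation follows; "examplefs", its inode_operations table and its decision to report no birth time are illustrative assumptions, not code from this series.

#include <linux/fs.h>
#include <linux/stat.h>

/* Hypothetical filesystem getattr using the new prototype. */
static int examplefs_getattr(const struct path *path, struct kstat *stat,
			     u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* Fill the STATX_BASIC_STATS fields from the VFS inode. */
	generic_fillattr(inode, stat);

	/* Only report extra attributes the caller actually asked for. */
	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = 0;		/* no creation time stored here */
		stat->btime.tv_nsec = 0;
	}
	return 0;
}

static const struct inode_operations examplefs_inode_ops = {
	.getattr	= examplefs_getattr,
};

Implementations that ignore request_mask and query_flags, as the proc, sysv and udf conversions in this section do, remain correct: the mask only tells a filesystem which optional work it may skip.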
*/ -int vfs_getattr_nosec(struct path *path, struct kstat *stat) +int vfs_getattr_nosec(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct inode *inode = d_backing_inode(path->dentry); + memset(stat, 0, sizeof(*stat)); + stat->result_mask |= STATX_BASIC_STATS; + request_mask &= STATX_ALL; + query_flags &= KSTAT_QUERY_FLAGS; if (inode->i_op->getattr) - return inode->i_op->getattr(path->mnt, path->dentry, stat); + return inode->i_op->getattr(path, stat, request_mask, + query_flags); generic_fillattr(inode, stat); return 0; } - EXPORT_SYMBOL(vfs_getattr_nosec); -int vfs_getattr(struct path *path, struct kstat *stat) +/* + * vfs_getattr - Get the enhanced basic attributes of a file + * @path: The file of interest + * @stat: Where to return the statistics + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) + * + * Ask the filesystem for a file's attributes. The caller must indicate in + * request_mask and query_flags to indicate what they want. + * + * If the file is remote, the filesystem can be forced to update the attributes + * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can + * suppress the update by passing AT_STATX_DONT_SYNC. + * + * Bits must have been set in request_mask to indicate which attributes the + * caller wants retrieving. Any such attribute not requested may be returned + * anyway, but the value may be approximate, and, if remote, may not have been + * synchronised with the server. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. + */ +int vfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { int retval; retval = security_inode_getattr(path); if (retval) return retval; - return vfs_getattr_nosec(path, stat); + return vfs_getattr_nosec(path, stat, request_mask, query_flags); } - EXPORT_SYMBOL(vfs_getattr); -int vfs_fstat(unsigned int fd, struct kstat *stat) +/** + * vfs_statx_fd - Get the enhanced basic attributes by file descriptor + * @fd: The file descriptor referring to the file of interest + * @stat: The result structure to fill in. + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) + * + * This function is a wrapper around vfs_getattr(). The main difference is + * that it uses a file descriptor to determine the file location. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. + */ +int vfs_statx_fd(unsigned int fd, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct fd f = fdget_raw(fd); int error = -EBADF; if (f.file) { - error = vfs_getattr(&f.file->f_path, stat); + error = vfs_getattr(&f.file->f_path, stat, + request_mask, query_flags); fdput(f); } return error; } -EXPORT_SYMBOL(vfs_fstat); +EXPORT_SYMBOL(vfs_statx_fd); -int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, - int flag) +/** + * vfs_statx - Get basic and extra attributes by filename + * @dfd: A file descriptor representing the base dir for a relative filename + * @filename: The name of the file of interest + * @flags: Flags to control the query + * @stat: The result structure to fill in. + * @request_mask: STATX_xxx flags indicating what the caller wants + * + * This function is a wrapper around vfs_getattr(). The main difference is + * that it uses a filename and base directory to determine the file location. 
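vfs_statx() and vfs_statx_fd() are the backends for the new statx(2) system call defined further down in this file, so a short userspace sketch of the calling convention (dfd, pathname, flags, mask, buffer) may help. It assumes kernel headers that already provide __NR_statx and the struct statx layout from <linux/stat.h>, and it goes through syscall() because no libc wrapper existed at the time; the path handling and output are illustrative only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>		/* AT_FDCWD, AT_* flags */
#include <sys/syscall.h>	/* __NR_statx, assumed to be present */
#include <linux/stat.h>		/* struct statx, STATX_* */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	memset(&stx, 0, sizeof(stx));
	/* Argument order mirrors SYSCALL_DEFINE5(statx, dfd, filename, flags, mask, buffer). */
	if (syscall(__NR_statx, AT_FDCWD, path, 0,
		    STATX_BASIC_STATS | STATX_BTIME, &stx) == -1) {
		perror("statx");
		return 1;
	}

	printf("size: %llu bytes\n", (unsigned long long)stx.stx_size);
	if (stx.stx_mask & STATX_BTIME)
		printf("birth time: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("birth time not provided for this file\n");
	return 0;
}

Passing 0 in flags keeps the default LOOKUP_FOLLOW behaviour set up by vfs_statx(); AT_SYMLINK_NOFOLLOW or AT_NO_AUTOMOUNT drop LOOKUP_FOLLOW or LOOKUP_AUTOMOUNT respectively, as the flag handling in this hunk shows.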
+ * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink + * at the given name from being referenced. + * + * The caller must have preset stat->request_mask as for vfs_getattr(). The + * flags are also used to load up stat->query_flags. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. + */ +int vfs_statx(int dfd, const char __user *filename, int flags, + struct kstat *stat, u32 request_mask) { struct path path; int error = -EINVAL; - unsigned int lookup_flags = 0; + unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT; - if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | - AT_EMPTY_PATH)) != 0) - goto out; + if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | + AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) + return -EINVAL; - if (!(flag & AT_SYMLINK_NOFOLLOW)) - lookup_flags |= LOOKUP_FOLLOW; - if (flag & AT_EMPTY_PATH) + if (flags & AT_SYMLINK_NOFOLLOW) + lookup_flags &= ~LOOKUP_FOLLOW; + if (flags & AT_NO_AUTOMOUNT) + lookup_flags &= ~LOOKUP_AUTOMOUNT; + if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; + retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; - error = vfs_getattr(&path, stat); + error = vfs_getattr(&path, stat, request_mask, flags); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; @@ -116,19 +192,7 @@ retry: out: return error; } -EXPORT_SYMBOL(vfs_fstatat); - -int vfs_stat(const char __user *name, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, name, stat, 0); -} -EXPORT_SYMBOL(vfs_stat); - -int vfs_lstat(const char __user *name, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW); -} -EXPORT_SYMBOL(vfs_lstat); +EXPORT_SYMBOL(vfs_statx); #ifdef __ARCH_WANT_OLD_STAT @@ -141,7 +205,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta { static int warncount = 5; struct __old_kernel_stat tmp; - + if (warncount > 0) { warncount--; printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n", @@ -166,7 +230,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; -#endif +#endif tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; @@ -445,6 +509,81 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ +static inline int __put_timestamp(struct timespec *kts, + struct statx_timestamp __user *uts) +{ + return (__put_user(kts->tv_sec, &uts->tv_sec ) || + __put_user(kts->tv_nsec, &uts->tv_nsec ) || + __put_user(0, &uts->__reserved )); +} + +/* + * Set the statx results. 
+ */ +static long statx_set_result(struct kstat *stat, struct statx __user *buffer) +{ + uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); + gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); + + if (__put_user(stat->result_mask, &buffer->stx_mask ) || + __put_user(stat->mode, &buffer->stx_mode ) || + __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || + __put_user(stat->nlink, &buffer->stx_nlink ) || + __put_user(uid, &buffer->stx_uid ) || + __put_user(gid, &buffer->stx_gid ) || + __put_user(stat->attributes, &buffer->stx_attributes ) || + __put_user(stat->blksize, &buffer->stx_blksize ) || + __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || + __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || + __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || + __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || + __put_timestamp(&stat->atime, &buffer->stx_atime ) || + __put_timestamp(&stat->btime, &buffer->stx_btime ) || + __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || + __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || + __put_user(stat->ino, &buffer->stx_ino ) || + __put_user(stat->size, &buffer->stx_size ) || + __put_user(stat->blocks, &buffer->stx_blocks ) || + __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || + __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) + return -EFAULT; + + return 0; +} + +/** + * sys_statx - System call to get enhanced stats + * @dfd: Base directory to pathwalk from *or* fd to stat. + * @filename: File to stat *or* NULL. + * @flags: AT_* flags to control pathwalk. + * @mask: Parts of statx struct actually required. + * @buffer: Result buffer. + * + * Note that if filename is NULL, then it does the equivalent of fstat() using + * dfd to indicate the file of interest. + */ +SYSCALL_DEFINE5(statx, + int, dfd, const char __user *, filename, unsigned, flags, + unsigned int, mask, + struct statx __user *, buffer) +{ + struct kstat stat; + int error; + + if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) + return -EINVAL; + if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer))) + return -EFAULT; + + if (filename) + error = vfs_statx(dfd, filename, flags, &stat, mask); + else + error = vfs_statx_fd(dfd, &stat, mask, flags); + if (error) + return error; + return statx_set_result(&stat, buffer); +} + /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ void __inode_add_bytes(struct inode *inode, loff_t bytes) { diff --git a/fs/sync.c b/fs/sync.c index 2a54c1f22035..11ba023434b1 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -192,7 +192,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) spin_unlock(&inode->i_lock); mark_inode_dirty_sync(inode); } - return file->f_op->fsync(file, start, end, datasync); + return call_fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 08d3e630b49c..83809f5b5eca 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -440,10 +440,11 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) return blocks; } -int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int sysv_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct super_block *s = dentry->d_sb; - generic_fillattr(d_inode(dentry), stat); + struct super_block *s = path->dentry->d_sb; + generic_fillattr(d_inode(path->dentry), stat); stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); stat->blksize = s->s_blocksize; return 0; diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 6c212288adcb..1e7e27c729af 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h @@ -142,7 +142,7 @@ extern struct inode *sysv_iget(struct super_block *, unsigned int); extern int sysv_write_inode(struct inode *, struct writeback_control *wbc); extern int sysv_sync_inode(struct inode *); extern void sysv_set_inode(struct inode *, dev_t); -extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int sysv_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int sysv_init_icache(void); extern void sysv_destroy_icache(void); diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 528369f3e472..30825d882aa9 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1622,11 +1622,11 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } -int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ubifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { loff_t size; - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ubifs_inode *ui = ubifs_inode(inode); mutex_lock(&ui->ui_mutex); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f0c86f076535..4d57e488038e 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1749,8 +1749,8 @@ int ubifs_update_time(struct inode *inode, struct timespec *time, int flags); /* dir.c */ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir, umode_t mode); -int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int ubifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int ubifs_check_dir_empty(struct inode *dir); /* xattr.c */ diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 8ec6b3df0bc7..a8d8f71ef8bd 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1193,7 +1193,7 @@ int udf_setsize(struct inode *inode, loff_t newsize) { int err; struct udf_inode_info *iinfo; - int bsize = 1 << inode->i_blkbits; + int bsize = i_blocksize(inode); if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index f7dfef53f739..6023c97c6da2 100644 --- 
a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -152,9 +152,10 @@ out_unmap: return err; } -static int udf_symlink_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int udf_symlink_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct inode *inode = d_backing_inode(dentry); struct page *page; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 625b7285a37b..973607df579d 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -14,7 +14,8 @@ #include <linux/list.h> #include <linux/hashtable.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/slab.h> @@ -1807,17 +1808,17 @@ static void init_once_userfaultfd_ctx(void *mem) } /** - * userfaultfd_file_create - Creates an userfaultfd file pointer. + * userfaultfd_file_create - Creates a userfaultfd file pointer. * @flags: Flags for the userfaultfd file. * - * This function creates an userfaultfd file pointer, w/out installing + * This function creates a userfaultfd file pointer, w/out installing * it into the fd table. This is useful when the userfaultfd file is * used during the initialization of data structures that require * extra setup after the userfaultfd creation. So the userfaultfd * creation is split into the file pointer creation phase, and the * file descriptor installation phase. In this way races with * userspace closing the newly installed file descriptor can be - * avoided. Returns an userfaultfd file pointer, or a proper error + * avoided. Returns a userfaultfd file pointer, or a proper error * pointer. */ static struct file *userfaultfd_file_create(int flags) @@ -1847,7 +1848,7 @@ static struct file *userfaultfd_file_create(int flags) ctx->released = false; ctx->mm = current->mm; /* prevent the mm struct to be freed */ - atomic_inc(&ctx->mm->mm_count); + mmgrab(ctx->mm); file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index 339c696bbc01..2dfdc62f795e 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -16,6 +16,7 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/swap.h> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 1ff9df7a3ce8..bf65a9ea8642 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -103,9 +103,9 @@ xfs_finish_page_writeback( unsigned int bsize; ASSERT(bvec->bv_offset < PAGE_SIZE); - ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0); + ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0); ASSERT(end < PAGE_SIZE); - ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0); + ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0); bh = head = page_buffers(bvec->bv_page); @@ -349,7 +349,7 @@ xfs_map_blocks( { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - ssize_t count = 1 << inode->i_blkbits; + ssize_t count = i_blocksize(inode); xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int bmapi_flags = XFS_BMAPI_ENTIRE; @@ -758,7 +758,7 @@ xfs_aops_discard_page( break; } next_buffer: - offset += 1 << inode->i_blkbits; + offset += i_blocksize(inode); } while ((bh = bh->b_this_page) != head); @@ -846,7 +846,7 @@ xfs_writepage_map( LIST_HEAD(submit_list); struct xfs_ioend *ioend, *next; struct buffer_head *bh, 
*head; - ssize_t len = 1 << inode->i_blkbits; + ssize_t len = i_blocksize(inode); int error = 0; int count = 0; int uptodate = 1; @@ -1210,7 +1210,7 @@ xfs_map_trim_size( offset + mapping_size >= i_size_read(inode)) { /* limit mapping to block that spans EOF */ mapping_size = roundup_64(i_size_read(inode) - offset, - 1 << inode->i_blkbits); + i_blocksize(inode)); } if (mapping_size > LONG_MAX) mapping_size = LONG_MAX; @@ -1241,7 +1241,7 @@ xfs_get_blocks( return -EIO; offset = (xfs_off_t)iblock << inode->i_blkbits; - ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); + ASSERT(bh_result->b_size >= i_blocksize(inode)); size = bh_result->b_size; if (offset >= i_size_read(inode)) @@ -1389,7 +1389,7 @@ xfs_vm_set_page_dirty( if (offset < end_offset) set_buffer_dirty(bh); bh = bh->b_this_page; - offset += 1 << inode->i_blkbits; + offset += i_blocksize(inode); } while (bh != head); } /* diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 8c7d01b75922..b6208728ba39 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -33,6 +33,7 @@ #include <linux/migrate.h> #include <linux/backing-dev.h> #include <linux/freezer.h> +#include <linux/sched/mm.h> #include "xfs_format.h" #include "xfs_log_format.h" diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index a50eca676670..35703a801372 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -754,7 +754,7 @@ xfs_file_fallocate( if (error) goto out_unlock; } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { - unsigned blksize_mask = (1 << inode->i_blkbits) - 1; + unsigned int blksize_mask = i_blocksize(inode) - 1; if (offset & blksize_mask || len & blksize_mask) { error = -EINVAL; @@ -776,7 +776,7 @@ xfs_file_fallocate( if (error) goto out_unlock; } else if (mode & FALLOC_FL_INSERT_RANGE) { - unsigned blksize_mask = (1 << inode->i_blkbits) - 1; + unsigned int blksize_mask = i_blocksize(inode) - 1; new_size = i_size_read(inode) + len; if (offset & blksize_mask || len & blksize_mask) { diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index cf1363dbf32b..2fd7fdf5438f 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -43,6 +43,7 @@ #include "xfs_acl.h" #include <linux/capability.h> +#include <linux/cred.h> #include <linux/dcache.h> #include <linux/mount.h> #include <linux/namei.h> diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 22c16155f1b4..229cc6a6d8ef 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -489,11 +489,12 @@ xfs_vn_get_link_inline( STATIC int xfs_vn_getattr( - struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *stat) + const struct path *path, + struct kstat *stat, + u32 request_mask, + unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 7a989de224f4..592fdf7111cb 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -55,7 +55,7 @@ typedef __u32 xfs_nlink_t; #include <linux/file.h> #include <linux/swap.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/bitops.h> #include <linux/major.h> #include <linux/pagemap.h> diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h new file mode 100644 index 000000000000..57af9f21d148 --- /dev/null +++ b/include/asm-generic/kprobes.h @@ -0,0 +1,25 @@ +#ifndef _ASM_GENERIC_KPROBES_H +#define _ASM_GENERIC_KPROBES_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_KPROBES +/* + * 
Blacklist generating macro. Specify functions which are not probed + * by using this macro. + */ +# define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +/* Use this to forbid a kprobes attach on very low level functions */ +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define NOKPROBE_SYMBOL(fname) +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + +#endif /* _ASM_GENERIC_KPROBES_H */ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index d81b0ba9921f..2ef16bf25826 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -40,6 +40,7 @@ #include <linux/bug.h> #include <linux/rbtree.h> #include <linux/kernel.h> +#include <linux/mm_types.h> #include <linux/list.h> #include <linux/spinlock.h> #ifdef CONFIG_DRM_DEBUG_MM diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 86ab99bc0ac5..35e1482ba8a1 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h @@ -4,6 +4,7 @@ */ #include <linux/interrupt.h> /* For task queue support */ +#include <linux/sched/signal.h> #include <linux/delay.h> #ifndef readq diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 360e00cefd35..a0c812b0fa39 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -64,3 +64,5 @@ #define BCM2835_CLOCK_CAM1 46 #define BCM2835_CLOCK_DSI0E 47 #define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h deleted file mode 100644 index 7eed55100721..000000000000 --- a/include/dt-bindings/clock/exynos4415.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Device Tree binding constants for Samsung Exynos4415 clock controllers. - */ - -#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H -#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H - -/* - * Let each exported clock get a unique index, which is used on DT-enabled - * platforms to lookup the clock from a clock specifier. These indices are - * therefore considered an ABI and so must not be changed. This implies - * that new clocks should be added either in free spaces between clock groups - * or at the end.
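The new asm-generic/kprobes.h above gives every architecture a common definition of the blacklist helpers. Usage is one line per function: NOKPROBE_SYMBOL() records the function's address in the _kprobe_blacklist section so register_kprobe() refuses to instrument it, while __kprobes places the whole function in .kprobes.text. The function names in the sketch below are hypothetical.

#include <linux/kernel.h>
#include <linux/kprobes.h>	/* expected to reach asm-generic/kprobes.h via asm/kprobes.h */

/* Keep this whole helper out of probe-able text. */
static void __kprobes example_low_level_helper(void)
{
	/* work that must not trap into the kprobes handler */
}

/* Hypothetical function on a path where a breakpoint would recurse. */
unsigned long example_no_probe_zone(unsigned long val)
{
	example_low_level_helper();
	return val ^ 0xdeadbeefUL;
}
/* Expands to a _kbl_addr_example_no_probe_zone entry in _kprobe_blacklist. */
NOKPROBE_SYMBOL(example_no_probe_zone);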
- */ - -/* - * Main CMU - */ - -#define CLK_OSCSEL 1 -#define CLK_FIN_PLL 2 -#define CLK_FOUT_APLL 3 -#define CLK_FOUT_MPLL 4 -#define CLK_FOUT_EPLL 5 -#define CLK_FOUT_G3D_PLL 6 -#define CLK_FOUT_ISP_PLL 7 -#define CLK_FOUT_DISP_PLL 8 - -/* Muxes */ -#define CLK_MOUT_MPLL_USER_L 16 -#define CLK_MOUT_GDL 17 -#define CLK_MOUT_MPLL_USER_R 18 -#define CLK_MOUT_GDR 19 -#define CLK_MOUT_EBI 20 -#define CLK_MOUT_ACLK_200 21 -#define CLK_MOUT_ACLK_160 22 -#define CLK_MOUT_ACLK_100 23 -#define CLK_MOUT_ACLK_266 24 -#define CLK_MOUT_G3D_PLL 25 -#define CLK_MOUT_EPLL 26 -#define CLK_MOUT_EBI_1 27 -#define CLK_MOUT_ISP_PLL 28 -#define CLK_MOUT_DISP_PLL 29 -#define CLK_MOUT_MPLL_USER_T 30 -#define CLK_MOUT_ACLK_400_MCUISP 31 -#define CLK_MOUT_G3D_PLLSRC 32 -#define CLK_MOUT_CSIS1 33 -#define CLK_MOUT_CSIS0 34 -#define CLK_MOUT_CAM1 35 -#define CLK_MOUT_FIMC3_LCLK 36 -#define CLK_MOUT_FIMC2_LCLK 37 -#define CLK_MOUT_FIMC1_LCLK 38 -#define CLK_MOUT_FIMC0_LCLK 39 -#define CLK_MOUT_MFC 40 -#define CLK_MOUT_MFC_1 41 -#define CLK_MOUT_MFC_0 42 -#define CLK_MOUT_G3D 43 -#define CLK_MOUT_G3D_1 44 -#define CLK_MOUT_G3D_0 45 -#define CLK_MOUT_MIPI0 46 -#define CLK_MOUT_FIMD0 47 -#define CLK_MOUT_TSADC_ISP 48 -#define CLK_MOUT_UART_ISP 49 -#define CLK_MOUT_SPI1_ISP 50 -#define CLK_MOUT_SPI0_ISP 51 -#define CLK_MOUT_PWM_ISP 52 -#define CLK_MOUT_AUDIO0 53 -#define CLK_MOUT_TSADC 54 -#define CLK_MOUT_MMC2 55 -#define CLK_MOUT_MMC1 56 -#define CLK_MOUT_MMC0 57 -#define CLK_MOUT_UART3 58 -#define CLK_MOUT_UART2 59 -#define CLK_MOUT_UART1 60 -#define CLK_MOUT_UART0 61 -#define CLK_MOUT_SPI2 62 -#define CLK_MOUT_SPI1 63 -#define CLK_MOUT_SPI0 64 -#define CLK_MOUT_SPDIF 65 -#define CLK_MOUT_AUDIO2 66 -#define CLK_MOUT_AUDIO1 67 -#define CLK_MOUT_MPLL_USER_C 68 -#define CLK_MOUT_HPM 69 -#define CLK_MOUT_CORE 70 -#define CLK_MOUT_APLL 71 -#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72 -#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73 -#define CLK_MOUT_JPEG 74 -#define CLK_MOUT_JPEG1 75 -#define CLK_MOUT_JPEG0 76 -#define CLK_MOUT_ACLK_ISP0_300 77 -#define CLK_MOUT_ACLK_ISP0_400 78 -#define CLK_MOUT_ACLK_ISP0_300_USER 79 -#define CLK_MOUT_ACLK_ISP1_300 80 -#define CLK_MOUT_ACLK_ISP1_300_USER 81 -#define CLK_MOUT_HDMI 82 - -/* Dividers */ -#define CLK_DIV_GPL 90 -#define CLK_DIV_GDL 91 -#define CLK_DIV_GPR 92 -#define CLK_DIV_GDR 93 -#define CLK_DIV_ACLK_400_MCUISP 94 -#define CLK_DIV_EBI 95 -#define CLK_DIV_ACLK_200 96 -#define CLK_DIV_ACLK_160 97 -#define CLK_DIV_ACLK_100 98 -#define CLK_DIV_ACLK_266 99 -#define CLK_DIV_CSIS1 100 -#define CLK_DIV_CSIS0 101 -#define CLK_DIV_CAM1 102 -#define CLK_DIV_FIMC3_LCLK 103 -#define CLK_DIV_FIMC2_LCLK 104 -#define CLK_DIV_FIMC1_LCLK 105 -#define CLK_DIV_FIMC0_LCLK 106 -#define CLK_DIV_TV_BLK 107 -#define CLK_DIV_MFC 108 -#define CLK_DIV_G3D 109 -#define CLK_DIV_MIPI0_PRE 110 -#define CLK_DIV_MIPI0 111 -#define CLK_DIV_FIMD0 112 -#define CLK_DIV_UART_ISP 113 -#define CLK_DIV_SPI1_ISP_PRE 114 -#define CLK_DIV_SPI1_ISP 115 -#define CLK_DIV_SPI0_ISP_PRE 116 -#define CLK_DIV_SPI0_ISP 117 -#define CLK_DIV_PWM_ISP 118 -#define CLK_DIV_PCM0 119 -#define CLK_DIV_AUDIO0 120 -#define CLK_DIV_TSADC_PRE 121 -#define CLK_DIV_TSADC 122 -#define CLK_DIV_MMC1_PRE 123 -#define CLK_DIV_MMC1 124 -#define CLK_DIV_MMC0_PRE 125 -#define CLK_DIV_MMC0 126 -#define CLK_DIV_MMC2_PRE 127 -#define CLK_DIV_MMC2 128 -#define CLK_DIV_UART3 129 -#define CLK_DIV_UART2 130 -#define CLK_DIV_UART1 131 -#define CLK_DIV_UART0 132 -#define CLK_DIV_SPI1_PRE 133 -#define CLK_DIV_SPI1 134 -#define CLK_DIV_SPI0_PRE 135 -#define CLK_DIV_SPI0 
136 -#define CLK_DIV_SPI2_PRE 137 -#define CLK_DIV_SPI2 138 -#define CLK_DIV_PCM2 139 -#define CLK_DIV_AUDIO2 140 -#define CLK_DIV_PCM1 141 -#define CLK_DIV_AUDIO1 142 -#define CLK_DIV_I2S1 143 -#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144 -#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145 -#define CLK_DIV_JPEG 146 -#define CLK_DIV_CORE2 147 -#define CLK_DIV_APLL 148 -#define CLK_DIV_PCLK_DBG 149 -#define CLK_DIV_ATB 150 -#define CLK_DIV_PERIPH 151 -#define CLK_DIV_COREM1 152 -#define CLK_DIV_COREM0 153 -#define CLK_DIV_CORE 154 -#define CLK_DIV_HPM 155 -#define CLK_DIV_COPY 156 - -/* Gates */ -#define CLK_ASYNC_G3D 180 -#define CLK_ASYNC_MFCL 181 -#define CLK_ASYNC_TVX 182 -#define CLK_PPMULEFT 183 -#define CLK_GPIO_LEFT 184 -#define CLK_PPMUIMAGE 185 -#define CLK_QEMDMA2 186 -#define CLK_QEROTATOR 187 -#define CLK_SMMUMDMA2 188 -#define CLK_SMMUROTATOR 189 -#define CLK_MDMA2 190 -#define CLK_ROTATOR 191 -#define CLK_ASYNC_ISPMX 192 -#define CLK_ASYNC_MAUDIOX 193 -#define CLK_ASYNC_MFCR 194 -#define CLK_ASYNC_FSYSD 195 -#define CLK_ASYNC_LCD0X 196 -#define CLK_ASYNC_CAMX 197 -#define CLK_PPMURIGHT 198 -#define CLK_GPIO_RIGHT 199 -#define CLK_ANTIRBK_APBIF 200 -#define CLK_EFUSE_WRITER_APBIF 201 -#define CLK_MONOCNT 202 -#define CLK_TZPC6 203 -#define CLK_PROVISIONKEY1 204 -#define CLK_PROVISIONKEY0 205 -#define CLK_CMU_ISPPART 206 -#define CLK_TMU_APBIF 207 -#define CLK_KEYIF 208 -#define CLK_RTC 209 -#define CLK_WDT 210 -#define CLK_MCT 211 -#define CLK_SECKEY 212 -#define CLK_HDMI_CEC 213 -#define CLK_TZPC5 214 -#define CLK_TZPC4 215 -#define CLK_TZPC3 216 -#define CLK_TZPC2 217 -#define CLK_TZPC1 218 -#define CLK_TZPC0 219 -#define CLK_CMU_COREPART 220 -#define CLK_CMU_TOPPART 221 -#define CLK_PMU_APBIF 222 -#define CLK_SYSREG 223 -#define CLK_CHIP_ID 224 -#define CLK_SMMUFIMC_LITE2 225 -#define CLK_FIMC_LITE2 226 -#define CLK_PIXELASYNCM1 227 -#define CLK_PIXELASYNCM0 228 -#define CLK_PPMUCAMIF 229 -#define CLK_SMMUJPEG 230 -#define CLK_SMMUFIMC3 231 -#define CLK_SMMUFIMC2 232 -#define CLK_SMMUFIMC1 233 -#define CLK_SMMUFIMC0 234 -#define CLK_JPEG 235 -#define CLK_CSIS1 236 -#define CLK_CSIS0 237 -#define CLK_FIMC3 238 -#define CLK_FIMC2 239 -#define CLK_FIMC1 240 -#define CLK_FIMC0 241 -#define CLK_PPMUTV 242 -#define CLK_SMMUTV 243 -#define CLK_HDMI 244 -#define CLK_MIXER 245 -#define CLK_VP 246 -#define CLK_PPMUMFC_R 247 -#define CLK_PPMUMFC_L 248 -#define CLK_SMMUMFC_R 249 -#define CLK_SMMUMFC_L 250 -#define CLK_MFC 251 -#define CLK_PPMUG3D 252 -#define CLK_G3D 253 -#define CLK_PPMULCD0 254 -#define CLK_SMMUFIMD0 255 -#define CLK_DSIM0 256 -#define CLK_SMIES 257 -#define CLK_MIE0 258 -#define CLK_FIMD0 259 -#define CLK_TSADC 260 -#define CLK_PPMUFILE 261 -#define CLK_NFCON 262 -#define CLK_USBDEVICE 263 -#define CLK_USBHOST 264 -#define CLK_SROMC 265 -#define CLK_SDMMC2 266 -#define CLK_SDMMC1 267 -#define CLK_SDMMC0 268 -#define CLK_PDMA1 269 -#define CLK_PDMA0 270 -#define CLK_SPDIF 271 -#define CLK_PWM 272 -#define CLK_PCM2 273 -#define CLK_PCM1 274 -#define CLK_I2S1 275 -#define CLK_SPI2 276 -#define CLK_SPI1 277 -#define CLK_SPI0 278 -#define CLK_I2CHDMI 279 -#define CLK_I2C7 280 -#define CLK_I2C6 281 -#define CLK_I2C5 282 -#define CLK_I2C4 283 -#define CLK_I2C3 284 -#define CLK_I2C2 285 -#define CLK_I2C1 286 -#define CLK_I2C0 287 -#define CLK_UART3 288 -#define CLK_UART2 289 -#define CLK_UART1 290 -#define CLK_UART0 291 - -/* Special clocks */ -#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330 -#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331 -#define CLK_SCLK_JPEG 332 -#define CLK_SCLK_CSIS1 333 -#define 
CLK_SCLK_CSIS0 334 -#define CLK_SCLK_CAM1 335 -#define CLK_SCLK_FIMC3_LCLK 336 -#define CLK_SCLK_FIMC2_LCLK 337 -#define CLK_SCLK_FIMC1_LCLK 338 -#define CLK_SCLK_FIMC0_LCLK 339 -#define CLK_SCLK_PIXEL 340 -#define CLK_SCLK_HDMI 341 -#define CLK_SCLK_MIXER 342 -#define CLK_SCLK_MFC 343 -#define CLK_SCLK_G3D 344 -#define CLK_SCLK_MIPIDPHY4L 345 -#define CLK_SCLK_MIPI0 346 -#define CLK_SCLK_MDNIE0 347 -#define CLK_SCLK_FIMD0 348 -#define CLK_SCLK_PCM0 349 -#define CLK_SCLK_AUDIO0 350 -#define CLK_SCLK_TSADC 351 -#define CLK_SCLK_EBI 352 -#define CLK_SCLK_MMC2 353 -#define CLK_SCLK_MMC1 354 -#define CLK_SCLK_MMC0 355 -#define CLK_SCLK_I2S 356 -#define CLK_SCLK_PCM2 357 -#define CLK_SCLK_PCM1 358 -#define CLK_SCLK_AUDIO2 359 -#define CLK_SCLK_AUDIO1 360 -#define CLK_SCLK_SPDIF 361 -#define CLK_SCLK_SPI2 362 -#define CLK_SCLK_SPI1 363 -#define CLK_SCLK_SPI0 364 -#define CLK_SCLK_UART3 365 -#define CLK_SCLK_UART2 366 -#define CLK_SCLK_UART1 367 -#define CLK_SCLK_UART0 368 -#define CLK_SCLK_HDMIPHY 369 - -/* - * Total number of clocks of main CMU. - * NOTE: Must be equal to last clock ID increased by one. - */ -#define CLK_NR_CLKS 370 - -/* - * CMU DMC - */ -#define CLK_DMC_FOUT_MPLL 1 -#define CLK_DMC_FOUT_BPLL 2 - -#define CLK_DMC_MOUT_MPLL 3 -#define CLK_DMC_MOUT_BPLL 4 -#define CLK_DMC_MOUT_DPHY 5 -#define CLK_DMC_MOUT_DMC_BUS 6 - -#define CLK_DMC_DIV_DMC 7 -#define CLK_DMC_DIV_DPHY 8 -#define CLK_DMC_DIV_DMC_PRE 9 -#define CLK_DMC_DIV_DMCP 10 -#define CLK_DMC_DIV_DMCD 11 -#define CLK_DMC_DIV_MPLL_PRE 12 - -/* - * Total number of clocks of CMU_DMC. - * NOTE: Must be equal to highest clock ID increased by one. - */ -#define NR_CLKS_DMC 13 - -#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index 4fa6bb2136e3..be39d23e6a32 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -771,7 +771,10 @@ #define CLK_PCLK_DECON 113 -#define DISP_NR_CLK 114 +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 + +#define DISP_NR_CLK 116 /* CMU_AUD */ #define CLK_MOUT_AUD_PLL_USER 1 diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index baade6f429d0..692846c7941b 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -14,15 +14,21 @@ #define CLKID_MPLL2 15 #define CLKID_SPI 34 #define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 #define CLKID_ETH 36 #define CLKID_USB0 50 #define CLKID_USB1 51 #define CLKID_USB 55 +#define CLKID_HDMI_PCLK 63 #define CLKID_USB1_DDR_BRIDGE 64 #define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_SANA 69 +#define CLKID_GCLK_VENCI_INT0 77 #define CLKID_AO_I2C 93 #define CLKID_SD_EMMC_A 94 #define CLKID_SD_EMMC_B 95 #define CLKID_SD_EMMC_C 96 +#define CLKID_SAR_ADC_CLK 97 +#define CLKID_SAR_ADC_SEL 98 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h new file mode 100644 index 000000000000..1c00b7fe296f --- /dev/null +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
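The dt-bindings clock headers in this area exist so that device trees and clock drivers agree on numeric clock indices. On the driver side the IDs typically become array positions handed to the common clock framework; a generic sketch follows, using the rk3328 header added later in this series because it also defines CLK_NR_CLKS. The provider skeleton and its names are assumptions for illustration, not the actual Rockchip (or HiSilicon) driver.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <dt-bindings/clock/rk3328-cru.h>	/* SCLK_SDMMC, CLK_NR_CLKS, ... */

/*
 * A consumer node writes e.g. "clocks = <&cru SCLK_SDMMC>;"; the specifier
 * argument is used as an index into this array by of_clk_src_onecell_get().
 */
static struct clk *example_clks[CLK_NR_CLKS];

static struct clk_onecell_data example_clk_data = {
	.clks = example_clks,
	.clk_num = CLK_NR_CLKS,
};

int example_register_clk_provider(struct device_node *np)
{
	/* example_clks[] would be populated by clk_register_*() calls first. */
	return of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
}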
+ */ + +#ifndef __DTS_HI3660_CLOCK_H +#define __DTS_HI3660_CLOCK_H + +/* fixed rate clocks */ +#define HI3660_CLKIN_SYS 0 +#define HI3660_CLKIN_REF 1 +#define HI3660_CLK_FLL_SRC 2 +#define HI3660_CLK_PPLL0 3 +#define HI3660_CLK_PPLL1 4 +#define HI3660_CLK_PPLL2 5 +#define HI3660_CLK_PPLL3 6 +#define HI3660_CLK_SCPLL 7 +#define HI3660_PCLK 8 +#define HI3660_CLK_UART0_DBG 9 +#define HI3660_CLK_UART6 10 +#define HI3660_OSC32K 11 +#define HI3660_OSC19M 12 +#define HI3660_CLK_480M 13 +#define HI3660_CLK_INV 14 + +/* clk in crgctrl */ +#define HI3660_FACTOR_UART3 15 +#define HI3660_CLK_FACTOR_MMC 16 +#define HI3660_CLK_GATE_I2C0 17 +#define HI3660_CLK_GATE_I2C1 18 +#define HI3660_CLK_GATE_I2C2 19 +#define HI3660_CLK_GATE_I2C6 20 +#define HI3660_CLK_DIV_SYSBUS 21 +#define HI3660_CLK_DIV_320M 22 +#define HI3660_CLK_DIV_A53 23 +#define HI3660_CLK_GATE_SPI0 24 +#define HI3660_CLK_GATE_SPI2 25 +#define HI3660_PCIEPHY_REF 26 +#define HI3660_CLK_ABB_USB 27 +#define HI3660_HCLK_GATE_SDIO0 28 +#define HI3660_HCLK_GATE_SD 29 +#define HI3660_CLK_GATE_AOMM 30 +#define HI3660_PCLK_GPIO0 31 +#define HI3660_PCLK_GPIO1 32 +#define HI3660_PCLK_GPIO2 33 +#define HI3660_PCLK_GPIO3 34 +#define HI3660_PCLK_GPIO4 35 +#define HI3660_PCLK_GPIO5 36 +#define HI3660_PCLK_GPIO6 37 +#define HI3660_PCLK_GPIO7 38 +#define HI3660_PCLK_GPIO8 39 +#define HI3660_PCLK_GPIO9 40 +#define HI3660_PCLK_GPIO10 41 +#define HI3660_PCLK_GPIO11 42 +#define HI3660_PCLK_GPIO12 43 +#define HI3660_PCLK_GPIO13 44 +#define HI3660_PCLK_GPIO14 45 +#define HI3660_PCLK_GPIO15 46 +#define HI3660_PCLK_GPIO16 47 +#define HI3660_PCLK_GPIO17 48 +#define HI3660_PCLK_GPIO18 49 +#define HI3660_PCLK_GPIO19 50 +#define HI3660_PCLK_GPIO20 51 +#define HI3660_PCLK_GPIO21 52 +#define HI3660_CLK_GATE_SPI3 53 +#define HI3660_CLK_GATE_I2C7 54 +#define HI3660_CLK_GATE_I2C3 55 +#define HI3660_CLK_GATE_SPI1 56 +#define HI3660_CLK_GATE_UART1 57 +#define HI3660_CLK_GATE_UART2 58 +#define HI3660_CLK_GATE_UART4 59 +#define HI3660_CLK_GATE_UART5 60 +#define HI3660_CLK_GATE_I2C4 61 +#define HI3660_CLK_GATE_DMAC 62 +#define HI3660_PCLK_GATE_DSS 63 +#define HI3660_ACLK_GATE_DSS 64 +#define HI3660_CLK_GATE_LDI1 65 +#define HI3660_CLK_GATE_LDI0 66 +#define HI3660_CLK_GATE_VIVOBUS 67 +#define HI3660_CLK_GATE_EDC0 68 +#define HI3660_CLK_GATE_TXDPHY0_CFG 69 +#define HI3660_CLK_GATE_TXDPHY0_REF 70 +#define HI3660_CLK_GATE_TXDPHY1_CFG 71 +#define HI3660_CLK_GATE_TXDPHY1_REF 72 +#define HI3660_ACLK_GATE_USB3OTG 73 +#define HI3660_CLK_GATE_SPI4 74 +#define HI3660_CLK_GATE_SD 75 +#define HI3660_CLK_GATE_SDIO0 76 +#define HI3660_CLK_GATE_UFS_SUBSYS 77 +#define HI3660_PCLK_GATE_DSI0 78 +#define HI3660_PCLK_GATE_DSI1 79 +#define HI3660_ACLK_GATE_PCIE 80 +#define HI3660_PCLK_GATE_PCIE_SYS 81 +#define HI3660_CLK_GATE_PCIEAUX 82 +#define HI3660_PCLK_GATE_PCIE_PHY 83 +#define HI3660_CLK_ANDGT_LDI0 84 +#define HI3660_CLK_ANDGT_LDI1 85 +#define HI3660_CLK_ANDGT_EDC0 86 +#define HI3660_CLK_GATE_UFSPHY_GT 87 +#define HI3660_CLK_ANDGT_MMC 88 +#define HI3660_CLK_ANDGT_SD 89 +#define HI3660_CLK_A53HPM_ANDGT 90 +#define HI3660_CLK_ANDGT_SDIO 91 +#define HI3660_CLK_ANDGT_UART0 92 +#define HI3660_CLK_ANDGT_UART1 93 +#define HI3660_CLK_ANDGT_UARTH 94 +#define HI3660_CLK_ANDGT_SPI 95 +#define HI3660_CLK_VIVOBUS_ANDGT 96 +#define HI3660_CLK_AOMM_ANDGT 97 +#define HI3660_CLK_320M_PLL_GT 98 +#define HI3660_AUTODIV_EMMC0BUS 99 +#define HI3660_AUTODIV_SYSBUS 100 +#define HI3660_CLK_GATE_UFSPHY_CFG 101 +#define HI3660_CLK_GATE_UFSIO_REF 102 +#define HI3660_CLK_MUX_SYSBUS 103 +#define HI3660_CLK_MUX_UART0 104 
+#define HI3660_CLK_MUX_UART1 105 +#define HI3660_CLK_MUX_UARTH 106 +#define HI3660_CLK_MUX_SPI 107 +#define HI3660_CLK_MUX_I2C 108 +#define HI3660_CLK_MUX_MMC_PLL 109 +#define HI3660_CLK_MUX_LDI1 110 +#define HI3660_CLK_MUX_LDI0 111 +#define HI3660_CLK_MUX_SD_PLL 112 +#define HI3660_CLK_MUX_SD_SYS 113 +#define HI3660_CLK_MUX_EDC0 114 +#define HI3660_CLK_MUX_SDIO_SYS 115 +#define HI3660_CLK_MUX_SDIO_PLL 116 +#define HI3660_CLK_MUX_VIVOBUS 117 +#define HI3660_CLK_MUX_A53HPM 118 +#define HI3660_CLK_MUX_320M 119 +#define HI3660_CLK_MUX_IOPERI 120 +#define HI3660_CLK_DIV_UART0 121 +#define HI3660_CLK_DIV_UART1 122 +#define HI3660_CLK_DIV_UARTH 123 +#define HI3660_CLK_DIV_MMC 124 +#define HI3660_CLK_DIV_SD 125 +#define HI3660_CLK_DIV_EDC0 126 +#define HI3660_CLK_DIV_LDI0 127 +#define HI3660_CLK_DIV_SDIO 128 +#define HI3660_CLK_DIV_LDI1 129 +#define HI3660_CLK_DIV_SPI 130 +#define HI3660_CLK_DIV_VIVOBUS 131 +#define HI3660_CLK_DIV_I2C 132 +#define HI3660_CLK_DIV_UFSPHY 133 +#define HI3660_CLK_DIV_CFGBUS 134 +#define HI3660_CLK_DIV_MMC0BUS 135 +#define HI3660_CLK_DIV_MMC1BUS 136 +#define HI3660_CLK_DIV_UFSPERI 137 +#define HI3660_CLK_DIV_AOMM 138 +#define HI3660_CLK_DIV_IOPERI 139 + +/* clk in pmuctrl */ +#define HI3660_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3660_GATE_UFS_TCXO_EN 0 +#define HI3660_GATE_USB_TCXO_EN 1 + +/* clk in sctrl */ +#define HI3660_PCLK_AO_GPIO0 0 +#define HI3660_PCLK_AO_GPIO1 1 +#define HI3660_PCLK_AO_GPIO2 2 +#define HI3660_PCLK_AO_GPIO3 3 +#define HI3660_PCLK_AO_GPIO4 4 +#define HI3660_PCLK_AO_GPIO5 5 +#define HI3660_PCLK_AO_GPIO6 6 +#define HI3660_PCLK_GATE_MMBUF 7 +#define HI3660_CLK_GATE_DSS_AXI_MM 8 +#define HI3660_PCLK_MMBUF_ANDGT 9 +#define HI3660_CLK_MMBUF_PLL_ANDGT 10 +#define HI3660_CLK_FLL_MMBUF_ANDGT 11 +#define HI3660_CLK_SYS_MMBUF_ANDGT 12 +#define HI3660_CLK_GATE_PCIEPHY_GT 13 +#define HI3660_ACLK_MUX_MMBUF 14 +#define HI3660_CLK_SW_MMBUF 15 +#define HI3660_CLK_DIV_AOBUS 16 +#define HI3660_PCLK_DIV_MMBUF 17 +#define HI3660_ACLK_DIV_MMBUF 18 +#define HI3660_CLK_DIV_PCIEPHY 19 + +/* clk in iomcu */ +#define HI3660_CLK_I2C0_IOMCU 0 +#define HI3660_CLK_I2C1_IOMCU 1 +#define HI3660_CLK_I2C2_IOMCU 2 +#define HI3660_CLK_I2C6_IOMCU 3 +#define HI3660_CLK_IOMCU_PERI0 4 + +#endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 1183347c383f..a7a1a50f33ef 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -449,5 +449,6 @@ #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 #define IMX7D_CKIL 438 -#define IMX7D_CLK_END 439 +#define IMX7D_OCOTP_CLK 439 +#define IMX7D_CLK_END 440 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 6240e5b0e900..7e8a7be6dcda 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,6 +81,17 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h 
b/include/dt-bindings/clock/qcom,gcc-mdm9615.h index 9ab2c4087120..787e448958bd 100644 --- a/include/dt-bindings/clock/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -323,5 +323,7 @@ #define CE3_H_CLK 305 #define USB_HS1_SYSTEM_CLK_SRC 306 #define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h index 8fa535be2ebc..df47da0860f7 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8994.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -133,5 +133,6 @@ #define GCC_USB30_MOCK_UTMI_CLK 115 #define GCC_USB3_PHY_AUX_CLK 116 #define GCC_USB_HS_SYSTEM_CLK 117 +#define GCC_SDCC1_AHB_CLK 118 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 1828723eb621..1f5c42254798 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -339,6 +339,7 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 5924cdb71336..96b63c00249e 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -14,7 +14,7 @@ #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H #define _DT_BINDINGS_CLK_MSM_RPMCC_H -/* apq8064 */ +/* RPM clocks */ #define RPM_PXO_CLK 0 #define RPM_PXO_A_CLK 1 #define RPM_CXO_CLK 2 @@ -38,7 +38,7 @@ #define RPM_SFPB_CLK 20 #define RPM_SFPB_A_CLK 21 -/* msm8916 */ +/* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 #define RPM_SMD_XO_A_CLK_SRC 1 #define RPM_SMD_PCNOC_CLK 2 @@ -65,5 +65,41 @@ #define RPM_SMD_RF_CLK1_A_PIN 23 #define RPM_SMD_RF_CLK2_PIN 24 #define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 #endif diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h index d141c1f0c778..eff4319d008b 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -108,6 +108,8 @@ #define PCLK_TSADC 349 #define PCLK_CPU 350 #define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 /* hclk gates */ #define HCLK_SDMMC 448 diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index 
9a586e2d9c91..d7b6c83ea63f 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -88,6 +88,7 @@ #define SCLK_PVTM_GPU 124 #define SCLK_CRYPTO 125 #define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -168,6 +169,7 @@ #define PCLK_WDT 368 #define PCLK_EFUSE256 369 #define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 000000000000..ee702c8e4c09 --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk 
gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define PCLK_MAC2PHY 222 +#define PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 312 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_VIO 327 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26 +#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 
47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 +#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h new file mode 
100644 index 000000000000..6731f1f00a84 --- /dev/null +++ b/include/dt-bindings/clock/ste-ab8500.h @@ -0,0 +1,11 @@ +#ifndef __STE_CLK_AB8500_H__ +#define __STE_CLK_AB8500_H__ + +#define AB8500_SYSCLK_BUF2 0 +#define AB8500_SYSCLK_BUF3 1 +#define AB8500_SYSCLK_BUF4 2 +#define AB8500_SYSCLK_ULP 3 +#define AB8500_SYSCLK_INT 4 +#define AB8500_SYSCLK_AUDIO 5 + +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h index 08bcab61b714..49bb3c203e5c 100644 --- a/include/dt-bindings/clock/stm32fx-clock.h +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -36,4 +36,24 @@ #define END_PRIMARY_CLK 14 +#define CLK_HSI 14 +#define CLK_SYSCLK 15 +#define CLK_HDMI_CEC 16 +#define CLK_SPDIF 17 +#define CLK_USART1 18 +#define CLK_USART2 19 +#define CLK_USART3 20 +#define CLK_UART4 21 +#define CLK_UART5 22 +#define CLK_USART6 23 +#define CLK_UART7 24 +#define CLK_UART8 25 +#define CLK_I2C1 26 +#define CLK_I2C2 27 +#define CLK_I2C3 28 +#define CLK_I2C4 29 +#define CLK_LPTIMER 30 + +#define END_PRIMARY_CLK_F7 31 + #endif diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 000000000000..aeb2e2f781fb --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 + +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..c0d5d5599c87 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 000000000000..6ea1492a73a6 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 
+#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 000000000000..3dad6c3cd131 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 000000000000..783a60d2ccea --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h index e0ebb20ffdd3..b7aa3646208b 100644 --- a/include/dt-bindings/pinctrl/samsung.h +++ b/include/dt-bindings/pinctrl/samsung.h @@ -68,4 +68,12 @@ #define EXYNOS_PIN_FUNC_6 6 #define EXYNOS_PIN_FUNC_F 0xf +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + #endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h new file mode 100644 index 000000000000..c2b9726b5026 --- /dev/null +++ b/include/dt-bindings/reset/sun5i-ccu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RST_SUN5I_H_ +#define _RST_SUN5I_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_GPS 2 +#define RST_DE_BE 3 +#define RST_DE_FE 4 +#define RST_TVE 5 +#define RST_LCD 6 +#define RST_CSI 7 +#define RST_VE 8 +#define RST_GPU 9 +#define RST_IEP 10 + +#endif /* _RST_SUN5I_H_ */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..b58ef21a2e18 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-v3s-ccu.h, which is + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_ +#define _DT_BINDINGS_RST_SUN8I_V3S_H_ + +#define RST_USB_PHY0 0 + +#define RST_MBUS 1 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_OHCI0 22 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_CSI 30 +#define RST_BUS_DE 34 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 + +#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h new file mode 100644 index 000000000000..4b8df4b36788 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-ccu.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 000000000000..205072770171 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 000000000000..ee492864c2aa --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index c56fef40f53e..e098cbe27db5 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -48,9 +48,14 @@ extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, char __user *buffer, size_t buflen); -static inline const struct user_key_payload *user_key_payload(const struct key *key) +static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { - return (struct user_key_payload *)rcu_dereference_key(key); + return (struct user_key_payload *)dereference_key_rcu(key); +} + +static inline struct user_key_payload *user_key_payload_locked(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_locked((struct key *)key); } #endif /* CONFIG_KEYS */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 1303b570b18c..05488da3aee9 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -6,6 +6,8 @@ #include <asm/exec.h> #include <uapi/linux/binfmts.h> +struct filename; + #define CORENAME_MAX_SIZE 128 /* @@ -123,4 +125,12 @@ extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); + #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h new file mode 100644 index 000000000000..b1ef6e14744f --- /dev/null +++ b/include/linux/blk-mq-virtio.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_VIRTIO_H +#define _LINUX_BLK_MQ_VIRTIO_H + +struct blk_mq_tag_set; +struct virtio_device; + +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec); + +#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 001d30d727c5..b296a9006117 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -245,6 +245,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); int blk_mq_map_queues(struct blk_mq_tag_set *set); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aecca0e7d9ca..796016e63c1d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -2,6 +2,7 @@ #define _LINUX_BLKDEV_H #include 
<linux/sched.h> +#include <linux/sched/clock.h> #ifdef CONFIG_BLOCK diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 03a6653d329a..2ea0c282f3dc 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -22,7 +22,6 @@ struct ceph_osd_client; * completion callback for async writepages */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); -typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); #define CEPH_HOMELESS_OSD -1 @@ -170,15 +169,12 @@ struct ceph_osd_request { unsigned int r_num_ops; int r_result; - bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion; - struct completion r_done_completion; /* fsync waiter */ + struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - ceph_osdc_unsafe_callback_t r_unsafe_callback; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 9a9041784dcf..938656f70807 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) case CEPH_POOL_TYPE_EC: return false; default: - BUG_ON(1); + BUG(); } } @@ -82,13 +82,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest, void ceph_oloc_destroy(struct ceph_object_locator *oloc); /* - * Maximum supported by kernel client object name length - * - * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100) - */ -#define CEPH_MAX_OID_NAME_LEN 100 - -/* * 51-char inline_name is long enough for all cephfs and all but one * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all @@ -173,8 +166,8 @@ struct ceph_osdmap { * the list of osds that store+replicate them. */ struct crush_map *crush; - struct mutex crush_scratch_mutex; - int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; + struct mutex crush_workspace_mutex; + void *crush_workspace; }; static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 5c0da61cb763..5d0018782d50 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -50,7 +50,7 @@ struct ceph_timespec { #define CEPH_PG_LAYOUT_LINEAR 2 #define CEPH_PG_LAYOUT_HYBRID 3 -#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ +#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ /* * placement group. diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 861b4677fc5b..6a3f850cabab 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -148,14 +148,18 @@ struct cgroup_subsys_state { * set for a task. */ struct css_set { - /* Reference count */ - atomic_t refcount; - /* - * List running through all cgroup groups in the same hash - * slot. Protected by css_set_lock + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). */ - struct hlist_node hlist; + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* reference count */ + atomic_t refcount; + + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; /* * Lists running through all tasks using this cgroup group. 
@@ -167,21 +171,29 @@ struct css_set { struct list_head tasks; struct list_head mg_tasks; + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* - * List of cgrp_cset_links pointing at cgroups referenced from this - * css_set. Protected by css_set_lock. + * On the default hierarchy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. */ - struct list_head cgrp_links; + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - /* the default cgroup associated with this css_set */ - struct cgroup *dfl_cgrp; + /* + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock + */ + struct hlist_node hlist; /* - * Set of subsystem states, one for each subsystem. This array is - * immutable after creation apart from the init_css_set during - * subsystem registration (at boot time). + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. */ - struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + struct list_head cgrp_links; /* * List of csets participating in the on-going migration either as @@ -201,18 +213,6 @@ struct css_set { struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; - /* - * On the default hierarhcy, ->subsys[ssid] may point to a css - * attached to an ancestor instead of the cgroup this css_set is - * associated with. The following node is anchored at - * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to - * iterate through all css's attached to a given cgroup. - */ - struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - - /* all css_task_iters currently walking this cset */ - struct list_head task_iters; - /* dead and being drained, ignore for migration */ bool dead; @@ -388,6 +388,9 @@ struct cftype { struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() @@ -528,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_begin() and allows cgroup operations to - * synchronize against threadgroup changes using a percpu_rw_semaphore. + * Allows cgroup operations to synchronize against threadgroup changes + * using a percpu_rw_semaphore. */ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { @@ -540,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_end(). Counterpart of - * cgroup_threadcgroup_change_begin(). + * Counterpart of cgroup_threadgroup_change_begin(). 
*/ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { @@ -552,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #define CGROUP_SUBSYS_COUNT 0 -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c83c23f0577b..f6b43fbb141c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset * @leader: the loop cursor * @dst_css: the destination css - * @tset: takset to iterate + * @tset: taskset to iterate * * Iterate threadgroup leaders of @tset. For single-task migrations, @tset * may not contain any. diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h new file mode 100644 index 000000000000..e94290b29e99 --- /dev/null +++ b/include/linux/cgroup_rdma.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#ifndef _CGROUP_RDMA_H +#define _CGROUP_RDMA_H + +#include <linux/cgroup.h> + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE, + RDMACG_RESOURCE_HCA_OBJECT, + RDMACG_RESOURCE_MAX, +}; + +#ifdef CONFIG_CGROUP_RDMA + +struct rdma_cgroup { + struct cgroup_subsys_state css; + + /* + * head to keep track of all resource pools + * that belongs to this cgroup. + */ + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +/* + * APIs for RDMA/IB stack to publish when a device wants to + * participate in resource accounting + */ +int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_unregister_device(struct rdmacg_device *device); + +/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +#endif /* CONFIG_CGROUP_RDMA */ +#endif /* _CGROUP_RDMA_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 0df0336acee9..d0e597c44585 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -56,6 +56,10 @@ SUBSYS(hugetlb) SUBSYS(pids) #endif +#if IS_ENABLED(CONFIG_CGROUP_RDMA) +SUBSYS(rdma) +#endif + /* * The following subsystems are not supported on the default hierarchy. 
*/ diff --git a/include/linux/compat.h b/include/linux/compat.h index 9e40be522793..aef47be2a5c1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 811f7a915658..0efef9cf014f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -197,6 +197,17 @@ #endif #endif +#ifdef CONFIG_STACK_VALIDATION +#define annotate_unreachable() ({ \ + asm("%c0:\t\n" \ + ".pushsection .discard.unreachable\t\n" \ + ".long %c0b - .\t\n" \ + ".popsection\t\n" : : "i" (__LINE__)); \ +}) +#else +#define annotate_unreachable() +#endif + /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer @@ -206,7 +217,8 @@ * this in the preprocessor, but we can live with this because they're * unreleased. Really, we need to have autoconf for the kernel. */ -#define unreachable() __builtin_unreachable() +#define unreachable() \ + do { annotate_unreachable(); __builtin_unreachable(); } while (0) /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 91c30cba984e..f8110051188f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -105,29 +105,36 @@ struct ftrace_branch_data { }; }; +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect) ({ \ +#define __branch_check__(x, expect, is_constant) ({ \ int ______r; \ - static struct ftrace_branch_data \ + static struct ftrace_likely_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ }; \ - ______r = likely_notrace(x); \ - ftrace_likely_update(&______f, ______r, expect); \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ ______r; \ }) @@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__builtin_constant_p(x) ? 
!!(x) : __branch_check__(x, 1)) +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) # endif # ifndef unlikely -# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -570,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s (_________p1); \ }) -/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ -#ifdef CONFIG_KPROBES -# define __kprobes __attribute__((__section__(".kprobes.text"))) -# define nokprobe_inline __always_inline -#else -# define __kprobes -# define nokprobe_inline inline -#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21f9c74496e7..f92081234afd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,6 +30,8 @@ struct cpu { extern void boot_cpu_init(void); extern void boot_cpu_state_init(void); +extern void cpu_init(void); +extern void trap_init(void); extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index bb790c4db0c5..62d240e962f0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -26,7 +26,6 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, - CPUHP_VIRT_SCSI_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index bfc204e70338..611fce58d670 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -9,6 +9,8 @@ */ #include <linux/sched.h> +#include <linux/sched/topology.h> +#include <linux/sched/task.h> #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> diff --git a/include/linux/cputime.h b/include/linux/cputime.h deleted file mode 100644 index a691dc4ddc13..000000000000 --- a/include/linux/cputime.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __LINUX_CPUTIME_H -#define __LINUX_CPUTIME_H - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include <asm/cputime.h> - -#ifndef cputime_to_nsecs -# define cputime_to_nsecs(__ct) \ - (cputime_to_usecs(__ct) * NSEC_PER_USEC) -#endif - -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -#endif /* __LINUX_CPUTIME_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index f0e70a1bb3ac..b03e7d049a64 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -18,8 +18,9 @@ #include <linux/selinux.h> #include <linux/atomic.h> #include <linux/uidgid.h> +#include <linux/sched.h> +#include <linux/sched/user.h> -struct user_struct; struct cred; struct inode; diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index be8f12b8f195..fbecbd089d75 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -135,13 +135,6 @@ struct crush_bucket { __u32 size; /* num items */ __s32 *items; - /* - * cached random permutation: used for uniform bucket and for - * the linear search fallback for the other bucket types. - */ - __u32 perm_x; /* @x for which *perm is defined */ - __u32 perm_n; /* num elements of *perm that are permuted/defined */ - __u32 *perm; }; struct crush_bucket_uniform { @@ -211,6 +204,21 @@ struct crush_map { * device fails. */ __u8 chooseleaf_stable; + /* + * This value is calculated after decode or construction by + * the builder. 
It is exposed here (rather than having a + * 'build CRUSH working space' function) so that callers can + * reserve a static buffer, allocate space on the stack, or + * otherwise avoid calling into the heap allocator if they + * want to. The size of the working space depends on the map, + * while the size of the scratch vector passed to the mapper + * depends on the size of the desired result set. + * + * Nothing stops the caller from allocating both in one swell + * foop and passing in two points, though. + */ + size_t working_size; + #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. version 1 @@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i) return ((i+1) << 1)-1; } +/* + * These data structures are private to the CRUSH implementation. They + * are exposed in this header file because builder needs their + * definitions to calculate the total working size. + * + * Moving this out of the crush map allow us to treat the CRUSH map as + * immutable within the mapper and removes the requirement for a CRUSH + * map lock. + */ +struct crush_work_bucket { + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; /* Permutation of the bucket's items */ +}; + +struct crush_work { + struct crush_work_bucket **work; /* Per-bucket working store */ +}; + #endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index 5dfd5b1125d2..c95e19e1ff11 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weights, int weight_max, - int *scratch); + void *cwin); + +/* + * Returns the exact amount of workspace that will need to be used + * for a given combination of crush_map and result_max. The caller can + * then allocate this much on its own, either on the stack, in a + * per-thread long-lived buffer, or however it likes. + */ +static inline size_t crush_work_size(const struct crush_map *map, + int result_max) +{ + return map->working_size + result_max * 3 * sizeof(__u32); +} + +void crush_init_workspace(const struct crush_map *map, void *v); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c965e4469499..d2e38dc6172c 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> #include <linux/stringhash.h> +#include <linux/wait.h> struct path; struct vfsmount; @@ -562,7 +563,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * @inode: inode to select the dentry from multiple layers (can be NULL) * @flags: open flags to control copy-up behavior * - * If dentry is on an union/overlay, then return the underlying, real dentry. + * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. * * See also: Documentation/filesystems/vfs.txt @@ -581,7 +582,7 @@ static inline struct dentry *d_real(struct dentry *dentry, * d_real_inode - Return the real inode * @dentry: The dentry to query * - * If dentry is on an union/overlay, then return the underlying, real inode. + * If dentry is on a union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). 
*/ static inline struct inode *d_real_inode(const struct dentry *dentry) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 00e60f79a9cc..4178d2493547 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -18,8 +18,6 @@ #define _LINUX_DELAYACCT_H #include <uapi/linux/taskstats.h> -#include <linux/sched.h> -#include <linux/slab.h> /* * Per-task flags relevant to delay accounting @@ -30,7 +28,43 @@ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif +#include <linux/sched.h> +#include <linux/slab.h> + +#ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); diff --git a/include/linux/device.h b/include/linux/device.h index a48a7ff70164..30c4570e928d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -925,6 +925,7 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif + const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c24721a33b4c..0977317c6835 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -134,7 +134,8 @@ struct dma_map_ops { int is_phys; }; -extern struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} #else /* * Define the dma api to allow compilation but not linking of * dma dependent code. 
Code that depends on the dma-mapping * API needs to set 'depends on HAS_DMA' in its Kconfig */ -extern struct dma_map_ops bad_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +extern const struct dma_map_ops bad_dma_ops; +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return &bad_dma_ops; } @@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(ptr, size); @@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; struct scatterlist *s; @@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); @@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); @@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); @@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_resource) @@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) @@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) @@ -371,7 +384,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) 
{ - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) @@ -383,7 +396,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) @@ -428,7 +441,7 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, @@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!ops); @@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); WARN_ON(irqs_disabled()); @@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!ops) return 0; @@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->set_dma_mask) return ops->set_dma_mask(dev, mask); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e9bc9292bd3a..e8ffba1052d3 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -26,7 +26,7 @@ #include <linux/msi.h> #include <linux/irqreturn.h> #include <linux/rwsem.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> struct acpi_dmar_header; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 698d51a0eea3..c8240a12c42d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -3,6 +3,8 @@ #include <linux/user.h> #include <linux/bug.h> +#include <linux/sched/task_stack.h> + #include <asm/elf.h> #include <uapi/linux/elfcore.h> diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index cea41a124a80..e2d239ed4c60 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,6 +36,12 @@ #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) #define F2FS_META_INO(sbi) (sbi->meta_ino_num) +#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ 
+#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) + /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) #define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) @@ -108,6 +114,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 #define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 @@ -272,6 +279,7 @@ struct f2fs_node { * For NAT entries */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 9f4956d8601c..728d4e0292aa 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -61,6 +61,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ +struct kmem_cache; + #ifdef CONFIG_FAILSLAB extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else diff --git a/include/linux/frame.h b/include/linux/frame.h index e6baaba3f1ae..d772c61c31da 100644 --- a/include/linux/frame.h +++ b/include/linux/frame.h @@ -11,7 +11,7 @@ * For more information, see tools/objtool/Documentation/stack-validation.txt. */ #define STACK_FRAME_NON_STANDARD(func) \ - static void __used __section(__func_stack_frame_non_standard) \ + static void __used __section(.discard.func_stack_frame_non_standard) \ *__func_stack_frame_non_standard_##func = func #else /* !CONFIG_STACK_VALIDATION */ diff --git a/include/linux/fs.h b/include/linux/fs.h index c930cbc19342..aad3fd0ff5f8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -655,6 +655,11 @@ struct inode { void *i_private; /* fs or device private pointer */ }; +static inline unsigned int i_blocksize(const struct inode *node) +{ + return (1 << node->i_blkbits); +} + static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); @@ -1562,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); extern int vfs_whiteout(struct inode *, struct dentry *); +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + /* * VFS file helper functions. 
*/ @@ -1701,7 +1709,7 @@ struct inode_operations { int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); @@ -1713,6 +1721,29 @@ struct inode_operations { int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; +static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +static inline int call_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + return file->f_op->fsync(file, start, end, datasync); +} + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, @@ -1739,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - u64 len) -{ - int ret; - - sb_start_write(file_inode(file_out)->i_sb); - ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); - sb_end_write(file_inode(file_out)->i_sb); - - return ret; -} - struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); @@ -2563,6 +2581,19 @@ static inline void file_end_write(struct file *file) __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + u64 len) +{ + int ret; + + file_start_write(file_out); + ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); + file_end_write(file_out); + + return ret; +} + /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. 
@@ -2871,8 +2902,8 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); extern void generic_fillattr(struct inode *, struct kstat *); -int vfs_getattr_nosec(struct path *path, struct kstat *stat); -extern int vfs_getattr(struct path *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); void inode_add_bytes(struct inode *inode, loff_t bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes); @@ -2885,10 +2916,29 @@ extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); -extern int vfs_stat(const char __user *, struct kstat *); -extern int vfs_lstat(const char __user *, struct kstat *); -extern int vfs_fstat(unsigned int, struct kstat *); -extern int vfs_fstatat(int , const char __user *, struct kstat *, int); +extern int vfs_statx(int, const char __user *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline int vfs_stat(const char __user *filename, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS); +} +static inline int vfs_lstat(const char __user *name, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstatat(int dfd, const char __user *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); +} +static inline int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0); +} + + extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); @@ -2918,7 +2968,7 @@ extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); extern int simple_setattr(struct dentry *, struct iattr *); -extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index a1e8277120c7..c46eab5bc893 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h @@ -73,7 +73,7 @@ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ -/* __u16 pallete:1; */ +/* __u16 palette:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ @@ -142,7 +142,7 @@ struct diu_ad { struct diu { __be32 desc[3]; __be32 gamma; - __be32 pallete; + __be32 palette; __be32 cursor; __be32 curs_pos; __be32 diu_mode; diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index e52b427223ba..249e579ecd4c 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -19,7 +19,6 @@ #include <linux/ktime.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/wait.h> #include <linux/percpu.h> #include <linux/timer.h> #include 
<linux/timerqueue.h> diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7b23a3316dcb..6b183521c616 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,6 +30,7 @@ #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +#include <linux/rtmutex.h> #include <linux/irqdomain.h> /* for Host Notify IRQ */ #include <linux/of.h> /* for struct device_node */ #include <linux/swab.h> /* for swab16 */ @@ -283,6 +284,7 @@ enum i2c_slave_event { extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); extern int i2c_slave_unregister(struct i2c_client *client); +extern bool i2c_detect_slave_mode(struct device *dev); static inline int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) diff --git a/include/linux/idr.h b/include/linux/idr.h index 3c01b89aed67..bf70b3ef0a07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -12,47 +12,29 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include <linux/types.h> -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/rcupdate.h> +#include <linux/radix-tree.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_next; +}; /* - * Using 6 bits at each layer allows us to allocate 7 layers out of each page. - * 8 bits only gave us 3 layers out of every pair of pages, which is less - * efficient except for trees with a largest element between 192-255 inclusive. + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. */ -#define IDR_BITS 6 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - int layer; /* distance from leaf */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - union { - /* A zero bit means "space here" */ - DECLARE_BITMAP(bitmap, IDR_SIZE); - struct rcu_head rcu_head; - }; -}; +#define IDR_FREE 0 -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - int layers; /* only valid w/o concurrent changes */ - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; - int id_free_cnt; - struct idr_layer *id_free; -}; +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) -#define IDR_INIT(name) \ +#define IDR_INIT \ { \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDR(name) struct idr name = IDR_INIT /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -62,9 +44,9 @@ struct idr { * idr_alloc_cyclic() if it is free (otherwise the search will start from * this position). */ -static inline unsigned int idr_get_cursor(struct idr *idr) +static inline unsigned int idr_get_cursor(const struct idr *idr) { - return READ_ONCE(idr->cur); + return READ_ONCE(idr->idr_next); } /** @@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) */ static inline void idr_set_cursor(struct idr *idr, unsigned int val) { - WRITE_ONCE(idr->cur, val); + WRITE_ONCE(idr->idr_next, val); } /** @@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -/* - * This is what we export. 
- */ - -void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); -int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t); +int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); +int idr_for_each(const struct idr *, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); -bool idr_is_empty(struct idr *idp); +void *idr_get_next(struct idr *, int *nextid); +void *idr_replace(struct idr *, void *, int id); +void idr_destroy(struct idr *); + +static inline void *idr_remove(struct idr *idr, int id) +{ + return radix_tree_delete_item(&idr->idr_rt, id, NULL); +} + +static inline void idr_init(struct idr *idr) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_next = 0; +} + +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} /** * idr_preload_end - end preload section started with idr_preload() @@ -137,19 +128,14 @@ static inline void idr_preload_end(void) * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. */ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(const struct idr *idr, int id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); - - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); - - return idr_find_slowpath(idr, id); + return radix_tree_lookup(&idr->idr_rt, id); } /** * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * @@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id) * after normal terminatinon @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) /** - * idr_for_each_entry - continue iteration over an idr's elements of a given type - * @idp: idr handle + * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * Continue to iterate over list of given type, continuing after * the current position. */ -#define idr_for_each_entry_continue(idp, entry, id) \ - for ((entry) = idr_get_next((idp), &(id)); \ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idp), &(id))) + ++id, (entry) = idr_get_next((idr), &(id))) /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. - * - * IDA_BITMAP_LONGS is calculated to be one less to accommodate - * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. 
*/ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { - long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + struct ida { - struct idr idr; - struct ida_bitmap *free_bitmap; + struct radix_tree_root ida_rt; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) +#define IDA_INIT { \ + .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + /** * ida_get_new - allocate new ID * @ida: idr handle @@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } -static inline bool ida_is_empty(struct ida *ida) +static inline bool ida_is_empty(const struct ida *ida) { - return idr_is_empty(&ida->idr); + return radix_tree_empty(&ida->ida_rt); } - -void __init idr_init_cache(void); - #endif /* __IDR_H__ */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a85d61f7614..91d9049f0039 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,8 +12,10 @@ #include <linux/securebits.h> #include <linux/seqlock.h> #include <linux/rbtree.h> +#include <linux/sched/autogroup.h> #include <net/net_namespace.h> #include <linux/sched/rt.h> +#include <linux/mm_types.h> #include <asm/thread_info.h> @@ -149,8 +151,6 @@ extern struct group_info init_groups; extern struct cred init_cred; -extern struct task_group root_task_group; - #ifdef CONFIG_CGROUP_SCHED # define INIT_CGROUP_SCHED(tsk) \ .sched_task_group = &root_task_group, diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 78c5d5ae3857..f1045b2c6a00 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -100,7 +100,7 @@ struct ipmi_user_hndl { /* Create a new user of the IPMI layer on the given interface number. 
*/ int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b63d6b7b0db0..8e06d758ee48 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -89,11 +89,17 @@ extern bool static_key_initialized; struct static_key { atomic_t enabled; -/* Set lsb bit to 1 if branch is default true, 0 ot */ - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct static_key_mod *next; -#endif +/* + * bit 0 => 1 if key is initially true + * 0 if initially false + * bit 1 => 1 if points to struct static_key_mod + * 0 if points to struct jump_entry + */ + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; }; #else @@ -118,9 +124,10 @@ struct module; #ifdef HAVE_JUMP_LABEL -#define JUMP_TYPE_FALSE 0UL -#define JUMP_TYPE_TRUE 1UL -#define JUMP_TYPE_MASK 1UL +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_LINKED 2UL +#define JUMP_TYPE_MASK 3UL static __always_inline bool static_key_false(struct static_key *key) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index c908b25bf5a5..ceb3fe78a0d3 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,7 +1,6 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H -#include <linux/sched.h> #include <linux/types.h> struct kmem_cache; @@ -30,16 +29,10 @@ static inline void *kasan_mem_to_shadow(const void *addr) } /* Enable reporting bugs after kasan_disable_current() */ -static inline void kasan_enable_current(void) -{ - current->kasan_depth++; -} +extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ -static inline void kasan_disable_current(void) -{ - current->kasan_depth--; -} +extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 8f2e059e4d45..4d748603e818 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -8,7 +8,7 @@ /* * The use of "&&" / "||" is limited in certain expressions. - * The followings enable to calculate "and" / "or" with macro expansion only. + * The following enable to calculate "and" / "or" with macro expansion only. */ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 7056238fd9f5..a9b11b8d06f2 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -46,6 +46,7 @@ enum kernfs_node_flag { KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, KERNFS_EMPTY_DIR = 0x1000, + KERNFS_HAS_RELEASE = 0x2000, }; /* @flags for kernfs_create_root() */ @@ -175,6 +176,7 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; + struct seq_file *seq_file; void *priv; /* private fields, do not use outside kernfs proper */ @@ -185,12 +187,20 @@ struct kernfs_open_file { char *prealloc_buf; size_t atomic_write_len; - bool mmapped; + bool mmapped:1; + bool released:1; const struct vm_operations_struct *vm_ops; }; struct kernfs_ops { /* + * Optional open/release methods. Both are called with + * @of->seq_file populated. + */ + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* * Read is handled by either seq_file or raw_read(). * * If seq_show() is present, seq_file path is active. 
Other seq diff --git a/include/linux/key.h b/include/linux/key.h index 722914798f37..e45212f2777e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -354,7 +354,10 @@ static inline bool key_is_instantiated(const struct key *key) !test_bit(KEY_FLAG_NEGATIVE, &key->flags); } -#define rcu_dereference_key(KEY) \ +#define dereference_key_rcu(KEY) \ + (rcu_dereference((KEY)->payload.rcu_data0)) + +#define dereference_key_locked(KEY) \ (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 1e032a1ddb3e..5d9a400af509 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -1,7 +1,8 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include <linux/sched.h> /* MMF_VM_HUGEPAGE */ +#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 16ddfb8b304a..c328e4f7dcad 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -29,7 +29,7 @@ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. */ -#include <linux/compiler.h> /* for __kprobes */ +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/list.h> #include <linux/notifier.h> @@ -40,9 +40,9 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <asm/kprobes.h> #ifdef CONFIG_KPROBES -#include <asm/kprobes.h> /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 @@ -51,6 +51,7 @@ #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ +#include <asm-generic/kprobes.h> typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; @@ -509,18 +510,4 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif -#ifdef CONFIG_KPROBES -/* - * Blacklist ganerating macro. Specify functions which is not probed - * by using this macro. 
- */ -#define __NOKPROBE_SYMBOL(fname) \ -static unsigned long __used \ - __attribute__((section("_kprobe_blacklist"))) \ - _kbl_addr_##fname = (unsigned long)fname; -#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) -#else -#define NOKPROBE_SYMBOL(fname) -#endif - #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 481c8c4627ca..e1cfda4bee58 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,6 +12,7 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> struct stable_node; struct mem_cgroup; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c15373894a42..b37dee3acaba 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return fl1->fl_pid == fl2->fl_pid + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) + && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end diff --git a/include/linux/log2.h b/include/linux/log2.h index ef3d4f67118c..c373295f359f 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -16,12 +16,6 @@ #include <linux/bitops.h> /* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - -/* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? 
\ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 6483a6fdce59..ffb21e79204d 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -134,6 +134,7 @@ /* RTC_CTRL_REG bitfields */ #define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ +#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 #define TPS65910_RTC_CTRL_GET_TIME 0x40 /* RTC_STATUS_REG bitfields */ diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 27d7c95fd0da..504d54c71bdb 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -90,7 +90,7 @@ struct mbus_hw_ops { }; struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 808751d7b737..f60f45fe226f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1,9 +1,9 @@ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H +#include <linux/mm_types_task.h> + #include <linux/auxvec.h> -#include <linux/types.h> -#include <linux/threads.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rbtree.h> @@ -13,7 +13,7 @@ #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> -#include <asm/page.h> + #include <asm/mmu.h> #ifndef AT_VECTOR_SIZE_ARCH @@ -24,11 +24,6 @@ struct address_space; struct mem_cgroup; -#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) -#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -231,17 +226,6 @@ struct page { #endif ; -struct page_frag { - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 offset; - __u32 size; -#else - __u16 offset; - __u16 size; -#endif -}; - #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) @@ -371,27 +355,6 @@ struct core_state { struct completion startup; }; -enum { - MM_FILEPAGES, /* Resident file mapping pages */ - MM_ANONPAGES, /* Resident anonymous pages */ - MM_SWAPENTS, /* Anonymous swap entries */ - MM_SHMEMPAGES, /* Resident shared memory pages */ - NR_MM_COUNTERS -}; - -#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) -#define SPLIT_RSS_COUNTING -/* per-thread cached information, */ -struct task_rss_stat { - int events; /* for synchronization threshold */ - int count[NR_MM_COUNTERS]; -}; -#endif /* USE_SPLIT_PTE_PTLOCKS */ - -struct mm_rss_stat { - atomic_long_t count[NR_MM_COUNTERS]; -}; - struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ @@ -407,8 +370,27 @@ struct mm_struct { unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; - atomic_t mm_users; /* How many users with user space? */ - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + + /** + * @mm_users: The number of users including userspace. + * + * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops + * to 0 (i.e. 
when the task exits and there are no other temporary + * reference holders), we also release a reference on @mm_count + * (which may then free the &struct mm_struct if @mm_count also + * drops to 0). + */ + atomic_t mm_users; + + /** + * @mm_count: The number of references to &struct mm_struct + * (@mm_users count as 1). + * + * Use mmgrab()/mmdrop() to modify. When this drops to 0, the + * &struct mm_struct is freed. + */ + atomic_t mm_count; + atomic_long_t nr_ptes; /* PTE page table pages */ #if CONFIG_PGTABLE_LEVELS > 2 atomic_long_t nr_pmds; /* PMD page table pages */ @@ -515,6 +497,8 @@ struct mm_struct { struct work_struct async_put_work; }; +extern struct mm_struct init_mm; + static inline void mm_init_cpumask(struct mm_struct *mm) { #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h new file mode 100644 index 000000000000..136dfdf63ba1 --- /dev/null +++ b/include/linux/mm_types_task.h @@ -0,0 +1,87 @@ +#ifndef _LINUX_MM_TYPES_TASK_H +#define _LINUX_MM_TYPES_TASK_H + +/* + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. + * + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) + */ + +#include <linux/types.h> +#include <linux/threads.h> +#include <linux/atomic.h> +#include <linux/cpumask.h> + +#include <asm/page.h> + +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) + +/* + * The per task VMA cache array: + */ +#define VMACACHE_BITS 2 +#define VMACACHE_SIZE (1U << VMACACHE_BITS) +#define VMACACHE_MASK (VMACACHE_SIZE - 1) + +struct vmacache { + u32 seqnum; + struct vm_area_struct *vmas[VMACACHE_SIZE]; +}; + +enum { + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; + +/* Track pages that require TLB flushes */ +struct tlbflush_unmap_batch { +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* + * Each bit set is a CPU that potentially has a TLB entry for one of + * the PFNs being flushed. See set_tlb_ubc_flush_pending(). + */ + struct cpumask cpumask; + + /* True if any bit in cpumask is set */ + bool flush_required; + + /* + * If true then the PTE was dirty when unmapped. The entry must be + * flushed before IO is initiated or a stale TLB entry potentially + * allows an update without redirtying the page. 
+ */ + bool writable; +#endif +}; + +#endif /* _LINUX_MM_TYPES_TASK_H */ diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 7b3d487d8b3f..b532ce524dae 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -14,7 +14,7 @@ * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virual RWW partitions - * @chipshift - Chip/partiton size 2^chipshift + * @chipshift - Chip/partition size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f1da8c8dd473..287f34161086 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -335,7 +335,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 0a3fadc32693..aa3cd0878270 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,43 @@ #include <linux/sched.h> #include <asm/irq.h> +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog_sched(void) +{ +} +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + /* * The run state of the lockup detectors is controlled by the content of the * 'watchdog_enabled' variable. 
Each lockup detector has its dedicated bit - diff --git a/include/linux/oom.h b/include/linux/oom.h index b4e36e92bc87..8a266e2be5a6 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -2,7 +2,7 @@ #define __INCLUDE_LINUX_OOM_H -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> diff --git a/include/linux/pci.h b/include/linux/pci.h index 282ed32244ce..eb3da1a04e6c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1323,6 +1323,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +int pci_irq_get_node(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1370,6 +1371,11 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + return first_online_node; +} #endif static inline int diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index a5f98d53d732..9b7dd59fe28d 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H +#include <linux/sched/task_stack.h> + struct perf_regs { __u64 abi; struct pt_regs *regs; diff --git a/include/linux/pid.h b/include/linux/pid.h index 23705a53abba..4d179316e431 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -1,7 +1,7 @@ #ifndef _LINUX_PID_H #define _LINUX_PID_H -#include <linux/rcupdate.h> +#include <linux/rculist.h> enum pid_type { @@ -191,10 +191,10 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_thread(pid, type, task) \ do_each_pid_task(pid, type, task) { \ struct task_struct *tg___ = task; \ - do { + for_each_thread(tg___, task) { #define while_each_pid_thread(pid, type, task) \ - } while_each_thread(tg___, task); \ + } \ task = tg___; \ } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h deleted file mode 100644 index 915d6b4f0f89..000000000000 --- a/include/linux/platform_data/rtc-m48t86.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * ST M48T86 / Dallas DS12887 RTC driver - * Copyright (c) 2006 Tower Technologies - * - * Author: Alessandro Zummo <a.zummo@towertech.it> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -struct m48t86_ops -{ - void (*writebyte)(unsigned char value, unsigned long addr); - unsigned char (*readbyte)(unsigned long addr); -}; diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index 18e908324549..a5c0a71ec914 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -47,10 +47,6 @@ #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) -#define DMACR_BURST (1 << 31) -#define DMACR_HM(x) (((x) & 0xf) << 16) -#define DMACR_TM(x) ((x) & 0xf) - struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h new file mode 100644 index 000000000000..3ab892208343 --- /dev/null +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -0,0 +1,44 @@ +/* + * Intel Atom platform clocks for BayTrail and CherryTrail SoC. + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea <irina.tirdea@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H +#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H + +/** + * struct pmc_clk - PMC platform clock configuration + * + * @name: identified, typically pmc_plt_clk_<x>, x=[0..5] + * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported + * @parent_name: one of 'xtal' or 'osc' + */ +struct pmc_clk { + const char *name; + unsigned long freq; + const char *parent_name; +}; + +/** + * struct pmc_clk_data - common PMC clock configuration + * + * @base: PMC clock register base offset + * @clks: pointer to set of registered clocks, typically 0..5 + */ +struct pmc_clk_data { + void __iomem *base; + const struct pmc_clk *clks; +}; + +#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/arch/x86/include/asm/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h index aa8744c77c6d..e4905fe69c38 100644 --- a/arch/x86/include/asm/pmc_atom.h +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -50,7 +50,7 @@ BIT_ORED_DEDICATED_IRQ_GPSC | \ BIT_SHARED_IRQ_GPSS) -/* The timers acumulate time spent in sleep state */ +/* The timers accumulate time spent in sleep state */ #define PMC_S0IR_TMR 0x80 #define PMC_S0I1_TMR 0x84 #define PMC_S0I2_TMR 0x88 diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index d4d34791e463..032b55909145 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier); int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier); -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } + +static inline s32 
dev_pm_qos_raw_read_value(struct device *dev) +{ + return IS_ERR_OR_NULL(dev->power.qos) ? + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev, static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier) { return 0; } -static inline int dev_pm_qos_add_global_notifier( - struct notifier_block *notifier) - { return 0; } -static inline int dev_pm_qos_remove_global_notifier( - struct notifier_block *notifier) - { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; @@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; } #endif #endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7eeceac52dea..cae461224948 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -55,6 +55,27 @@ /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). + */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index e0e539321ab9..422bc2e4cb6a 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -3,6 +3,7 @@ #include <linux/compiler.h> /* For unlikely. */ #include <linux/sched.h> /* For struct task_struct. */ +#include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */ #include <linux/err.h> /* for IS_ERR_VALUE */ #include <linux/bug.h> /* For BUG_ON. */ #include <linux/pid_namespace.h> /* For task_active_pid_ns. 
*/ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2c6c5114c089..08fad7c6a471 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -287,8 +287,6 @@ struct pwm_ops { * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier - * @can_sleep: must be true if the .config(), .enable() or .disable() - * operations may sleep */ struct pwm_chip { struct device *dev; @@ -302,7 +300,6 @@ struct pwm_chip { struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; - bool can_sleep; }; /** @@ -451,8 +448,6 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); - -bool pwm_can_sleep(struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -566,11 +561,6 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } - -static inline bool pwm_can_sleep(struct pwm_device *pwm) -{ - return false; -} #endif static inline void pwm_apply_args(struct pwm_device *pwm) @@ -613,18 +603,25 @@ struct pwm_lookup { const char *con_id; unsigned int period; enum pwm_polarity polarity; + const char *module; /* optional, may be NULL */ }; -#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - .period = _period, \ - .polarity = _polarity \ +#define PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, \ + _period, _polarity, _module) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .period = _period, \ + .polarity = _polarity, \ + .module = _module, \ } +#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ + PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \ + _polarity, NULL) + #if IS_ENABLED(CONFIG_PWM) void pwm_add_table(struct pwm_lookup *table, size_t num); void pwm_remove_table(struct pwm_lookup *table, size_t num); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52bda854593b..3e5735064b71 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -22,11 +22,13 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/preempt.h> -#include <linux/types.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/preempt.h> #include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/types.h> /* * The bottom two bits of the slot determine how the remaining bits in the @@ -94,7 +96,7 @@ struct radix_tree_node { unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ struct radix_tree_node *parent; /* Used when ascending tree */ - void *private_data; /* For tree user */ + struct radix_tree_root *root; /* The tree we belong to */ union { struct list_head private_list; /* For tree user */ struct rcu_head rcu_head; /* Used when freeing node */ @@ -103,7 +105,10 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* root tags are stored in gfp_mask, shifted by 
__GFP_BITS_SHIFT */ +/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ +#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) + struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; @@ -123,7 +128,7 @@ do { \ (root)->rnode = NULL; \ } while (0) -static inline bool radix_tree_empty(struct radix_tree_root *root) +static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->rnode == NULL; } @@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) */ /** - * radix_tree_deref_slot - dereference a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. + * radix_tree_deref_slot - dereference a slot + * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is @@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot(void **pslot) +static inline void *radix_tree_deref_slot(void __rcu **slot) { - return rcu_dereference(*pslot); + return rcu_dereference(*slot); } /** - * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. - * - * Similar to radix_tree_deref_slot but only used during migration when a pages - * mapping is being moved. The caller does not hold the RCU read lock but it - * must hold the tree lock to prevent parallel updates. + * radix_tree_deref_slot_protected - dereference a slot with tree lock held + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * Similar to radix_tree_deref_slot. The caller does not hold the RCU read + * lock but it must hold the tree lock to prevent parallel updates. + * + * Return: entry stored in that slot. 
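[Editor's sketch, not part of the patch] The reworked kernel-doc above distinguishes radix_tree_deref_slot(), for readers holding only rcu_read_lock(), from radix_tree_deref_slot_protected(), for updaters holding the tree lock. A rough illustration under assumed locking (my_tree and tree_lock are made up for the example):

	#include <linux/radix-tree.h>

	static RADIX_TREE(my_tree, GFP_KERNEL);	/* writers serialized by tree_lock */
	static DEFINE_SPINLOCK(tree_lock);

	static void *lookup_rcu(unsigned long index)
	{
		void __rcu **slot;
		void *entry;

		rcu_read_lock();
	repeat:
		slot = radix_tree_lookup_slot(&my_tree, index);
		entry = slot ? radix_tree_deref_slot(slot) : NULL;
		if (radix_tree_deref_retry(entry))
			goto repeat;		/* slot moved under us, look it up again */
		rcu_read_unlock();
		return entry;
	}

	static void *lookup_locked(unsigned long index)
	{
		void __rcu **slot;
		void *entry = NULL;

		spin_lock(&tree_lock);		/* excludes all updaters, no retry needed */
		slot = radix_tree_lookup_slot(&my_tree, index);
		if (slot)
			entry = radix_tree_deref_slot_protected(slot, &tree_lock);
		spin_unlock(&tree_lock);
		return entry;
	}
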
*/ -static inline void *radix_tree_deref_slot_protected(void **pslot, +static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { - return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); + return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** @@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *root, unsigned long index, +int __radix_tree_create(struct radix_tree_root *, unsigned long index, unsigned order, struct radix_tree_node **nodep, - void ***slotp); + void __rcu ***slotp); int __radix_tree_insert(struct radix_tree_root *, unsigned long index, unsigned order, void *); static inline int radix_tree_insert(struct radix_tree_root *root, @@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root, { return __radix_tree_insert(root, index, 0, entry); } -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, + struct radix_tree_node **nodep, void __rcu ***slotp); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, + unsigned long index); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); -void __radix_tree_replace(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, +void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot, void *entry, radix_tree_update_node_t update_node, void *private); void radix_tree_iter_replace(struct radix_tree_root *, - const struct radix_tree_iter *, void **slot, void *item); -void radix_tree_replace_slot(struct radix_tree_root *root, - void **slot, void *item); -void __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, + const struct radix_tree_iter *, void __rcu **slot, void *entry); +void radix_tree_replace_slot(struct radix_tree_root *, + void __rcu **slot, void *entry); +void __radix_tree_delete_node(struct radix_tree_root *, + struct radix_tree_node *, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot); -unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, +void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot); +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int 
radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); -void *radix_tree_tag_set(struct radix_tree_root *root, +void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); -void *radix_tree_tag_clear(struct radix_tree_root *root, +void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *root, +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { @@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, void *); +void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *, + gfp_t, int end); -#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ -#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ -#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; /** * radix_tree_iter_init - initialize radix tree iterator @@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index, * @start: iteration starting index * Returns: NULL */ -static __always_inline void ** +static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* @@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. 
+ */ +static inline void __rcu ** +radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. + */ +static inline void __rcu ** +radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * @@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * and continue the iteration. */ static inline __must_check -void **radix_tree_iter_retry(struct radix_tree_iter *iter) +void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; @@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. */ -void **__must_check radix_tree_iter_resume(void **slot, +void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** @@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) } #ifdef CONFIG_RADIX_TREE_MULTIORDER -void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, - unsigned flags); +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags); #else /* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void ** __radix_tree_next_slot(void **slot, +static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { return slot; @@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot, * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
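[Editor's sketch, not part of the patch] The iterator helpers documented above are normally used through radix_tree_for_each_slot(); the retry path covers the case where a concurrent update invalidates the current chunk. A sketch of the canonical RCU walk, reusing the my_tree from the previous sketch:

	static void walk_tree(void)
	{
		struct radix_tree_iter iter;
		void __rcu **slot;

		rcu_read_lock();
		radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
			void *entry = radix_tree_deref_slot(slot);

			if (radix_tree_deref_retry(entry)) {
				/* Chunk invalidated by a concurrent update. */
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/* ... use entry, found at iter.index ... */
		}
		rcu_read_unlock();
	}

The new radix_tree_iter_lookup() and radix_tree_iter_find() helpers added above reuse the same radix_tree_next_chunk() machinery for single lookups, filling in the iterator so the caller can continue iterating from the entry it found.
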
*/ -static __always_inline void ** -radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) +static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; @@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) return NULL; found: - if (unlikely(radix_tree_is_internal_node(*slot))) + if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) return __radix_tree_next_slot(slot, iter, flags); return slot; } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6ade6a52d9d4..de88b33c0974 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,7 +40,6 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> -#include <linux/completion.h> #include <linux/debugobjects.h> #include <linux/bug.h> #include <linux/compiler.h> @@ -226,45 +225,6 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); -/* - * Structure allowing asynchronous waiting on RCU. - */ -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; -void wakeme_after_rcu(struct rcu_head *head); - -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, - struct rcu_synchronize *rs_array); - -#define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ -} while (0) - -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) - -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for the flavors to wait on. - * - * This macro waits concurrently for multiple flavors of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called - * is automatically a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - /** * call_rcu_tasks() - Queue an RCU for invocation task-based grace period * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h new file mode 100644 index 000000000000..e774b4f5f220 --- /dev/null +++ b/include/linux/rcupdate_wait.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H +#define _LINUX_SCHED_RCUPDATE_WAIT_H + +/* + * RCU synchronization types and methods: + */ + +#include <linux/rcupdate.h> +#include <linux/completion.h> + +/* + * Structure allowing asynchronous waiting on RCU. + */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) 
\ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. + * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4f9b2fa2173d..b452953e21c8 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -53,15 +53,8 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -static inline void rcu_barrier_bh(void) -{ - wait_rcu_gp(call_rcu_bh); -} - -static inline void rcu_barrier_sched(void) -{ - wait_rcu_gp(call_rcu_sched); -} +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); static inline void synchronize_rcu_expedited(void) { diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 600aadf9cca4..0023fee4bbbc 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -1,54 +1,10 @@ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H -/* - * Variant of atomic_t specialized for reference counts. - * - * The interface matches the atomic_t interface (to aid in porting) but only - * provides the few functions one should use for reference counting. - * - * It differs in that the counter saturates at UINT_MAX and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free issues. - * - * Memory ordering rules are slightly relaxed wrt regular atomic_t functions - * and provide only what is strictly required for refcounts. - * - * The increments are fully relaxed; these will not provide ordering. The - * rationale is that whatever is used to obtain the object we're increasing the - * reference count on will provide the ordering. For locked data structures, - * its the lock acquire, for RCU/lockless data structures its the dependent - * load. - * - * Do note that inc_not_zero() provides a control dependency which will order - * future stores against the inc, this ensures we'll never modify the object - * if we did not in fact acquire a reference. - * - * The decrements will provide release order, such that all the prior loads and - * stores will be issued before, it also provides a control dependency, which - * will order us against the subsequent free(). - * - * The control dependency is against the load of the cmpxchg (ll/sc) that - * succeeded. This means the stores aren't fully ordered, but this is fine - * because the 1->0 transition indicates no concurrency. - * - * Note that the allocator is responsible for ordering things between free() - * and alloc(). 
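[Editor's sketch, not part of the patch] With wait_rcu_gp() and synchronize_rcu_mult() moved out of <linux/rcupdate.h> into the new <linux/rcupdate_wait.h>, the few callers need the new include. A minimal sketch based on the example in the kernel-doc above (my_teardown is a hypothetical caller):

	#include <linux/rcupdate_wait.h>

	static void my_teardown(void)
	{
		/* Wait for normal RCU and RCU-bh grace periods concurrently. */
		synchronize_rcu_mult(call_rcu, call_rcu_bh);
	}
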
- * - */ - #include <linux/atomic.h> -#include <linux/bug.h> #include <linux/mutex.h> #include <linux/spinlock.h> - -#ifdef CONFIG_DEBUG_REFCOUNT -#define REFCOUNT_WARN(cond, str) WARN_ON(cond) -#define __refcount_check __must_check -#else -#define REFCOUNT_WARN(cond, str) (void)(cond) -#define __refcount_check -#endif +#include <linux/kernel.h> typedef struct refcount_struct { atomic_t refs; @@ -66,229 +22,21 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -static inline __refcount_check -bool refcount_add_not_zero(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (!val) - return false; - - if (unlikely(val == UINT_MAX)) - return true; - - new = val + i; - if (new < val) - new = UINT_MAX; - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -static inline void refcount_add(unsigned int i, refcount_t *r) -{ - REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - */ -static inline __refcount_check -bool refcount_inc_not_zero(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - new = val + 1; - - if (!val) - return false; - - if (unlikely(!new)) - return true; - - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -/* - * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller already has a - * reference on the object, will WARN when this is not so. - */ -static inline void refcount_inc(refcount_t *r) -{ - REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_sub_and_test(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (unlikely(val == UINT_MAX)) - return false; - - new = val - i; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return false; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return !new; -} - -static inline __refcount_check -bool refcount_dec_and_test(refcount_t *r) -{ - return refcount_sub_and_test(1, r); -} +extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); +extern void refcount_add(unsigned int i, refcount_t *r); -/* - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. 
- */ -static inline -void refcount_dec(refcount_t *r) -{ - REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); -} - -/* - * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the - * success thereof. - * - * Like all decrement operations, it provides release memory order and provides - * a control dependency. - * - * It can be used like a try-delete operator; this explicit case is provided - * and not cmpxchg in generic, because that would allow implementing unsafe - * operations. - */ -static inline __refcount_check -bool refcount_dec_if_one(refcount_t *r) -{ - return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; -} - -/* - * No atomic_t counterpart, it decrements unless the value is 1, in which case - * it will return false. - * - * Was often done like: atomic_add_unless(&var, -1, 1) - */ -static inline __refcount_check -bool refcount_dec_not_one(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); +extern __must_check bool refcount_inc_not_zero(refcount_t *r); +extern void refcount_inc(refcount_t *r); - for (;;) { - if (unlikely(val == UINT_MAX)) - return true; +extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +extern void refcount_sub(unsigned int i, refcount_t *r); - if (val == 1) - return false; +extern __must_check bool refcount_dec_and_test(refcount_t *r); +extern void refcount_dec(refcount_t *r); - new = val - 1; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return true; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return true; -} - -/* - * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail - * to decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - mutex_lock(lock); - if (!refcount_dec_and_test(r)) { - mutex_unlock(lock); - return false; - } - - return true; -} - -/* - * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. 
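[Editor's sketch, not part of the patch] The refcount.h change above moves the implementation out of line but leaves the API unchanged for callers. A sketch of the usual get/put pattern, assuming a hypothetical struct foo and the refcount_set() initializer from the same header:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical refcounted object */
		refcount_t ref;
		/* ... payload ... */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			refcount_set(&f->ref, 1);	/* one reference for the caller */
		return f;
	}

	static struct foo *foo_get(struct foo *f)
	{
		/* Saturates at UINT_MAX and WARNs rather than wrapping to zero. */
		return refcount_inc_not_zero(&f->ref) ? f : NULL;
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->ref))
			kfree(f);
	}
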
- */ -static inline __refcount_check -bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - spin_lock(lock); - if (!refcount_dec_and_test(r)) { - spin_unlock(lock); - return false; - } - - return true; -} +extern __must_check bool refcount_dec_if_one(refcount_t *r); +extern __must_check bool refcount_dec_not_one(refcount_t *r); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index f2e12a845910..092292b6675e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -25,7 +25,7 @@ #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> /* * The end of the chain is marked with a special nulls marks which has diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h new file mode 100644 index 000000000000..ea05f6c51413 --- /dev/null +++ b/include/linux/rodata_test.h @@ -0,0 +1,23 @@ +/* + * rodata_test.h: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _RODATA_TEST_H +#define _RODATA_TEST_H + +#ifdef CONFIG_DEBUG_RODATA_TEST +extern const int rodata_test_data; +void rodata_test(void); +#else +static inline void rodata_test(void) {} +#endif + +#endif /* _RODATA_TEST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 451e241f32c5..d67eee84fd43 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,197 +1,57 @@ #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H -#include <uapi/linux/sched.h> - -#include <linux/sched/prio.h> - - -struct sched_param { - int sched_priority; -}; - -#include <asm/param.h> /* for HZ */ +/* + * Define 'struct task_struct' and provide the main scheduler + * APIs (schedule(), wakeup variants, etc.) 
+ */ -#include <linux/capability.h> -#include <linux/threads.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timex.h> -#include <linux/jiffies.h> -#include <linux/plist.h> -#include <linux/rbtree.h> -#include <linux/thread_info.h> -#include <linux/cpumask.h> -#include <linux/errno.h> -#include <linux/nodemask.h> -#include <linux/mm_types.h> -#include <linux/preempt.h> +#include <uapi/linux/sched.h> -#include <asm/page.h> -#include <asm/ptrace.h> +#include <asm/current.h> -#include <linux/smp.h> +#include <linux/pid.h> #include <linux/sem.h> #include <linux/shm.h> -#include <linux/signal.h> -#include <linux/compiler.h> -#include <linux/completion.h> -#include <linux/pid.h> -#include <linux/percpu.h> -#include <linux/topology.h> +#include <linux/kcov.h> +#include <linux/mutex.h> +#include <linux/plist.h> +#include <linux/hrtimer.h> #include <linux/seccomp.h> +#include <linux/nodemask.h> #include <linux/rcupdate.h> -#include <linux/rculist.h> -#include <linux/rtmutex.h> - -#include <linux/time.h> -#include <linux/param.h> #include <linux/resource.h> -#include <linux/timer.h> -#include <linux/hrtimer.h> -#include <linux/kcov.h> -#include <linux/task_io_accounting.h> #include <linux/latencytop.h> -#include <linux/cred.h> -#include <linux/llist.h> -#include <linux/uidgid.h> -#include <linux/gfp.h> -#include <linux/magic.h> -#include <linux/cgroup-defs.h> - -#include <asm/processor.h> - -#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ - -/* - * Extended scheduling parameters data structure. - * - * This is needed because the original struct sched_param can not be - * altered without introducing ABI issues with legacy applications - * (e.g., in sched_getparam()). - * - * However, the possibility of specifying more than just a priority for - * the tasks may be useful for a wide variety of application fields, e.g., - * multimedia, streaming, automation and control, and many others. - * - * This variant (sched_attr) is meant at describing a so-called - * sporadic time-constrained task. In such model a task is specified by: - * - the activation period or minimum instance inter-arrival time; - * - the maximum (or average, depending on the actual scheduling - * discipline) computation time of all instances, a.k.a. runtime; - * - the deadline (relative to the actual activation time) of each - * instance. - * Very briefly, a periodic (sporadic) task asks for the execution of - * some specific computation --which is typically called an instance-- - * (at most) every period. Moreover, each instance typically lasts no more - * than the runtime and must be completed by time instant t equal to - * the instance activation time + the deadline. - * - * This is reflected by the actual fields of the sched_attr structure: - * - * @size size of the structure, for fwd/bwd compat. - * - * @sched_policy task's scheduling policy - * @sched_flags for customizing the scheduler behaviour - * @sched_nice task's nice value (SCHED_NORMAL/BATCH) - * @sched_priority task's static priority (SCHED_FIFO/RR) - * @sched_deadline representative of the task's deadline - * @sched_runtime representative of the task's runtime - * @sched_period representative of the task's period - * - * Given this task model, there are a multiplicity of scheduling algorithms - * and policies, that can be used to ensure all the tasks will make their - * timing constraints. - * - * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the - * only user of this new interface. 
More information about the algorithm - * available in the scheduling class file or in Documentation/. - */ -struct sched_attr { - u32 size; - - u32 sched_policy; - u64 sched_flags; - - /* SCHED_NORMAL, SCHED_BATCH */ - s32 sched_nice; - - /* SCHED_FIFO, SCHED_RR */ - u32 sched_priority; - - /* SCHED_DEADLINE */ - u64 sched_runtime; - u64 sched_deadline; - u64 sched_period; -}; +#include <linux/sched/prio.h> +#include <linux/signal_types.h> +#include <linux/mm_types_task.h> +#include <linux/task_io_accounting.h> -struct futex_pi_state; -struct robust_list_head; +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct backing_dev_info; struct bio_list; -struct fs_struct; -struct perf_event_context; struct blk_plug; -struct filename; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; struct nameidata; - -#define VMACACHE_BITS 2 -#define VMACACHE_SIZE (1U << VMACACHE_BITS) -#define VMACACHE_MASK (VMACACHE_SIZE - 1) - -/* - * These are the constant used to fake the fixed-point load-average - * counting. Some notes: - * - 11 bit fractions expand to 22 bits by the multiplies: this gives - * a load-average precision of 10 bits integer + 11 bits fractional - * - if you want to count load-averages more often, you need more - * precision, or rounding will get you. With 2-second counting freq, - * the EXP_n values would be 1981, 2034 and 2043 if still using only - * 11 bit fractions. - */ -extern unsigned long avenrun[]; /* Load averages */ -extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); - -#define FSHIFT 11 /* nr of bits of precision */ -#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ -#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ -#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ -#define EXP_5 2014 /* 1/exp(5sec/5min) */ -#define EXP_15 2037 /* 1/exp(5sec/15min) */ - -#define CALC_LOAD(load,exp,n) \ - load *= exp; \ - load += n*(FIXED_1-exp); \ - load >>= FSHIFT; - -extern unsigned long total_forks; -extern int nr_threads; -DECLARE_PER_CPU(unsigned long, process_counts); -extern int nr_processes(void); -extern unsigned long nr_running(void); -extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); - -extern void calc_global_load(unsigned long ticks); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -extern void dump_cpu_task(int cpu); - +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct sched_attr; +struct sched_param; struct seq_file; -struct cfs_rq; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; struct task_group; -#ifdef CONFIG_SCHED_DEBUG -extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); -extern void proc_sched_set_task(struct task_struct *p); -#endif /* * Task state bitmask. NOTE! These bits are also @@ -203,53 +63,53 @@ extern void proc_sched_set_task(struct task_struct *p); * modifying one set can't modify the other one by * mistake. 
*/ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 -/* in tsk->exit_state */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_NOLOAD 1024 -#define TASK_NEW 2048 -#define TASK_STATE_MAX 4096 - -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" - -extern char ___assert_task_state[1 - 2*!!( - sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; - -/* Convenience macros for the sake of set_current_state */ -#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) -#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) - -#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) - -/* Convenience macros for the sake of wake_up */ -#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) - -/* get_task_state() */ -#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) + +/* Used in tsk->state: */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* Used in tsk->exit_state: */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* Used in tsk->state again: */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 + +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" + +/* Convenience macros for the sake of set_current_state: */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up(): */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state(): */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + +#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP @@ -299,139 +159,24 @@ extern char ___assert_task_state[1 - 2*!!( * * Also see the comments of try_to_wake_up(). 
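[Editor's sketch, not part of the patch] The comment above is about the barrier pairing between set_current_state() on the sleeping side and the wakeup primitive on the other. The canonical pattern it describes looks roughly like this; condition and waiter_task are assumptions for illustration:

	#include <linux/sched.h>

	static bool condition;				/* hypothetical wakeup condition */
	static struct task_struct *waiter_task;	/* hypothetical sleeping task */

	static void waiter(void)
	{
		for (;;) {
			/* Store the new state, then a full barrier before the test. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (condition)
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	static void waker(void)
	{
		condition = true;
		/* wake_up_process() implies enough ordering against the store above. */
		wake_up_process(waiter_task);
	}
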
*/ -#define __set_current_state(state_value) \ - do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) - -#endif - -/* Task command name length */ -#define TASK_COMM_LEN 16 - -#include <linux/spinlock.h> - -/* - * This serializes "schedule()" and also protects - * the run-queue from deletions/modifications (but - * _adding_ to the beginning of the run-queue has - * a separate lock). - */ -extern rwlock_t tasklist_lock; -extern spinlock_t mmlist_lock; - -struct task_struct; - -#ifdef CONFIG_PROVE_RCU -extern int lockdep_tasklist_lock_is_held(void); -#endif /* #ifdef CONFIG_PROVE_RCU */ - -extern void sched_init(void); -extern void sched_init_smp(void); -extern asmlinkage void schedule_tail(struct task_struct *prev); -extern void init_idle(struct task_struct *idle, int cpu); -extern void init_idle_bootup_task(struct task_struct *idle); - -extern cpumask_var_t cpu_isolated_map; - -extern int runqueue_is_locked(int cpu); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); -extern int get_nohz_timer_target(void); -#else -static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } +#define __set_current_state(state_value) do { current->state = (state_value); } while (0) +#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) #endif -/* - * Only dump TASK_* tasks. (0 for all tasks) - */ -extern void show_state_filter(unsigned long state_filter); - -static inline void show_state(void) -{ - show_state_filter(0); -} - -extern void show_regs(struct pt_regs *); +/* Task command name length: */ +#define TASK_COMM_LEN 16 -/* - * TASK is a pointer to the task whose backtrace we want to see (or NULL for current - * task), SP is the stack pointer of the first frame that should be shown in the back - * trace (or NULL if the entire call-chain of the task should be shown). 
- */ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern cpumask_var_t cpu_isolated_map; -extern void cpu_init (void); -extern void trap_init(void); -extern void update_process_times(int user); extern void scheduler_tick(void); -extern int sched_cpu_starting(unsigned int cpu); -extern int sched_cpu_activate(unsigned int cpu); -extern int sched_cpu_deactivate(unsigned int cpu); -#ifdef CONFIG_HOTPLUG_CPU -extern int sched_cpu_dying(unsigned int cpu); -#else -# define sched_cpu_dying NULL -#endif - -extern void sched_show_task(struct task_struct *p); - -#ifdef CONFIG_LOCKUP_DETECTOR -extern void touch_softlockup_watchdog_sched(void); -extern void touch_softlockup_watchdog(void); -extern void touch_softlockup_watchdog_sync(void); -extern void touch_all_softlockup_watchdogs(void); -extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); -extern unsigned int softlockup_panic; -extern unsigned int hardlockup_panic; -void lockup_detector_init(void); -#else -static inline void touch_softlockup_watchdog_sched(void) -{ -} -static inline void touch_softlockup_watchdog(void) -{ -} -static inline void touch_softlockup_watchdog_sync(void) -{ -} -static inline void touch_all_softlockup_watchdogs(void) -{ -} -static inline void lockup_detector_init(void) -{ -} -#endif - -#ifdef CONFIG_DETECT_HUNG_TASK -void reset_hung_task_detector(void); -#else -static inline void reset_hung_task_detector(void) -{ -} -#endif - -/* Attach to any functions which should be ignored in wchan output. */ -#define __sched __attribute__((__section__(".sched.text"))) +#define MAX_SCHEDULE_TIMEOUT LONG_MAX -/* Linker adds these: start and end of __sched functions */ -extern char __sched_text_start[], __sched_text_end[]; - -/* Is this address in the __sched functions? 
*/ -extern int in_sched_functions(unsigned long addr); - -#define MAX_SCHEDULE_TIMEOUT LONG_MAX -extern signed long schedule_timeout(signed long timeout); -extern signed long schedule_timeout_interruptible(signed long timeout); -extern signed long schedule_timeout_killable(signed long timeout); -extern signed long schedule_timeout_uninterruptible(signed long timeout); -extern signed long schedule_timeout_idle(signed long timeout); +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); @@ -440,112 +185,6 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); -void __noreturn do_task_dead(void); - -struct nsproxy; -struct user_namespace; - -#ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); -extern unsigned long -arch_get_unmapped_area(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long); -extern unsigned long -arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} -#endif - -#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -#define SUID_DUMP_USER 1 /* Dump as user of process */ -#define SUID_DUMP_ROOT 2 /* Dump as root */ - -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - -extern void set_dumpable(struct mm_struct *mm, int value); -/* - * This returns the actual value of the suid_dumpable flag. For things - * that are using this for checking for privilege transitions, it must - * test against SUID_DUMP_USER rather than treating it as a boolean - * value. 
- */ -static inline int __get_dumpable(unsigned long mm_flags) -{ - return mm_flags & MMF_DUMPABLE_MASK; -} - -static inline int get_dumpable(struct mm_struct *mm) -{ - return __get_dumpable(mm->flags); -} - -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) - -struct sighand_struct { - atomic_t count; - struct k_sigaction action[_NSIG]; - spinlock_t siglock; - wait_queue_head_t signalfd_wqh; -}; - -struct pacct_struct { - int ac_flag; - long ac_exitcode; - unsigned long ac_mem; - u64 ac_utime, ac_stime; - unsigned long ac_minflt, ac_majflt; -}; - -struct cpu_itimer { - u64 expires; - u64 incr; -}; - /** * struct prev_cputime - snaphsot of system and user cputime * @utime: time spent in user mode @@ -557,20 +196,12 @@ struct cpu_itimer { */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; + u64 utime; + u64 stime; + raw_spinlock_t lock; #endif }; -static inline void prev_cputime_init(struct prev_cputime *prev) -{ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - prev->utime = prev->stime = 0; - raw_spin_lock_init(&prev->lock); -#endif -} - /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in nanoseconds @@ -582,376 +213,35 @@ static inline void prev_cputime_init(struct prev_cputime *prev) * these counts together and treat all three of them in parallel. */ struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; -}; - -/* Alternate field names when used to cache expirations. */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime - -/* - * This is the atomic variant of task_cputime, which can be used for - * storing and updating task_cputime statistics without locking. 
- */ -struct task_cputime_atomic { - atomic64_t utime; - atomic64_t stime; - atomic64_t sum_exec_runtime; -}; - -#define INIT_CPUTIME_ATOMIC \ - (struct task_cputime_atomic) { \ - .utime = ATOMIC64_INIT(0), \ - .stime = ATOMIC64_INIT(0), \ - .sum_exec_runtime = ATOMIC64_INIT(0), \ - } - -#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/* - * Disable preemption until the scheduler is running -- use an unconditional - * value so that it also works on !PREEMPT_COUNT kernels. - * - * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). - */ -#define INIT_PREEMPT_COUNT PREEMPT_OFFSET - -/* - * Initial preempt_count value; reflects the preempt_count schedule invariant - * which states that during context switches: - * - * preempt_count() == 2*PREEMPT_DISABLE_OFFSET - * - * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. - * Note: See finish_task_switch(). - */ -#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/** - * struct thread_group_cputimer - thread group interval timer counts - * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. - * - * This structure contains the version of task_cputime, above, that is - * used for thread group CPU timer calculations. - */ -struct thread_group_cputimer { - struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; -}; - -#include <linux/rwsem.h> -struct autogroup; - -/* - * NOTE! "signal_struct" does not have its own - * locking, because a shared signal_struct always - * implies a shared sighand_struct, so locking - * sighand_struct is always a proper superset of - * the locking of signal_struct. - */ -struct signal_struct { - atomic_t sigcnt; - atomic_t live; - int nr_threads; - struct list_head thread_head; - - wait_queue_head_t wait_chldexit; /* for wait4() */ - - /* current thread group signal load-balancing target: */ - struct task_struct *curr_target; - - /* shared signal handling: */ - struct sigpending shared_pending; - - /* thread group exit support */ - int group_exit_code; - /* overloaded: - * - notify group_exit_task when ->count is equal to notify_count - * - everyone except group_exit_task is stopped during signal delivery - * of fatal signals, group_exit_task processes the signal. - */ - int notify_count; - struct task_struct *group_exit_task; - - /* thread group stop support, overloads group_exit_code too */ - int group_stop_count; - unsigned int flags; /* see SIGNAL_* flags below */ - - /* - * PR_SET_CHILD_SUBREAPER marks a process, like a service - * manager, to re-parent orphan (double-forking) child processes - * to this process instead of 'init'. The service manager is - * able to receive SIGCHLD signals and is able to investigate - * the process until it calls wait(). All children of this - * process will inherit a flag if they should look for a - * child_subreaper process at exit. 
- */ - unsigned int is_child_subreaper:1; - unsigned int has_child_subreaper:1; - -#ifdef CONFIG_POSIX_TIMERS - - /* POSIX.1b Interval Timers */ - int posix_timer_id; - struct list_head posix_timers; - - /* ITIMER_REAL timer for the process */ - struct hrtimer real_timer; - ktime_t it_real_incr; - - /* - * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use - * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these - * values are defined to 0 and 1 respectively - */ - struct cpu_itimer it[2]; - - /* - * Thread group totals for process CPU timers. - * See thread_group_cputimer(), et al, for details. - */ - struct thread_group_cputimer cputimer; - - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - -#endif - - struct pid *leader_pid; - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; -#endif - - struct pid *tty_old_pgrp; - - /* boolean value for session group leader */ - int leader; - - struct tty_struct *tty; /* NULL if no tty */ - -#ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; -#endif - /* - * Cumulative resource counters for dead threads in the group, - * and for reaped dead child processes forked by this group. - * Live threads maintain their own counters and add to these - * in __exit_signal, except for the group leader. - */ - seqlock_t stats_lock; - u64 utime, stime, cutime, cstime; - u64 gtime; - u64 cgtime; - struct prev_cputime prev_cputime; - unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; - unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; - unsigned long inblock, oublock, cinblock, coublock; - unsigned long maxrss, cmaxrss; - struct task_io_accounting ioac; - - /* - * Cumulative ns of schedule CPU time fo dead threads in the - * group, not including a zombie group leader, (This only differs - * from jiffies_to_ns(utime + stime) if sched_clock uses something - * other than jiffies.) - */ - unsigned long long sum_sched_runtime; - - /* - * We don't bother to synchronize most readers of this at all, - * because there is no reader checking a limit that actually needs - * to get both rlim_cur and rlim_max atomically, and either one - * alone is a single word that can safely be read normally. - * getrlimit/setrlimit use task_lock(current->group_leader) to - * protect this instead of the siglock, because they really - * have no need to disable irqs. - */ - struct rlimit rlim[RLIM_NLIMITS]; - -#ifdef CONFIG_BSD_PROCESS_ACCT - struct pacct_struct pacct; /* per-process accounting information */ -#endif -#ifdef CONFIG_TASKSTATS - struct taskstats *stats; -#endif -#ifdef CONFIG_AUDIT - unsigned audit_tty; - struct tty_audit_buf *tty_audit_buf; -#endif - - /* - * Thread is the potential origin of an oom condition; kill first on - * oom - */ - bool oom_flag_origin; - short oom_score_adj; /* OOM kill score adjustment */ - short oom_score_adj_min; /* OOM kill score adjustment min value. - * Only settable by CAP_SYS_RESOURCE. */ - struct mm_struct *oom_mm; /* recorded mm when the thread group got - * killed by the oom killer */ - - struct mutex cred_guard_mutex; /* guard against foreign influences on - * credential calculations - * (notably. ptrace) */ + u64 utime; + u64 stime; + unsigned long long sum_exec_runtime; }; -/* - * Bits in flags field of signal_struct. 
- */ -#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ -#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ -#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ -#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ -/* - * Pending notifications to parent. - */ -#define SIGNAL_CLD_STOPPED 0x00000010 -#define SIGNAL_CLD_CONTINUED 0x00000020 -#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) - -#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ - -#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ - SIGNAL_STOP_CONTINUED) - -static inline void signal_set_stop_flags(struct signal_struct *sig, - unsigned int flags) -{ - WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); - sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; -} - -/* If true, all threads except ->group_exit_task have pending SIGKILL */ -static inline int signal_group_exit(const struct signal_struct *sig) -{ - return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exit_task != NULL); -} - -/* - * Some day this will be a full-fledged user tracking system.. - */ -struct user_struct { - atomic_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? */ -#ifdef CONFIG_FANOTIFY - atomic_t fanotify_listeners; -#endif -#ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ -#endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ -#endif - unsigned long locked_shm; /* How many pages of mlocked shm ? */ - unsigned long unix_inflight; /* How many files in flight in unix sockets */ - atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ +/* Alternate field names when used on cache expirations: */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime -#ifdef CONFIG_KEYS - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ -#endif - - /* Hash table maintenance information */ - struct hlist_node uidhash_node; - kuid_t uid; - -#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) - atomic_long_t locked_vm; -#endif -}; - -extern int uids_sysfs_init(void); - -extern struct user_struct *find_user(kuid_t); - -extern struct user_struct root_user; -#define INIT_USER (&root_user) - - -struct backing_dev_info; -struct reclaim_state; - -#ifdef CONFIG_SCHED_INFO struct sched_info { - /* cumulative counters */ - unsigned long pcount; /* # of times run on this cpu */ - unsigned long long run_delay; /* time spent waiting on a runqueue */ - - /* timestamps */ - unsigned long long last_arrival,/* when we last ran on a cpu */ - last_queued; /* when we were last queued to run */ -}; -#endif /* CONFIG_SCHED_INFO */ +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ -#ifdef CONFIG_TASK_DELAY_ACCT -struct task_delay_info { - spinlock_t lock; - unsigned int flags; /* Private per-task flags */ + /* # of times we have run on this CPU: */ + unsigned long pcount; - /* For each stat XXX, add following, aligned appropriately - * - * struct timespec XXX_start, XXX_end; - * u64 XXX_delay; - * u32 XXX_count; - * - * Atomicity of updates to XXX_delay, XXX_count protected by - * single lock above (split into XXX_lock if contention is an issue). 
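The SIGNAL_STOP_STOPPED/SIGNAL_STOP_CONTINUED and SIGNAL_CLD_* flags moved in the hunk above track group-stop state and the pending notification to the parent. From the parent's side those transitions surface through wait(); a small sketch using nothing beyond standard job control:

/* stopcont.c - observe group-stop and continue notifications from the parent side. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        if (child == 0) {
                pause();                /* sit idle; signals drive the demo */
                _exit(0);
        }

        int status;

        kill(child, SIGSTOP);
        waitpid(child, &status, WUNTRACED);     /* group stop -> WIFSTOPPED */
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(child, SIGCONT);
        waitpid(child, &status, WCONTINUED);    /* SIGCONT -> WIFCONTINUED */
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(child, SIGKILL);
        waitpid(child, &status, 0);             /* final reap */
        return 0;
}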
- */ + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; - /* - * XXX_count is incremented on every XXX operation, the delay - * associated with the operation is added to XXX_delay. - * XXX_delay contains the accumulated delay time in nanoseconds. - */ - u64 blkio_start; /* Shared by blkio, swapin */ - u64 blkio_delay; /* wait for sync block io completion */ - u64 swapin_delay; /* wait for swapin block io completion */ - u32 blkio_count; /* total count of the number of sync block */ - /* io operations performed */ - u32 swapin_count; /* total count of the number of swapin block */ - /* io operations performed */ - - u64 freepages_start; - u64 freepages_delay; /* wait for memory reclaim */ - u32 freepages_count; /* total count of memory reclaim */ -}; -#endif /* CONFIG_TASK_DELAY_ACCT */ + /* Timestamps: */ -static inline int sched_info_on(void) -{ -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif -} + /* When did we last run on a CPU? */ + unsigned long long last_arrival; -#ifdef CONFIG_SCHEDSTATS -void force_schedstat_enabled(void); -#endif + /* When were we last queued to run? */ + unsigned long long last_queued; -enum cpu_idle_type { - CPU_IDLE, - CPU_NOT_IDLE, - CPU_NEWLY_IDLE, - CPU_MAX_IDLE_TYPES +#endif /* CONFIG_SCHED_INFO */ }; /* @@ -961,290 +251,12 @@ enum cpu_idle_type { * We define a basic fixed point arithmetic range, and then formalize * all these metrics based on that basic range. */ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) - -/* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -/* - * Wake-queues are lists of tasks with a pending wakeup, whose - * callers have already marked the task as woken internally, - * and can thus carry on. A common use case is being able to - * do the wakeups once the corresponding user lock as been - * released. - * - * We hold reference to each task in the list across the wakeup, - * thus guaranteeing that the memory is still valid by the time - * the actual wakeups are performed in wake_up_q(). - * - * One per task suffices, because there's never a need for a task to be - * in two wake queues simultaneously; it is forbidden to abandon a task - * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is - * already in a wake queue, the wakeup will happen soon and the second - * waker can just skip it. - * - * The DEFINE_WAKE_Q macro declares and initializes the list head. - * wake_up_q() does NOT reinitialize the list; it's expected to be - * called near the end of a function. Otherwise, the list can be - * re-initialized for later re-use by wake_q_init(). - * - * Note that this can cause spurious wakeups. schedule() callers - * must ensure the call is done inside a loop, confirming that the - * wakeup condition has in fact occurred. 
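The reworked struct sched_info above (pcount, run_delay, last_arrival, last_queued) is the data behind the per-task /proc/<pid>/schedstat file. A short reader, assuming a kernel built with CONFIG_SCHED_INFO and the documented three-field format (on-CPU time, run-queue wait time, number of timeslices):

/* schedstat.c - dump the sched_info counters exposed via /proc/self/schedstat.
 * The file is absent on kernels built without CONFIG_SCHED_INFO. */
#include <stdio.h>

int main(void)
{
        unsigned long long cpu_time_ns, run_delay_ns, pcount;
        FILE *f = fopen("/proc/self/schedstat", "r");

        if (!f || fscanf(f, "%llu %llu %llu",
                         &cpu_time_ns, &run_delay_ns, &pcount) != 3) {
                perror("/proc/self/schedstat");
                return 1;
        }
        fclose(f);

        printf("on-CPU time      : %llu ns\n", cpu_time_ns);
        printf("run-queue delay  : %llu ns (sched_info.run_delay)\n", run_delay_ns);
        printf("timeslices on CPU: %llu    (sched_info.pcount)\n", pcount);
        return 0;
}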
- */ -struct wake_q_node { - struct wake_q_node *next; -}; - -struct wake_q_head { - struct wake_q_node *first; - struct wake_q_node **lastp; -}; - -#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) - -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } - -static inline void wake_q_init(struct wake_q_head *head) -{ - head->first = WAKE_Q_TAIL; - head->lastp = &head->first; -} - -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); - -/* - * sched-domains (multiprocessor balancing) declarations: - */ -#ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ - -#ifdef CONFIG_SCHED_SMT -static inline int cpu_smt_flags(void) -{ - return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_SCHED_MC -static inline int cpu_core_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_NUMA -static inline int cpu_numa_flags(void) -{ - return SD_NUMA; -} -#endif - -extern int arch_asym_cpu_priority(int cpu); - -struct sched_domain_attr { - int relax_domain_level; -}; - -#define SD_ATTR_INIT (struct sched_domain_attr) { \ - .relax_domain_level = -1, \ -} - -extern int sched_domain_level_max; - -struct sched_group; - -struct sched_domain_shared { - atomic_t ref; - atomic_t nr_busy_cpus; - int has_idle_cores; -}; - -struct sched_domain { - /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ - struct sched_group *groups; /* the balancing groups of the domain */ - unsigned long min_interval; /* Minimum balance interval ms */ - unsigned long max_interval; /* Maximum balance interval ms */ - unsigned int busy_factor; /* less balancing by factor if busy */ - unsigned int imbalance_pct; /* No balance until over watermark */ - unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; - unsigned int smt_gain; - - int nohz_idle; /* NOHZ IDLE status */ - int flags; /* See SD_* */ - int level; - - /* Runtime fields. */ - unsigned long last_balance; /* init to jiffies. units in jiffies */ - unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ - unsigned int nr_balance_failed; /* initialise to 0 */ - - /* idle_balance() stats */ - u64 max_newidle_lb_cost; - unsigned long next_decay_max_lb_cost; - - u64 avg_scan_cost; /* select_idle_sibling */ - -#ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ - unsigned int lb_count[CPU_MAX_IDLE_TYPES]; - unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; - unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; - unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; - unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; - - /* Active load balancing */ - unsigned int alb_count; - unsigned int alb_failed; - unsigned int alb_pushed; - - /* SD_BALANCE_EXEC stats */ - unsigned int sbe_count; - unsigned int sbe_balanced; - unsigned int sbe_pushed; - - /* SD_BALANCE_FORK stats */ - unsigned int sbf_count; - unsigned int sbf_balanced; - unsigned int sbf_pushed; - - /* try_to_wake_up() stats */ - unsigned int ttwu_wake_remote; - unsigned int ttwu_move_affine; - unsigned int ttwu_move_balance; -#endif -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ - }; - struct sched_domain_shared *shared; - - unsigned int span_weight; - /* - * Span of all CPUs in this domain. - * - * NOTE: this field is variable length. (Allocated dynamically - * by attaching extra space to the end of the structure, - * depending on how many CPUs the kernel has booted up with) - */ - unsigned long span[0]; -}; - -static inline struct cpumask *sched_domain_span(struct sched_domain *sd) -{ - return to_cpumask(sd->span); -} - -extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new); - -/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ -cpumask_var_t *alloc_sched_domains(unsigned int ndoms); -void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); - -bool cpus_share_cache(int this_cpu, int that_cpu); - -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -typedef int (*sched_domain_flags_f)(void); - -#define SDTL_OVERLAP 0x01 - -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_domain_shared **__percpu sds; - struct sched_group **__percpu sg; - struct sched_group_capacity **__percpu sgc; -}; - -struct sched_domain_topology_level { - sched_domain_mask_f mask; - sched_domain_flags_f sd_flags; - int flags; - int numa_level; - struct sd_data data; -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif -}; - -extern void set_sched_topology(struct sched_domain_topology_level *tl); -extern void wake_up_if_idle(int cpu); - -#ifdef CONFIG_SCHED_DEBUG -# define SD_INIT_NAME(type) .name = #type -#else -# define SD_INIT_NAME(type) -#endif - -#else /* CONFIG_SMP */ - -struct sched_domain_attr; - -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} - -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} - -#endif /* !CONFIG_SMP */ - - -struct io_context; /* See blkdev.h */ - - -#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct *t); -#else -static inline void prefetch_stack(struct task_struct *t) { } -#endif - -struct audit_context; /* See audit.c */ -struct mempolicy; -struct pipe_inode_info; -struct uts_namespace; +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) struct load_weight { - unsigned long weight; - u32 inv_weight; + unsigned long weight; + u32 inv_weight; }; /* @@ -1300,71 +312,73 @@ struct load_weight { * issues. 
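SCHED_FIXEDPOINT_SHIFT/SCHED_FIXEDPOINT_SCALE, re-added just above, define the 10-bit fixed-point range used for load, utilization and CPU capacity values (SCHED_CAPACITY_SCALE, removed from this file further up, is defined as the same scale). A tiny illustration with made-up numbers of how a fractional utilization is represented and rescaled against a smaller CPU's capacity:

/* fixedpoint.c - illustrate the 10-bit fixed-point scale used for load/util/capacity. */
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT  10
#define SCHED_FIXEDPOINT_SCALE  (1L << SCHED_FIXEDPOINT_SHIFT)
#define SCHED_CAPACITY_SHIFT    SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)

int main(void)
{
        /* 50% utilization expressed in fixed point: 0.5 * 1024 = 512. */
        unsigned long util = SCHED_FIXEDPOINT_SCALE / 2;

        /* A hypothetical little CPU with 60% of the capacity of the biggest CPU. */
        unsigned long capacity = 6 * SCHED_CAPACITY_SCALE / 10;

        /* Scale the utilization by the capacity, staying in fixed point. */
        unsigned long scaled = util * capacity >> SCHED_CAPACITY_SHIFT;

        printf("util=%lu capacity=%lu scaled=%lu (out of %ld)\n",
               util, capacity, scaled, SCHED_CAPACITY_SCALE);
        return 0;
}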
*/ struct sched_avg { - u64 last_update_time, load_sum; - u32 util_sum, period_contrib; - unsigned long load_avg, util_avg; + u64 last_update_time; + u64 load_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long util_avg; }; -#ifdef CONFIG_SCHEDSTATS struct sched_statistics { - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - u64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; -}; +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; #endif +}; struct sched_entity { - struct load_weight load; /* for load-balancing */ - struct rb_node run_node; - struct list_head group_node; - unsigned int on_rq; + /* For load-balancing: */ + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; - u64 exec_start; - u64 sum_exec_runtime; - u64 vruntime; - u64 prev_sum_exec_runtime; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; - u64 nr_migrations; + u64 nr_migrations; -#ifdef CONFIG_SCHEDSTATS - struct sched_statistics statistics; -#endif + struct sched_statistics statistics; #ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; + int depth; + struct sched_entity *parent; /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; + struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; + struct cfs_rq *my_q; #endif #ifdef CONFIG_SMP @@ -1374,49 +388,49 @@ struct sched_entity { * Put into separate cache line so it does not * collide with read-mostly values above. 
*/ - struct sched_avg avg ____cacheline_aligned_in_smp; + struct sched_avg avg ____cacheline_aligned_in_smp; #endif }; struct sched_rt_entity { - struct list_head run_list; - unsigned long timeout; - unsigned long watchdog_stamp; - unsigned int time_slice; - unsigned short on_rq; - unsigned short on_list; - - struct sched_rt_entity *back; + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity *parent; + struct sched_rt_entity *parent; /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; + struct rt_rq *rt_rq; /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; + struct rt_rq *my_q; #endif }; struct sched_dl_entity { - struct rb_node rb_node; + struct rb_node rb_node; /* * Original scheduling parameters. Copied here from sched_attr * during sched_setattr(), they will remain the same until * the next sched_setattr(). */ - u64 dl_runtime; /* maximum runtime for each instance */ - u64 dl_deadline; /* relative deadline of each instance */ - u64 dl_period; /* separation of two instances (period) */ - u64 dl_bw; /* dl_runtime / dl_deadline */ + u64 dl_runtime; /* Maximum runtime for each instance */ + u64 dl_deadline; /* Relative deadline of each instance */ + u64 dl_period; /* Separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_deadline */ /* * Actual scheduling parameters. Initialized with the values above, * they are continously updated during task execution. Note that * the remaining runtime could be < 0 in case we are in overrun. */ - s64 runtime; /* remaining runtime for this instance */ - u64 deadline; /* absolute deadline for this instance */ - unsigned int flags; /* specifying the scheduler behaviour */ + s64 runtime; /* Remaining runtime for this instance */ + u64 deadline; /* Absolute deadline for this instance */ + unsigned int flags; /* Specifying the scheduler behaviour */ /* * Some bool flags: @@ -1429,28 +443,31 @@ struct sched_dl_entity { * outside bandwidth enforcement mechanism (but only until we * exit the critical section); * - * @dl_yielded tells if task gave up the cpu before consuming + * @dl_yielded tells if task gave up the CPU before consuming * all its available runtime during the last job. */ - int dl_throttled, dl_boosted, dl_yielded; + int dl_throttled; + int dl_boosted; + int dl_yielded; /* * Bandwidth enforcement timer. Each -deadline task has its * own bandwidth to be enforced, thus we need one timer per task. */ - struct hrtimer dl_timer; + struct hrtimer dl_timer; }; union rcu_special { struct { - u8 blocked; - u8 need_qs; - u8 exp_need_qs; - u8 pad; /* Otherwise the compiler can store garbage here. */ + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + + /* Otherwise the compiler can store garbage here: */ + u8 pad; } b; /* Bits. */ u32 s; /* Set of bits. */ }; -struct rcu_node; enum perf_event_task_context { perf_invalid_context = -1, @@ -1459,23 +476,8 @@ enum perf_event_task_context { perf_nr_task_contexts, }; -/* Track pages that require TLB flushes */ -struct tlbflush_unmap_batch { - /* - * Each bit set is a CPU that potentially has a TLB entry for one of - * the PFNs being flushed. See set_tlb_ubc_flush_pending(). - */ - struct cpumask cpumask; - - /* True if any bit in cpumask is set */ - bool flush_required; - - /* - * If true then the PTE was dirty when unmapped. 
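The dl_runtime/dl_deadline/dl_period fields commented above are copied from struct sched_attr by sched_setattr(). A hedged userspace sketch of setting up such a reservation: the struct layout follows sched_setattr(2), there is no glibc wrapper so the raw syscall is used, the __NR_sched_setattr fallback value assumes x86-64, and the call needs CAP_SYS_NICE.

/* dl_demo.c - request a SCHED_DEADLINE reservation via sched_setattr(2). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_sched_setattr
# define __NR_sched_setattr 314         /* assumption: x86-64 syscall number */
#endif
#define SCHED_DEADLINE 6

struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;            /* SCHED_NORMAL/SCHED_BATCH */
        uint32_t sched_priority;        /* SCHED_FIFO/SCHED_RR */
        uint64_t sched_runtime;         /* -> dl_runtime */
        uint64_t sched_deadline;        /* -> dl_deadline */
        uint64_t sched_period;          /* -> dl_period */
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;        /* 10ms of runtime ... */
        attr.sched_deadline =  30 * 1000 * 1000;        /* ... within 30ms ...  */
        attr.sched_period   = 100 * 1000 * 1000;        /* ... every 100ms      */

        if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
                perror("sched_setattr");
                return 1;
        }
        printf("running with a SCHED_DEADLINE reservation\n");
        return 0;
}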
The entry must be - * flushed before IO is initiated or a stale TLB entry potentially - * allows an update without redirtying the page. - */ - bool writable; +struct wake_q_node { + struct wake_q_node *next; }; struct task_struct { @@ -1484,362 +486,417 @@ struct task_struct { * For reasons of header soup (see current_thread_info()), this * must be the first element of task_struct. */ - struct thread_info thread_info; + struct thread_info thread_info; #endif - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ - unsigned int ptrace; + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; + void *stack; + atomic_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; - int on_cpu; + struct llist_node wake_entry; + int on_cpu; #ifdef CONFIG_THREAD_INFO_IN_TASK - unsigned int cpu; /* current CPU */ + /* Current CPU: */ + unsigned int cpu; #endif - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; - int wake_cpu; + int wake_cpu; #endif - int on_rq; + int on_rq; + + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; - int prio, static_prio, normal_prio; - unsigned int rt_priority; - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; #ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; + struct task_group *sched_task_group; #endif - struct sched_dl_entity dl; + struct sched_dl_entity dl; #ifdef CONFIG_PREEMPT_NOTIFIERS - /* list of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; + /* List of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; #endif #ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; + unsigned int btrace_seq; #endif - unsigned int policy; - int nr_cpus_allowed; - cpumask_t cpus_allowed; + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_PREEMPT_RCU */ + #ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - bool rcu_tasks_holdout; - struct list_head rcu_tasks_holdout_list; - int rcu_tasks_idle_cpu; + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; #endif /* #ifdef CONFIG_TASKS_RCU */ -#ifdef CONFIG_SCHED_INFO - struct sched_info sched_info; -#endif + struct sched_info sched_info; - struct list_head tasks; + struct list_head tasks; #ifdef CONFIG_SMP - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; -#endif - - struct mm_struct *mm, *active_mm; - /* per-thread vma caching */ - u32 vmacache_seqnum; - struct vm_area_struct *vmacache[VMACACHE_SIZE]; -#if defined(SPLIT_RSS_COUNTING) - struct task_rss_stat rss_stat; -#endif -/* task state */ - int exit_state; - int exit_code, exit_signal; - int pdeath_signal; /* The signal sent when the parent dies */ 
- unsigned long jobctl; /* JOBCTL_*, siglock protected */ - - /* Used for emulating ABI behavior of previous Linux versions */ - unsigned int personality; - - /* scheduler bits, serialized by scheduler locks */ - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; - unsigned sched_remote_wakeup:1; - unsigned :0; /* force alignment to the next boundary */ - - /* unserialized, strictly 'current' */ - unsigned in_execve:1; /* bit to tell LSMs we're in execve */ - unsigned in_iowait:1; -#if !defined(TIF_RESTORE_SIGMASK) - unsigned restore_sigmask:1; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; +#endif + + struct mm_struct *mm; + struct mm_struct *active_mm; + + /* Per-thread vma caching: */ + struct vmacache vmacache; + +#ifdef SPLIT_RSS_COUNTING + struct task_rss_stat rss_stat; +#endif + int exit_state; + int exit_code; + int exit_signal; + /* The signal sent when the parent dies: */ + int pdeath_signal; + /* JOBCTL_*, siglock protected: */ + unsigned long jobctl; + + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; + + /* Scheduler bits, serialized by scheduler locks: */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + /* Force alignment to the next boundary: */ + unsigned :0; + + /* Unserialized, strictly 'current' */ + + /* Bit to tell LSMs we're in execve(): */ + unsigned in_execve:1; + unsigned in_iowait:1; +#ifndef TIF_RESTORE_SIGMASK + unsigned restore_sigmask:1; #endif #ifdef CONFIG_MEMCG - unsigned memcg_may_oom:1; + unsigned memcg_may_oom:1; #ifndef CONFIG_SLOB - unsigned memcg_kmem_skip_account:1; + unsigned memcg_kmem_skip_account:1; #endif #endif #ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; + unsigned brk_randomized:1; #endif - unsigned long atomic_flags; /* Flags needing atomic access. */ + unsigned long atomic_flags; /* Flags requiring atomic access. */ - struct restart_block restart_block; + struct restart_block restart_block; - pid_t pid; - pid_t tgid; + pid_t pid; + pid_t tgid; #ifdef CONFIG_CC_STACKPROTECTOR - /* Canary value for the -fstack-protector gcc feature */ - unsigned long stack_canary; + /* Canary value for the -fstack-protector GCC feature: */ + unsigned long stack_canary; #endif /* - * pointers to (original) parent process, youngest child, younger sibling, + * Pointers to the (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - struct task_struct __rcu *real_parent; /* real parent process */ - struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ + + /* Real parent process: */ + struct task_struct __rcu *real_parent; + + /* Recipient of SIGCHLD, wait4() reports: */ + struct task_struct __rcu *parent; + /* - * children/sibling forms the list of my natural children + * Children/sibling form the list of natural children: */ - struct list_head children; /* list of my children */ - struct list_head sibling; /* linkage in my parent's children list */ - struct task_struct *group_leader; /* threadgroup leader */ + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; /* - * ptraced is the list of tasks this task is using ptrace on. + * 'ptraced' is the list of tasks this task is using ptrace() on. + * * This includes both natural children and PTRACE_ATTACH targets. 
- * p->ptrace_entry is p's link on the p->parent->ptraced list. + * 'ptrace_entry' is this task's link on the p->parent->ptraced list. */ - struct list_head ptraced; - struct list_head ptrace_entry; + struct list_head ptraced; + struct list_head ptrace_entry; /* PID/PID hash table linkage. */ - struct pid_link pids[PIDTYPE_MAX]; - struct list_head thread_group; - struct list_head thread_node; + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; - struct completion *vfork_done; /* for vfork() */ - int __user *set_child_tid; /* CLONE_CHILD_SETTID */ - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + struct completion *vfork_done; - u64 utime, stime; + /* CLONE_CHILD_SETTID: */ + int __user *set_child_tid; + + /* CLONE_CHILD_CLEARTID: */ + int __user *clear_child_tid; + + u64 utime; + u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled, stimescaled; + u64 utimescaled; + u64 stimescaled; #endif - u64 gtime; - struct prev_cputime prev_cputime; + u64 gtime; + struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_t vtime_seqcount; - unsigned long long vtime_snap; + seqcount_t vtime_seqcount; + unsigned long long vtime_snap; enum { - /* Task is sleeping or running in a CPU with VTIME inactive */ + /* Task is sleeping or running in a CPU with VTIME inactive: */ VTIME_INACTIVE = 0, - /* Task runs in userspace in a CPU with VTIME active */ + /* Task runs in userspace in a CPU with VTIME active: */ VTIME_USER, - /* Task runs in kernelspace in a CPU with VTIME active */ + /* Task runs in kernelspace in a CPU with VTIME active: */ VTIME_SYS, } vtime_snap_whence; #endif #ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; + atomic_t tick_dep_mask; #endif - unsigned long nvcsw, nivcsw; /* context switch counts */ - u64 start_time; /* monotonic time in nsec */ - u64 real_start_time; /* boot based time in nsec */ -/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ - unsigned long min_flt, maj_flt; + /* Context switch counts: */ + unsigned long nvcsw; + unsigned long nivcsw; + + /* Monotonic time in nsecs: */ + u64 start_time; + + /* Boot based time in nsecs: */ + u64 real_start_time; + + /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ + unsigned long min_flt; + unsigned long maj_flt; #ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif - -/* process credentials */ - const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ - const struct cred __rcu *real_cred; /* objective and real subjective task - * credentials (COW) */ - const struct cred __rcu *cred; /* effective (overridable) subjective task - * credentials (COW) */ - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) - - initialized normally by setup_new_exec */ -/* file system info */ - struct nameidata *nameidata; + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; +#endif + + /* Process credentials: */ + + /* Tracer's credentials at attach: */ + const struct cred __rcu *ptracer_cred; + + /* Objective and real subjective task credentials (COW): */ + const struct cred __rcu *real_cred; + + /* Effective (overridable) subjective task credentials (COW): */ + const struct cred __rcu *cred; + + /* + * executable name, excluding path. 
+ * + * - normally initialized setup_new_exec() + * - access it with [gs]et_task_comm() + * - lock it with task_lock() + */ + char comm[TASK_COMM_LEN]; + + struct nameidata *nameidata; + #ifdef CONFIG_SYSVIPC -/* ipc stuff */ - struct sysv_sem sysvsem; - struct sysv_shm sysvshm; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; #endif #ifdef CONFIG_DETECT_HUNG_TASK -/* hung task detection */ - unsigned long last_switch_count; -#endif -/* filesystem information */ - struct fs_struct *fs; -/* open file information */ - struct files_struct *files; -/* namespaces */ - struct nsproxy *nsproxy; -/* signal handlers */ - struct signal_struct *signal; - struct sighand_struct *sighand; - - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ - struct sigpending pending; - - unsigned long sas_ss_sp; - size_t sas_ss_size; - unsigned sas_ss_flags; - - struct callback_head *task_works; - - struct audit_context *audit_context; + unsigned long last_switch_count; +#endif + /* Filesystem information: */ + struct fs_struct *fs; + + /* Open file information: */ + struct files_struct *files; + + /* Namespaces: */ + struct nsproxy *nsproxy; + + /* Signal handlers: */ + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + /* Restored if set_restore_sigmask() was used: */ + sigset_t saved_sigmask; + struct sigpending pending; + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + + struct callback_head *task_works; + + struct audit_context *audit_context; #ifdef CONFIG_AUDITSYSCALL - kuid_t loginuid; - unsigned int sessionid; + kuid_t loginuid; + unsigned int sessionid; #endif - struct seccomp seccomp; + struct seccomp seccomp; -/* Thread group tracking */ - u32 parent_exec_id; - u32 self_exec_id; -/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, - * mempolicy */ - spinlock_t alloc_lock; + /* Thread group tracking: */ + u32 parent_exec_id; + u32 self_exec_id; + + /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ + spinlock_t alloc_lock; /* Protection of the PI data structures: */ - raw_spinlock_t pi_lock; + raw_spinlock_t pi_lock; - struct wake_q_node wake_q; + struct wake_q_node wake_q; #ifdef CONFIG_RT_MUTEXES - /* PI waiters blocked on a rt_mutex held by this task */ - struct rb_root pi_waiters; - struct rb_node *pi_waiters_leftmost; - /* Deadlock detection and priority inheritance handling */ - struct rt_mutex_waiter *pi_blocked_on; + /* PI waiters blocked on a rt_mutex held by this task: */ + struct rb_root pi_waiters; + struct rb_node *pi_waiters_leftmost; + /* Deadlock detection and priority inheritance handling: */ + struct rt_mutex_waiter *pi_blocked_on; #endif #ifdef CONFIG_DEBUG_MUTEXES - /* mutex deadlock detection */ - struct mutex_waiter *blocked_on; + /* Mutex deadlock detection: */ + struct mutex_waiter *blocked_on; #endif + #ifdef CONFIG_TRACE_IRQFLAGS - unsigned int irq_events; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; - int hardirqs_enabled; - int hardirq_context; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; - int softirqs_enabled; - int softirq_context; + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int 
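The comm[TASK_COMM_LEN] comment above notes that the name is initialized by setup_new_exec() and accessed with [gs]et_task_comm(). The userspace view of the same 16-byte field is prctl(PR_SET_NAME/PR_GET_NAME) and /proc/<pid>/comm; a minimal sketch:

/* comm.c - read and change the 16-byte task->comm field from userspace. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        char name[16];  /* TASK_COMM_LEN is 16, including the trailing NUL */

        prctl(PR_GET_NAME, name, 0, 0, 0);
        printf("old comm: %s\n", name);

        /* Longer names are silently truncated to 15 characters plus NUL. */
        prctl(PR_SET_NAME, "demo-worker", 0, 0, 0);

        prctl(PR_GET_NAME, name, 0, 0, 0);
        printf("new comm: %s\n", name); /* also visible in /proc/self/comm */
        return 0;
}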
hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; #endif + #ifdef CONFIG_LOCKDEP -# define MAX_LOCK_DEPTH 48UL - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; +# define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; #endif + #ifdef CONFIG_UBSAN - unsigned int in_ubsan; + unsigned int in_ubsan; #endif -/* journalling filesystem info */ - void *journal_info; + /* Journalling filesystem info: */ + void *journal_info; -/* stacked block device info */ - struct bio_list *bio_list; + /* Stacked block device info: */ + struct bio_list *bio_list; #ifdef CONFIG_BLOCK -/* stack plugging */ - struct blk_plug *plug; + /* Stack plugging: */ + struct blk_plug *plug; #endif -/* VM state */ - struct reclaim_state *reclaim_state; + /* VM state: */ + struct reclaim_state *reclaim_state; - struct backing_dev_info *backing_dev_info; + struct backing_dev_info *backing_dev_info; - struct io_context *io_context; + struct io_context *io_context; - unsigned long ptrace_message; - siginfo_t *last_siginfo; /* For ptrace use. */ - struct task_io_accounting ioac; -#if defined(CONFIG_TASK_XACCT) - u64 acct_rss_mem1; /* accumulated rss usage */ - u64 acct_vm_mem1; /* accumulated virtual memory usage */ - u64 acct_timexpd; /* stime + utime since last update */ + /* Ptrace state: */ + unsigned long ptrace_message; + siginfo_t *last_siginfo; + + struct task_io_accounting ioac; +#ifdef CONFIG_TASK_XACCT + /* Accumulated RSS usage: */ + u64 acct_rss_mem1; + /* Accumulated virtual memory usage: */ + u64 acct_vm_mem1; + /* stime + utime since last update: */ + u64 acct_timexpd; #endif #ifdef CONFIG_CPUSETS - nodemask_t mems_allowed; /* Protected by alloc_lock */ - seqcount_t mems_allowed_seq; /* Seqence no to catch updates */ - int cpuset_mem_spread_rotor; - int cpuset_slab_spread_rotor; + /* Protected by ->alloc_lock: */ + nodemask_t mems_allowed; + /* Seqence number to catch updates: */ + seqcount_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; #endif #ifdef CONFIG_CGROUPS - /* Control Group info protected by css_set_lock */ - struct css_set __rcu *cgroups; - /* cg_list protected by css_set_lock and tsk->alloc_lock */ - struct list_head cg_list; + /* Control Group info protected by css_set_lock: */ + struct css_set __rcu *cgroups; + /* cg_list protected by css_set_lock and tsk->alloc_lock: */ + struct list_head cg_list; #endif #ifdef CONFIG_INTEL_RDT_A - int closid; + int closid; #endif #ifdef CONFIG_FUTEX - struct robust_list_head __user *robust_list; + struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT struct compat_robust_list_head __user *compat_robust_list; #endif - struct list_head pi_state_list; - struct futex_pi_state *pi_state_cache; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; #endif #ifdef CONFIG_PERF_EVENTS - struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; - struct mutex perf_event_mutex; - struct list_head perf_event_list; + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head 
perf_event_list; #endif #ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; + unsigned long preempt_disable_ip; #endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; /* Protected by alloc_lock */ - short il_next; - short pref_node_fork; + /* Protected by alloc_lock: */ + struct mempolicy *mempolicy; + short il_next; + short pref_node_fork; #endif #ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - u64 node_stamp; /* migration stamp */ - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; - - struct list_head numa_entry; - struct numa_group *numa_group; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + /* Migration stamp: */ + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + + struct list_head numa_entry; + struct numa_group *numa_group; /* * numa_faults is an array split into four regions: @@ -1855,8 +912,8 @@ struct task_struct { * during the current scan window. When the scan completes, the counts * in faults_memory and faults_cpu decay and these values are copied. */ - unsigned long *numa_faults; - unsigned long total_numa_faults; + unsigned long *numa_faults; + unsigned long total_numa_faults; /* * numa_faults_locality tracks if faults recorded during the last @@ -1864,208 +921,133 @@ struct task_struct { * period is adapted based on the locality of the faults with different * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[3]; + unsigned long numa_faults_locality[3]; - unsigned long numa_pages_migrated; + unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH - struct tlbflush_unmap_batch tlb_ubc; -#endif + struct tlbflush_unmap_batch tlb_ubc; - struct rcu_head rcu; + struct rcu_head rcu; - /* - * cache last used pipe for splice - */ - struct pipe_inode_info *splice_pipe; + /* Cache last used pipe for splice(): */ + struct pipe_inode_info *splice_pipe; - struct page_frag task_frag; + struct page_frag task_frag; -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; #endif + #ifdef CONFIG_FAULT_INJECTION - int make_it_fail; + int make_it_fail; #endif /* - * when (nr_dirtied >= nr_dirtied_pause), it's time to call - * balance_dirty_pages() for some dirty throttling pause + * When (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for a dirty throttling pause: */ - int nr_dirtied; - int nr_dirtied_pause; - unsigned long dirty_paused_when; /* start of a write-and-pause period */ + int nr_dirtied; + int nr_dirtied_pause; + /* Start of a write-and-pause period: */ + unsigned long dirty_paused_when; #ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; #endif /* - * time slack values; these are used to round up poll() and + * Time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. 
*/ - u64 timer_slack_ns; - u64 default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; #ifdef CONFIG_KASAN - unsigned int kasan_depth; + unsigned int kasan_depth; #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack */ - int curr_ret_stack; - /* Stack of return addresses for return function tracing */ - struct ftrace_ret_stack *ret_stack; - /* time stamp for last schedule */ - unsigned long long ftrace_timestamp; + /* Index of current stored address in ret_stack: */ + int curr_ret_stack; + + /* Stack of return addresses for return function tracing: */ + struct ftrace_ret_stack *ret_stack; + + /* Timestamp for last schedule: */ + unsigned long long ftrace_timestamp; + /* * Number of functions that haven't been traced - * because of depth overrun. + * because of depth overrun: */ - atomic_t trace_overrun; - /* Pause for the tracing */ - atomic_t tracing_graph_pause; + atomic_t trace_overrun; + + /* Pause tracing: */ + atomic_t tracing_graph_pause; #endif + #ifdef CONFIG_TRACING - /* state flags for use by tracers */ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; + /* State flags for use by tracers: */ + unsigned long trace; + + /* Bitmask and counter of trace recursion: */ + unsigned long trace_recursion; #endif /* CONFIG_TRACING */ + #ifdef CONFIG_KCOV - /* Coverage collection mode enabled for this task (0 if disabled). */ - enum kcov_mode kcov_mode; - /* Size of the kcov_area. */ - unsigned kcov_size; - /* Buffer for coverage collection. */ - void *kcov_area; - /* kcov desciptor wired with this task or NULL. */ - struct kcov *kcov; + /* Coverage collection mode enabled for this task (0 if disabled): */ + enum kcov_mode kcov_mode; + + /* Size of the kcov_area: */ + unsigned int kcov_size; + + /* Buffer for coverage collection: */ + void *kcov_area; + + /* KCOV descriptor wired with this task or NULL: */ + struct kcov *kcov; #endif + #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; - /* number of pages to reclaim on returning to userland */ - unsigned int memcg_nr_pages_over_high; + /* Number of pages to reclaim on returning to userland: */ + unsigned int memcg_nr_pages_over_high; #endif + #ifdef CONFIG_UPROBES - struct uprobe_task *utask; + struct uprobe_task *utask; #endif #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; + unsigned int sequential_io; + unsigned int sequential_io_avg; #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; + unsigned long task_state_change; #endif - int pagefault_disabled; + int pagefault_disabled; #ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; + struct task_struct *oom_reaper_list; #endif #ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; + struct vm_struct *stack_vm_area; #endif #ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference. */ - atomic_t stack_refcount; -#endif -/* CPU-specific state of this task */ - struct thread_struct thread; -/* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. - * - * Do not put anything below here! 
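timer_slack_ns/default_timer_slack_ns above hold the per-task slack that pads poll()/select()/nanosleep()-style timeouts. Userspace tunes it with prctl(PR_SET_TIMERSLACK); a small sketch (the 1 ms value below is arbitrary):

/* timerslack.c - adjust the per-task timer slack that pads timer expirations. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* The default slack is inherited from the parent, typically 50000 ns. */
        printf("current slack: %d ns\n", (int)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));

        /* Ask for 1ms of slack: timers may fire up to 1ms late, allowing batching. */
        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0))
                perror("PR_SET_TIMERSLACK");

        printf("new slack    : %d ns\n", (int)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
        return 0;
}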
- */ -}; - -#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -extern int arch_task_struct_size __read_mostly; -#else -# define arch_task_struct_size (sizeof(struct task_struct)) -#endif - -#ifdef CONFIG_VMAP_STACK -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return t->stack_vm_area; -} -#else -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return NULL; -} + /* A live task holds one reference: */ + atomic_t stack_refcount; #endif - -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -static inline int tsk_nr_cpus_allowed(struct task_struct *p) -{ - return p->nr_cpus_allowed; -} - -#define TNF_MIGRATED 0x01 -#define TNF_NO_GROUP 0x02 -#define TNF_SHARED 0x04 -#define TNF_FAULT_LOCAL 0x08 -#define TNF_MIGRATE_FAIL 0x10 - -static inline bool in_vfork(struct task_struct *tsk) -{ - bool ret; + /* CPU-specific state of this task: */ + struct thread_struct thread; /* - * need RCU to access ->real_parent if CLONE_VM was used along with - * CLONE_PARENT. - * - * We check real_parent->mm == tsk->mm because CLONE_VFORK does not - * imply CLONE_VM + * WARNING: on x86, 'thread_struct' contains a variable-sized + * structure. It *MUST* be at the end of 'task_struct'. * - * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus - * ->real_parent is not necessarily the task doing vfork(), so in - * theory we can't rely on task_lock() if we want to dereference it. - * - * And in this case we can't trust the real_parent->mm == tsk->mm - * check, it can be false negative. But we do not care, if init or - * another oom-unkillable task does this it should blame itself. + * Do not put anything below here! */ - rcu_read_lock(); - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; - rcu_read_unlock(); - - return ret; -} - -#ifdef CONFIG_NUMA_BALANCING -extern void task_numa_fault(int last_node, int node, int pages, int flags); -extern pid_t task_numa_group_id(struct task_struct *p); -extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); -#else -static inline void task_numa_fault(int last_node, int node, int pages, - int flags) -{ -} -static inline pid_t task_numa_group_id(struct task_struct *p) -{ - return 0; -} -static inline void set_numabalancing_state(bool enabled) -{ -} -static inline void task_numa_free(struct task_struct *p) -{ -} -static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) -{ - return true; -} -#endif +}; static inline struct pid *task_pid(struct task_struct *task) { @@ -2078,7 +1060,7 @@ static inline struct pid *task_tgid(struct task_struct *task) } /* - * Without tasklist or rcu lock it is not safe to dereference + * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. 
*/ @@ -2092,8 +1074,6 @@ static inline struct pid *task_session(struct task_struct *task) return task->group_leader->pids[PIDTYPE_SID].pid; } -struct pid_namespace; - /* * the helpers to get the task's different pids as they are seen * from various namespaces @@ -2107,16 +1087,14 @@ struct pid_namespace; * * see also pid_nr() etc in include/linux/pid.h */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, - struct pid_namespace *ns); +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); static inline pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; } -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); } @@ -2132,15 +1110,28 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); +extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline pid_t task_tgid_vnr(struct task_struct *tsk) { return pid_vnr(task_tgid(tsk)); } +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) +{ + return p->pids[PIDTYPE_PID].pid != NULL; +} -static inline int pid_alive(const struct task_struct *p); static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) { pid_t pid = 0; @@ -2158,8 +1149,7 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk) return task_ppid_nr_ns(tsk, &init_pid_ns); } -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); } @@ -2170,8 +1160,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk) } -static inline pid_t task_session_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); } @@ -2181,28 +1170,13 @@ static inline pid_t task_session_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } -/* obsolete, do not use */ +/* Obsolete, do not use: */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { return task_pgrp_nr_ns(tsk, &init_pid_ns); } /** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. - */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->pids[PIDTYPE_PID].pid != NULL; -} - -/** * is_global_init - check if a task structure is init. Since init * is free to have sub-threads we need to check tgid. * @tsk: Task structure to be checked. 
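task_pid_nr()/task_tgid_nr() above distinguish the per-thread PID from the thread-group ID; seen from userspace that is gettid() versus getpid(). A short demonstration (build with -pthread; the raw syscall is used because the glibc gettid() wrapper only appeared in glibc 2.30):

/* tid_vs_pid.c - threads share a TGID (getpid) but have distinct PIDs (gettid). */
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t my_tid(void)
{
        return (pid_t)syscall(SYS_gettid);
}

static void *worker(void *arg)
{
        (void)arg;
        printf("thread: tgid=%d tid=%d\n", getpid(), my_tid());
        return NULL;
}

int main(void)
{
        pthread_t t;

        printf("main  : tgid=%d tid=%d\n", getpid(), my_tid());
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
}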
@@ -2218,89 +1192,37 @@ static inline int is_global_init(struct task_struct *tsk) extern struct pid *cad_pid; -extern void free_task(struct task_struct *tsk); -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -extern void __put_task_struct(struct task_struct *t); - -static inline void put_task_struct(struct task_struct *t) -{ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); -} - -struct task_struct *task_rcu_dereference(struct task_struct **ptask); -struct task_struct *try_get_task_struct(struct task_struct **ptask); - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime); -extern u64 task_gtime(struct task_struct *t); -#else -static inline void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime) -{ - *utime = t->utime; - *stime = t->stime; -} - -static inline u64 task_gtime(struct task_struct *t) -{ - return t->gtime; -} -#endif - -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - *utimescaled = t->utimescaled; - *stimescaled = t->stimescaled; -} -#else -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - task_cputime(t, utimescaled, stimescaled); -} -#endif - -extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); -extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); - /* * Per process flags */ -#define PF_IDLE 0x00000002 /* I am an IDLE thread */ -#define PF_EXITING 0x00000004 /* getting shut down */ -#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ -#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ -#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ -#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ -#define PF_DUMPCORE 0x00000200 /* dumped core */ -#define PF_SIGNALED 0x00000400 /* killed by a signal */ -#define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ -#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ -#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -#define PF_FROZEN 0x00010000 /* frozen for system suspend */ -#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ -#define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ -#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ -#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_EXITING 
0x00000004 /* Getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* Dumped core */ +#define PF_SIGNALED 0x00000400 /* Killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ +#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -2313,55 +1235,38 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *s * child is not running and in turn not changing child->flags * at the same time the parent does it. */ -#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) -#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) -#define clear_used_math() clear_stopped_child_used_math(current) -#define set_used_math() set_stopped_child_used_math(current) +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) + #define conditional_stopped_child_used_math(condition, child) \ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) -#define conditional_used_math(condition) \ - conditional_stopped_child_used_math(condition, current) -#define copy_to_stopped_child_used_math(child) \ - do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ -#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) -#define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags - * __GFP_FS is also cleared as it implies __GFP_IO. 
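The PF_* values above are exported to userspace as the decimal 'flags' field (field 9) of /proc/<pid>/stat. A sketch that reads them back, assuming the proc(5) field layout; the two flag values checked are taken verbatim from the list above:

/* pf_flags.c - read the per-process PF_* flags from /proc/self/stat. */
#include <stdio.h>
#include <string.h>

#define PF_FORKNOEXEC   0x00000040
#define PF_KTHREAD      0x00200000

int main(void)
{
        char buf[512], state;
        int ppid, pgrp, session, tty_nr, tpgid;
        unsigned long flags = 0;
        FILE *f = fopen("/proc/self/stat", "r");

        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror("/proc/self/stat");
                return 1;
        }
        fclose(f);

        /* Skip "pid (comm)"; comm may contain spaces, so search from the right. */
        char *p = strrchr(buf, ')');
        sscanf(p + 2, "%c %d %d %d %d %d %lu",
               &state, &ppid, &pgrp, &session, &tty_nr, &tpgid, &flags);

        printf("flags=0x%lx kthread=%s forknoexec=%s\n", flags,
               (flags & PF_KTHREAD) ? "yes" : "no",
               (flags & PF_FORKNOEXEC) ? "yes" : "no");
        return 0;
}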
- */ -static inline gfp_t memalloc_noio_flags(gfp_t flags) -{ - if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~(__GFP_IO | __GFP_FS); - return flags; -} +#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) -static inline unsigned int memalloc_noio_save(void) -{ - unsigned int flags = current->flags & PF_MEMALLOC_NOIO; - current->flags |= PF_MEMALLOC_NOIO; - return flags; -} +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -static inline void memalloc_noio_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; -} +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) /* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ -#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ -#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ { return test_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_SET(name, func) \ static inline void task_set_##func(struct task_struct *p) \ { set_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_CLEAR(name, func) \ static inline void task_clear_##func(struct task_struct *p) \ { clear_bit(PFA_##name, &p->atomic_flags); } @@ -2380,75 +1285,23 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) TASK_PFA_TEST(LMK_WAITING, lmk_waiting) TASK_PFA_SET(LMK_WAITING, lmk_waiting) -/* - * task->jobctl flags - */ -#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ - -#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ -#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ -#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ -#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ -#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ -#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ -#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ - -#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) -#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) -#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) -#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) -#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) -#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) -#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) - -#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) - -extern bool task_set_jobctl_pending(struct task_struct *task, - unsigned long mask); -extern void task_clear_jobctl_trapping(struct task_struct *task); -extern void task_clear_jobctl_pending(struct task_struct *task, - unsigned long mask); - -static inline void rcu_copy_process(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RCU - p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special.s = 0; - p->rcu_blocked_node = NULL; - 
INIT_LIST_HEAD(&p->rcu_node_entry); -#endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TASKS_RCU - p->rcu_tasks_holdout = false; - INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); - p->rcu_tasks_idle_cpu = -1; -#endif /* #ifdef CONFIG_TASKS_RCU */ -} - -static inline void tsk_restore_flags(struct task_struct *task, - unsigned long orig_flags, unsigned long flags) +static inline void +tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { task->flags &= ~flags; task->flags |= orig_flags & flags; } -extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, - const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed); +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP -extern void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask); - -extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); #else -static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) +static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { } -static inline int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask) +static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; @@ -2456,165 +1309,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, } #endif -#ifdef CONFIG_NO_HZ_COMMON -void calc_load_enter_idle(void); -void calc_load_exit_idle(void); -#else -static inline void calc_load_enter_idle(void) { } -static inline void calc_load_exit_idle(void) { } -#endif /* CONFIG_NO_HZ_COMMON */ - #ifndef cpu_relax_yield #define cpu_relax_yield() cpu_relax() #endif -/* - * Do not use outside of architecture code which knows its limitations. - * - * sched_clock() has no promise of monotonicity or bounded drift between - * CPUs, use (which you should not) requires disabling IRQs. - * - * Please use one of the three interfaces below. 
- */ -extern unsigned long long notrace sched_clock(void); -/* - * See the comment in kernel/sched/clock.c - */ -extern u64 running_clock(void); -extern u64 sched_clock_cpu(int cpu); - - -extern void sched_clock_init(void); - -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init_late(void) -{ -} - -static inline void sched_clock_tick(void) -{ -} - -static inline void clear_sched_clock_stable(void) -{ -} - -static inline void sched_clock_idle_sleep_event(void) -{ -} - -static inline void sched_clock_idle_wakeup_event(u64 delta_ns) -{ -} - -static inline u64 cpu_clock(int cpu) -{ - return sched_clock(); -} - -static inline u64 local_clock(void) -{ - return sched_clock(); -} -#else -extern void sched_clock_init_late(void); -/* - * Architectures can set this to 1 if they have specified - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, - * but then during bootup it turns out that sched_clock() - * is reliable after all: - */ -extern int sched_clock_stable(void); -extern void clear_sched_clock_stable(void); - -extern void sched_clock_tick(void); -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -/* - * As outlined in clock.c, provides a fast, high resolution, nanosecond - * time source that is monotonic per cpu argument and has bounded drift - * between cpus. - * - * ######################### BIG FAT WARNING ########################## - * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # - * # go backwards !! # - * #################################################################### - */ -static inline u64 cpu_clock(int cpu) -{ - return sched_clock_cpu(cpu); -} - -static inline u64 local_clock(void) -{ - return sched_clock_cpu(raw_smp_processor_id()); -} -#endif - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -/* - * An i/f to runtime opt-in for irq time accounting based off of sched_clock. - * The reason for this explicit opt-in is not to have perf penalty with - * slow sched_clocks. 
- */ -extern void enable_sched_clock_irqtime(void); -extern void disable_sched_clock_irqtime(void); -#else -static inline void enable_sched_clock_irqtime(void) {} -static inline void disable_sched_clock_irqtime(void) {} -#endif - -extern unsigned long long -task_sched_runtime(struct task_struct *task); - -/* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP -extern void sched_exec(void); -#else -#define sched_exec() {} -#endif - -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else -static inline void idle_task_exit(void) {} -#endif - -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) -extern void wake_up_nohz_cpu(int cpu); -#else -static inline void wake_up_nohz_cpu(int cpu) { } -#endif - -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - -#ifdef CONFIG_SCHED_AUTOGROUP -extern void sched_autogroup_create_attach(struct task_struct *p); -extern void sched_autogroup_detach(struct task_struct *p); -extern void sched_autogroup_fork(struct signal_struct *sig); -extern void sched_autogroup_exit(struct signal_struct *sig); -extern void sched_autogroup_exit_task(struct task_struct *p); -#ifdef CONFIG_PROC_FS -extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); -extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); -#endif -#else -static inline void sched_autogroup_create_attach(struct task_struct *p) { } -static inline void sched_autogroup_detach(struct task_struct *p) { } -static inline void sched_autogroup_fork(struct signal_struct *sig) { } -static inline void sched_autogroup_exit(struct signal_struct *sig) { } -static inline void sched_autogroup_exit_task(struct task_struct *p) { } -#endif - extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); + /** * task_nice - return the nice value of a given task. * @p: the task in question. @@ -2625,16 +1327,15 @@ static inline int task_nice(const struct task_struct *p) { return PRIO_TO_NICE((p)->static_prio); } + extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); -extern int sched_setscheduler(struct task_struct *, int, - const struct sched_param *); -extern int sched_setscheduler_nocheck(struct task_struct *, int, - const struct sched_param *); -extern int sched_setattr(struct task_struct *, - const struct sched_attr *); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); + /** * is_idle_task - is the specified task an idle task? * @p: the task in question. 
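[Editor's aside, not part of the diff: a minimal sketch of the in-kernel scheduler-policy interface declared above, e.g. for boosting a kthread; the priority value 50 and the helper name are arbitrary.]

#include <linux/sched.h>

static int example_make_fifo(struct task_struct *p)
{
        struct sched_param param = { .sched_priority = 50 };    /* arbitrary RT priority */

        /* _nocheck variant: skips the permission checks applied to userspace callers. */
        return sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}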
@@ -2645,6 +1346,7 @@ static inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } + extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); @@ -2657,23 +1359,15 @@ union thread_union { unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -#ifndef __HAVE_ARCH_KSTACK_END -static inline int kstack_end(void *addr) +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline struct thread_info *task_thread_info(struct task_struct *task) { - /* Reliable end of stack detection: - * Some APM bios versions misalign the stack - */ - return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); + return &task->thread_info; } +#elif !defined(__HAVE_THREAD_FUNCTIONS) +# define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif -extern union thread_union init_thread_union; -extern struct task_struct init_task; - -extern struct mm_struct init_mm; - -extern struct pid_namespace init_pid_ns; - /* * find a task by one of its numerical ids * @@ -2686,322 +1380,25 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); -extern struct task_struct *find_task_by_pid_ns(pid_t nr, - struct pid_namespace *ns); - -/* per-UID process charging. */ -extern struct user_struct * alloc_uid(kuid_t); -static inline struct user_struct *get_uid(struct user_struct *u) -{ - atomic_inc(&u->__count); - return u; -} -extern void free_uid(struct user_struct *); - -#include <asm/current.h> - -extern void xtime_update(unsigned long ticks); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); -#ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -#else - static inline void kick_process(struct task_struct *tsk) { } -#endif -extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_dead(struct task_struct *p); - -extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void ignore_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); - -static inline int kernel_dequeue_signal(siginfo_t *info) -{ - struct task_struct *tsk = current; - siginfo_t __info; - int ret; - - spin_lock_irq(&tsk->sighand->siglock); - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); - spin_unlock_irq(&tsk->sighand->siglock); - - return ret; -} - -static inline void kernel_signal_stop(void) -{ - spin_lock_irq(¤t->sighand->siglock); - if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); - spin_unlock_irq(¤t->sighand->siglock); - - schedule(); -} - -extern void release_task(struct task_struct * p); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); -extern int force_sigsegv(int, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, - const struct cred *, u32); -extern int kill_pgrp(struct pid *pid, int sig, int priv); -extern int kill_pid(struct pid *pid, int 
sig, int priv); -extern int kill_proc_info(int, struct siginfo *, pid_t); -extern __must_check bool do_notify_parent(struct task_struct *, int); -extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); -extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); -extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); - -#ifdef TIF_RESTORE_SIGMASK -/* - * Legacy restore_sigmask accessors. These are inefficient on - * SMP architectures because they require atomic operations. - */ - -/** - * set_restore_sigmask() - make sure saved_sigmask processing gets done - * - * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code - * will run before returning to user mode, to process the flag. For - * all callers, TIF_SIGPENDING is already set or it's no harm to set - * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the - * arch code will notice on return to user mode, in case those bits - * are scarce. We set TIF_SIGPENDING here to ensure that the arch - * signal code always gets run when TIF_RESTORE_SIGMASK is set. - */ -static inline void set_restore_sigmask(void) -{ - set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - clear_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_restore_sigmask(void) -{ - return test_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_and_clear_restore_sigmask(void) -{ - return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); -} - -#else /* TIF_RESTORE_SIGMASK */ - -/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ -static inline void set_restore_sigmask(void) -{ - current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - current->restore_sigmask = false; -} -static inline bool test_restore_sigmask(void) -{ - return current->restore_sigmask; -} -static inline bool test_and_clear_restore_sigmask(void) -{ - if (!current->restore_sigmask) - return false; - current->restore_sigmask = false; - return true; -} -#endif -static inline void restore_saved_sigmask(void) -{ - if (test_and_clear_restore_sigmask()) - __set_current_blocked(¤t->saved_sigmask); -} - -static inline sigset_t *sigmask_to_save(void) -{ - sigset_t *res = ¤t->blocked; - if (unlikely(test_restore_sigmask())) - res = ¤t->saved_sigmask; - return res; -} - -static inline int kill_cad_pid(int sig, int priv) -{ - return kill_pid(cad_pid, sig, priv); -} - -/* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) - -/* - * True if we are on the alternate signal stack. - */ -static inline int on_sig_stack(unsigned long sp) -{ - /* - * If the signal stack is SS_AUTODISARM then, by construction, we - * can't be on the signal stack unless user code deliberately set - * SS_AUTODISARM when we were already on it. - * - * This improves reliability: if user state gets corrupted such that - * the stack pointer points very close to the end of the signal stack, - * then this check will enable the signal to be handled anyway. 
- */ - if (current->sas_ss_flags & SS_AUTODISARM) - return 0; - -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif -} - -static inline int sas_ss_flags(unsigned long sp) -{ - if (!current->sas_ss_size) - return SS_DISABLE; - - return on_sig_stack(sp) ? SS_ONSTACK : 0; -} - -static inline void sas_ss_reset(struct task_struct *p) -{ - p->sas_ss_sp = 0; - p->sas_ss_size = 0; - p->sas_ss_flags = SS_DISABLE; -} - -static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) -{ - if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) -#ifdef CONFIG_STACK_GROWSUP - return current->sas_ss_sp; -#else - return current->sas_ss_sp + current->sas_ss_size; -#endif - return sp; -} - -/* - * Routines for handling mm_structs - */ -extern struct mm_struct * mm_alloc(void); - -/* mmdrop drops the mm and the page tables */ -extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); -} - -static inline void mmdrop_async_fn(struct work_struct *work) -{ - struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); - __mmdrop(mm); -} - -static inline void mmdrop_async(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) { - INIT_WORK(&mm->async_put_work, mmdrop_async_fn); - schedule_work(&mm->async_put_work); - } -} - -static inline bool mmget_not_zero(struct mm_struct *mm) -{ - return atomic_inc_not_zero(&mm->mm_users); -} - -/* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); -#ifdef CONFIG_MMU -/* same as above but performs the slow path from the async context. Can - * be called from the atomic context as well - */ -extern void mmput_async(struct mm_struct *); -#endif - -/* Grab a reference to a task's mm, if it is not already going away */ -extern struct mm_struct *get_task_mm(struct task_struct *task); -/* - * Grab a reference to a task's mm, if it is not already going away - * and ptrace_may_access with the mode parameter passed to it - * succeeds. - */ -extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct */ -extern void mm_release(struct task_struct *, struct mm_struct *); - -#ifdef CONFIG_HAVE_COPY_THREAD_TLS -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); -#else -extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); - -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. 
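[Editor's aside, not part of the diff: the usual pairing for the get_task_mm()/mmput() declarations in the removed block above; the statistic read here is only an example.]

#include <linux/sched.h>
#include <linux/mm_types.h>

static unsigned long example_task_total_vm(struct task_struct *tsk)
{
        struct mm_struct *mm;
        unsigned long total_vm = 0;

        mm = get_task_mm(tsk);  /* NULL for kernel threads or tasks already past exit_mm() */
        if (mm) {
                total_vm = mm->total_vm;        /* pages mapped by the task */
                mmput(mm);
        }
        return total_vm;
}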
*/ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) -{ - return copy_thread(clone_flags, sp, arg, p); -} -#endif -extern void flush_thread(void); - -#ifdef CONFIG_HAVE_EXIT_THREAD -extern void exit_thread(struct task_struct *tsk); +#ifdef CONFIG_SMP +extern void kick_process(struct task_struct *tsk); #else -static inline void exit_thread(struct task_struct *tsk) -{ -} +static inline void kick_process(struct task_struct *tsk) { } #endif -extern void exit_files(struct task_struct *); -extern void __cleanup_sighand(struct sighand_struct *); - -extern void exit_itimers(struct signal_struct *); -extern void flush_itimer_signals(void); - -extern void do_group_exit(int); - -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); -struct task_struct *fork_idle(int); -extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); + static inline void set_task_comm(struct task_struct *tsk, const char *from) { __set_task_comm(tsk, from, false); } + extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP @@ -3009,263 +1406,15 @@ void scheduler_ipi(void); extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, - long match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { return 1; } #endif -#define tasklist_empty() \ - list_empty(&init_task.tasks) - -#define next_task(p) \ - list_entry_rcu((p)->tasks.next, struct task_struct, tasks) - -#define for_each_process(p) \ - for (p = &init_task ; (p = next_task(p)) != &init_task ; ) - -extern bool current_is_single_threaded(void); - -/* - * Careful: do_each_thread/while_each_thread is a double loop so - * 'break' will not work as expected - use goto instead. - */ -#define do_each_thread(g, t) \ - for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do - -#define while_each_thread(g, t) \ - while ((t = next_thread(t)) != g) - -#define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) - -#define for_each_thread(p, t) \ - __for_each_thread((p)->signal, t) - -/* Careful: this is a double loop, 'break' won't work as expected. */ -#define for_each_process_thread(p, t) \ - for_each_process(p) for_each_thread(p, t) - -typedef int (*proc_visitor)(struct task_struct *p, void *data); -void walk_process_tree(struct task_struct *top, proc_visitor, void *); - -static inline int get_nr_threads(struct task_struct *tsk) -{ - return tsk->signal->nr_threads; -} - -static inline bool thread_group_leader(struct task_struct *p) -{ - return p->exit_signal >= 0; -} - -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. 
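[Editor's aside, not part of the diff: the "double loop" warning above is easy to trip over; a sketch that uses goto rather than break to leave for_each_process_thread() early. The comm string being matched is hypothetical, and the caller must later drop the reference with put_task_struct().]

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

static struct task_struct *example_find_task_by_comm(const char *comm)
{
        struct task_struct *p, *t, *found = NULL;

        rcu_read_lock();
        for_each_process_thread(p, t) {
                if (!strcmp(t->comm, comm)) {
                        get_task_struct(t);     /* pin it before leaving the RCU section */
                        found = t;
                        goto out;               /* 'break' would only leave the inner thread loop */
                }
        }
out:
        rcu_read_unlock();
        return found;
}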
For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task. - */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == p->signal->leader_pid; -} - -static inline -bool same_thread_group(struct task_struct *p1, struct task_struct *p2) -{ - return p1->signal == p2->signal; -} - -static inline struct task_struct *next_thread(const struct task_struct *p) -{ - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); -} - -static inline int thread_group_empty(struct task_struct *p) -{ - return list_empty(&p->thread_group); -} - -#define delay_group_leader(p) \ - (thread_group_leader(p) && !thread_group_empty(p)) - -/* - * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring - * subscriptions and synchronises with wait4(). Also used in procfs. Also - * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. - * - * Nests both inside and outside of read_lock(&tasklist_lock). - * It must not be nested with write_lock_irq(&tasklist_lock), - * neither inside nor outside. - */ -static inline void task_lock(struct task_struct *p) -{ - spin_lock(&p->alloc_lock); -} - -static inline void task_unlock(struct task_struct *p) -{ - spin_unlock(&p->alloc_lock); -} - -extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, - unsigned long *flags); - -static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - struct sighand_struct *ret; - - ret = __lock_task_sighand(tsk, flags); - (void)__cond_lock(&tsk->sighand->siglock, ret); - return ret; -} - -static inline void unlock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); -} - -/** - * threadgroup_change_begin - mark the beginning of changes to a threadgroup - * @tsk: task causing the changes - * - * All operations which modify a threadgroup - a new thread joining the - * group, death of a member thread (the assertion of PF_EXITING) and - * exec(2) dethreading the process and replacing the leader - are wrapped - * by threadgroup_change_{begin|end}(). This is to provide a place which - * subsystems needing threadgroup stability can hook into for - * synchronization. - */ -static inline void threadgroup_change_begin(struct task_struct *tsk) -{ - might_sleep(); - cgroup_threadgroup_change_begin(tsk); -} - -/** - * threadgroup_change_end - mark the end of changes to a threadgroup - * @tsk: task causing the changes - * - * See threadgroup_change_begin(). - */ -static inline void threadgroup_change_end(struct task_struct *tsk) -{ - cgroup_threadgroup_change_end(tsk); -} - -#ifdef CONFIG_THREAD_INFO_IN_TASK - -static inline struct thread_info *task_thread_info(struct task_struct *task) -{ - return &task->thread_info; -} - -/* - * When accessing the stack of a non-current task that might exit, use - * try_get_task_stack() instead. task_stack_page will return a pointer - * that could get freed out from under you. 
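[Editor's aside, not part of the diff: a sketch of the stack-lifetime rule stated above — pin a possibly-exiting task's stack with try_get_task_stack() before touching it. The printed message is illustrative only.]

#include <linux/sched.h>
#include <linux/printk.h>

static void example_report_stack(struct task_struct *tsk)
{
        void *stack = try_get_task_stack(tsk);  /* NULL if the stack was already freed */

        if (!stack)
                return;

        pr_info("%s/%d stack base %p\n", tsk->comm, task_pid_nr(tsk), stack);
        put_task_stack(tsk);                    /* pairs with try_get_task_stack() */
}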
- */ -static inline void *task_stack_page(const struct task_struct *task) -{ - return task->stack; -} - -#define setup_thread_stack(new,old) do { } while(0) - -static inline unsigned long *end_of_stack(const struct task_struct *task) -{ - return task->stack; -} - -#elif !defined(__HAVE_THREAD_FUNCTIONS) - -#define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((void *)(task)->stack) - -static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) -{ - *task_thread_info(p) = *task_thread_info(org); - task_thread_info(p)->task = p; -} - /* - * Return the address of the last usable long on the stack. - * - * When the stack grows down, this is just above the thread - * info struct. Going any lower will corrupt the threadinfo. - * - * When the stack grows up, this is the highest address. - * Beyond that position, we corrupt data on the next page. - */ -static inline unsigned long *end_of_stack(struct task_struct *p) -{ -#ifdef CONFIG_STACK_GROWSUP - return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; -#else - return (unsigned long *)(task_thread_info(p) + 1); -#endif -} - -#endif - -#ifdef CONFIG_THREAD_INFO_IN_TASK -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return atomic_inc_not_zero(&tsk->stack_refcount) ? - task_stack_page(tsk) : NULL; -} - -extern void put_task_stack(struct task_struct *tsk); -#else -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return task_stack_page(tsk); -} - -static inline void put_task_stack(struct task_struct *tsk) {} -#endif - -#define task_stack_end_corrupted(task) \ - (*(end_of_stack(task)) != STACK_END_MAGIC) - -static inline int object_is_on_stack(void *obj) -{ - void *stack = task_stack_page(current); - - return (obj >= stack) && (obj < (stack + THREAD_SIZE)); -} - -extern void thread_stack_cache_init(void); - -#ifdef CONFIG_DEBUG_STACK_USAGE -static inline unsigned long stack_not_used(struct task_struct *p) -{ - unsigned long *n = end_of_stack(p); - - do { /* Skip over canary */ -# ifdef CONFIG_STACK_GROWSUP - n--; -# else - n++; -# endif - } while (!*n); - -# ifdef CONFIG_STACK_GROWSUP - return (unsigned long)end_of_stack(p) - (unsigned long)n; -# else - return (unsigned long)n - (unsigned long)end_of_stack(p); -# endif -} -#endif -extern void set_task_stack_end_magic(struct task_struct *tsk); - -/* set thread flags in other task's structures - * - see asm/thread_info.h for TIF_xxxx flags available + * Set thread flags in other task's structures. 
+ * See asm/thread_info.h for TIF_xxxx flags available: */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { @@ -3307,37 +1456,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } -static inline int restart_syscall(void) -{ - set_tsk_thread_flag(current, TIF_SIGPENDING); - return -ERESTARTNOINTR; -} - -static inline int signal_pending(struct task_struct *p) -{ - return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); -} - -static inline int __fatal_signal_pending(struct task_struct *p) -{ - return unlikely(sigismember(&p->pending.signal, SIGKILL)); -} - -static inline int fatal_signal_pending(struct task_struct *p) -{ - return signal_pending(p) && __fatal_signal_pending(p); -} - -static inline int signal_pending_state(long state, struct task_struct *p) -{ - if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) - return 0; - if (!signal_pending(p)) - return 0; - - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); -} - /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -3379,15 +1497,6 @@ static inline void cond_resched_rcu(void) #endif } -static inline unsigned long get_preempt_disable_ip(struct task_struct *p) -{ -#ifdef CONFIG_DEBUG_PREEMPT - return p->preempt_disable_ip; -#else - return 0; -#endif -} - /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, @@ -3402,114 +1511,12 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } -/* - * Idle thread specific functions to determine the need_resched - * polling state. - */ -#ifdef TIF_POLLING_NRFLAG -static inline int tsk_is_polling(struct task_struct *p) -{ - return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -} - -static inline void __current_set_polling(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_set_polling_and_test(void) -{ - __current_set_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -static inline void __current_clr_polling(void) -{ - clear_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_clr_polling_and_test(void) -{ - __current_clr_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -#else -static inline int tsk_is_polling(struct task_struct *p) { return 0; } -static inline void __current_set_polling(void) { } -static inline void __current_clr_polling(void) { } - -static inline bool __must_check current_set_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -static inline bool __must_check current_clr_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -#endif - -static inline void current_clr_polling(void) -{ - __current_clr_polling(); - - /* - * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. - * Once the bit is cleared, we'll get IPIs with every new - * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also - * fold. - */ - smp_mb(); /* paired with resched_curr() */ - - preempt_fold_need_resched(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); } /* - * Thread group CPU time accounting. 
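[Editor's aside, not part of the diff: the cond_resched() comment earlier in this hunk describes voluntary rescheduling in long loops; a common shape of such a loop follows, with the per-item work elided. After this series, fatal_signal_pending() lives in the new <linux/sched/signal.h>.]

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>

static int example_process_items(unsigned long nr_items)
{
        unsigned long i;

        for (i = 0; i < nr_items; i++) {
                if (fatal_signal_pending(current))
                        return -EINTR;
                /* ... process item i ... */
                cond_resched();         /* give up the CPU if a reschedule is due */
        }
        return 0;
}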
- */ -void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - -/* - * Reevaluate whether the task has signals pending delivery. - * Wake the task if so. - * This is required every time the blocked sigset_t changes. - * callers must hold sighand->siglock. - */ -extern void recalc_sigpending_and_wake(struct task_struct *t); -extern void recalc_sigpending(void); - -extern void signal_wake_up_state(struct task_struct *t, unsigned int state); - -static inline void signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); -} -static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? __TASK_TRACED : 0); -} - -/* * Wrappers for p->thread_info->cpu access. No-op on UP. */ #ifdef CONFIG_SMP @@ -3523,11 +1530,6 @@ static inline unsigned int task_cpu(const struct task_struct *p) #endif } -static inline int task_node(const struct task_struct *p) -{ - return cpu_to_node(task_cpu(p)); -} - extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else @@ -3558,100 +1560,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -#ifdef CONFIG_CGROUP_SCHED -extern struct task_group root_task_group; -#endif /* CONFIG_CGROUP_SCHED */ - -extern int task_can_switch_user(struct user_struct *up, - struct task_struct *tsk); - -#ifdef CONFIG_TASK_XACCT -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.rchar += amt; -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.wchar += amt; -} - -static inline void inc_syscr(struct task_struct *tsk) -{ - tsk->ioac.syscr++; -} - -static inline void inc_syscw(struct task_struct *tsk) -{ - tsk->ioac.syscw++; -} -#else -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void inc_syscr(struct task_struct *tsk) -{ -} - -static inline void inc_syscw(struct task_struct *tsk) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_MEMCG -extern void mm_update_next_owner(struct mm_struct *mm); -#else -static inline void mm_update_next_owner(struct mm_struct *mm) -{ -} -#endif /* CONFIG_MEMCG */ - -static inline unsigned long task_rlimit(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); -} - -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_max); -} - -static inline unsigned long rlimit(unsigned int limit) -{ - return task_rlimit(current, limit); -} - -static inline unsigned long rlimit_max(unsigned int limit) -{ - return task_rlimit_max(current, limit); -} - -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) - -#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) - -#ifdef CONFIG_CPU_FREQ -struct update_util_data { - void (*func)(struct update_util_data *data, u64 time, unsigned int flags); -}; - -void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned int flags)); -void 
cpufreq_remove_update_util_hook(int cpu); -#endif /* CONFIG_CPU_FREQ */ - #endif diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h new file mode 100644 index 000000000000..55cd496df884 --- /dev/null +++ b/include/linux/sched/autogroup.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_AUTOGROUP_H +#define _LINUX_SCHED_AUTOGROUP_H + +struct signal_struct; +struct task_struct; +struct task_group; +struct seq_file; + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } +#endif + +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +#endif /* _LINUX_SCHED_AUTOGROUP_H */ diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h new file mode 100644 index 000000000000..4a68c6791207 --- /dev/null +++ b/include/linux/sched/clock.h @@ -0,0 +1,104 @@ +#ifndef _LINUX_SCHED_CLOCK_H +#define _LINUX_SCHED_CLOCK_H + +#include <linux/smp.h> + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. + */ +extern unsigned long long notrace sched_clock(void); + +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init_late(void) +{ +} + +static inline void sched_clock_tick(void) +{ +} + +static inline void clear_sched_clock_stable(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +extern void sched_clock_init_late(void); +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. 
+ * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +#endif /* _LINUX_SCHED_CLOCK_H */ diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h new file mode 100644 index 000000000000..69eedcef8f03 --- /dev/null +++ b/include/linux/sched/coredump.h @@ -0,0 +1,74 @@ +#ifndef _LINUX_SCHED_COREDUMP_H +#define _LINUX_SCHED_COREDUMP_H + +#include <linux/mm_types.h> + +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* mm flags */ + +/* for SUID_DUMP_* above */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +extern void set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. 
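[Editor's aside, not part of the diff: the note above matters for privilege checks; a sketch that compares against SUID_DUMP_USER explicitly rather than treating the value as a boolean. get_dumpable() and the SUID_DUMP_* values are defined in the new coredump.h shown around this point.]

#include <linux/types.h>
#include <linux/sched/coredump.h>

static bool example_mm_is_plainly_dumpable(struct mm_struct *mm)
{
        /* SUID_DUMP_ROOT is also non-zero but must not be treated as a plain "yes". */
        return get_dumpable(mm) == SUID_DUMP_USER;
}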
+ */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +#endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h new file mode 100644 index 000000000000..d2be2ccbb372 --- /dev/null +++ b/include/linux/sched/cpufreq.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_SCHED_CPUFREQ_H +#define _LINUX_SCHED_CPUFREQ_H + +#include <linux/types.h> + +/* + * Interface between cpufreq drivers and the scheduler: + */ + +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +}; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +#endif /* CONFIG_CPU_FREQ */ + +#endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h new file mode 100644 index 000000000000..4c5b9735c1ae --- /dev/null +++ b/include/linux/sched/cputime.h @@ -0,0 +1,187 @@ +#ifndef _LINUX_SCHED_CPUTIME_H +#define _LINUX_SCHED_CPUTIME_H + +#include <linux/sched/signal.h> + +/* + * cputime accounting APIs: + */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#include <asm/cputime.h> + +#ifndef cputime_to_nsecs +# define cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime); +extern u64 task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct 
task_struct *t, + u64 *utime, u64 *stime) +{ + *utime = t->utime; + *stime = t->stime; +} + +static inline u64 task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; +} +#else +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + task_cputime(t, utimescaled, stimescaled); +} +#endif + +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); + + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + + +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +/** + * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * + * @tsk: Pointer to target task. + */ +#ifdef CONFIG_POSIX_TIMERS +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; + + /* Check if cputimer isn't running. This is accessed without locking. */ + if (!READ_ONCE(cputimer->running)) + return NULL; + + /* + * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime + * in __exit_signal(), we won't account to the signal struct further + * cputime consumed by that task, even though the task can still be + * ticking after __exit_signal(). + * + * In order to keep a consistent behaviour between thread group cputime + * and thread group cputimer accounting, lets also ignore the cputime + * elapsing after __exit_signal() in any thread group timer running. + * + * This makes sure that POSIX CPU clocks and timers are synchronized, so + * that a POSIX CPU timer won't expire while the corresponding POSIX CPU + * clock delta is behind the expiring timer value. + */ + if (unlikely(!tsk->sighand)) + return NULL; + + return cputimer; +} +#else +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + return NULL; +} +#endif + +/** + * account_group_user_time - Maintain utime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void account_group_user_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.utime); +} + +/** + * account_group_system_time - Maintain stime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. 
+ */ +static inline void account_group_system_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.stime); +} + +/** + * account_group_exec_runtime - Maintain exec runtime for a thread group. + * + * @tsk: Pointer to task structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of the thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. + */ +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); +} + +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9089a2ae913d..975be862e083 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,5 +1,7 @@ -#ifndef _SCHED_DEADLINE_H -#define _SCHED_DEADLINE_H +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H + +#include <linux/sched.h> /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -26,4 +28,4 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#endif /* _SCHED_DEADLINE_H */ +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 000000000000..e0eaee54c5a4 --- /dev/null +++ b/include/linux/sched/debug.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_DEBUG_H +#define _LINUX_SCHED_DEBUG_H + +/* + * Various scheduler/task debugging interfaces: + */ + +struct task_struct; + +extern void dump_cpu_task(int cpu); + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +struct pt_regs; + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). + */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_SCHED_DEBUG +struct seq_file; +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? 
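[Editor's aside, not part of the diff: a sketch of the __sched annotation described above; functions placed in .sched.text are ignored when reporting a task's wchan, and in_sched_functions() is the address test for that section. The wait loop and flag are hypothetical.]

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>

static int __sched example_wait_for_flag(atomic_t *flag)
{
        while (!atomic_read(flag))
                schedule_timeout_uninterruptible(1);    /* sleep one jiffy per poll */
        return 0;
}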
*/ +extern int in_sched_functions(unsigned long addr); + +#endif /* _LINUX_SCHED_DEBUG_H */ diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h new file mode 100644 index 000000000000..752ac7e628d7 --- /dev/null +++ b/include/linux/sched/hotplug.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SCHED_HOTPLUG_H +#define _LINUX_SCHED_HOTPLUG_H + +/* + * Scheduler interfaces for hotplug CPU support: + */ + +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h new file mode 100644 index 000000000000..5ca63ebad6b4 --- /dev/null +++ b/include/linux/sched/idle.h @@ -0,0 +1,86 @@ +#ifndef _LINUX_SCHED_IDLE_H +#define _LINUX_SCHED_IDLE_H + +#include <linux/sched.h> + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +extern void wake_up_if_idle(int cpu); + +/* + * Idle thread specific functions to determine the need_resched + * polling state. + */ +#ifdef TIF_POLLING_NRFLAG + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +#else +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +#endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. 
+ */ + smp_mb(); /* paired with resched_curr() */ + + preempt_fold_need_resched(); +} + +#endif /* _LINUX_SCHED_IDLE_H */ diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h new file mode 100644 index 000000000000..127215045285 --- /dev/null +++ b/include/linux/sched/init.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_SCHED_INIT_H +#define _LINUX_SCHED_INIT_H + +/* + * Scheduler init related prototypes: + */ + +extern void sched_init(void); +extern void sched_init_smp(void); + +#endif /* _LINUX_SCHED_INIT_H */ diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h new file mode 100644 index 000000000000..016afa0fb3bb --- /dev/null +++ b/include/linux/sched/jobctl.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_SCHED_JOBCTL_H +#define _LINUX_SCHED_JOBCTL_H + +#include <linux/types.h> + +struct task_struct; + +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + +extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); + +#endif /* _LINUX_SCHED_JOBCTL_H */ diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h new file mode 100644 index 000000000000..4264bc6b2c27 --- /dev/null +++ b/include/linux/sched/loadavg.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_LOADAVG_H +#define _LINUX_SCHED_LOADAVG_H + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
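[Editor's aside, not part of the diff: the fixed-point scheme described above is decoded the same way fs/proc/loadavg.c does it; a sketch using the FSHIFT/FIXED_1 constants and get_avenrun() defined just below. The EXAMPLE_ macro names are made up here.]

#include <linux/sched/loadavg.h>
#include <linux/printk.h>

#define EXAMPLE_LOAD_INT(x)  ((x) >> FSHIFT)
#define EXAMPLE_LOAD_FRAC(x) EXAMPLE_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static void example_print_loadavg(void)
{
        unsigned long loads[3];

        get_avenrun(loads, FIXED_1 / 200, 0);   /* +1/200 for rounding, no extra shift */
        pr_info("load average: %lu.%02lu %lu.%02lu %lu.%02lu\n",
                EXAMPLE_LOAD_INT(loads[0]), EXAMPLE_LOAD_FRAC(loads[0]),
                EXAMPLE_LOAD_INT(loads[1]), EXAMPLE_LOAD_FRAC(loads[1]),
                EXAMPLE_LOAD_INT(loads[2]), EXAMPLE_LOAD_FRAC(loads[2]));
}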
+ */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +extern void calc_global_load(unsigned long ticks); + +#endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h new file mode 100644 index 000000000000..830953ebb391 --- /dev/null +++ b/include/linux/sched/mm.h @@ -0,0 +1,174 @@ +#ifndef _LINUX_SCHED_MM_H +#define _LINUX_SCHED_MM_H + +#include <linux/kernel.h> +#include <linux/atomic.h> +#include <linux/sched.h> +#include <linux/mm_types.h> +#include <linux/gfp.h> + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +/** + * mmgrab() - Pin a &struct mm_struct. + * @mm: The &struct mm_struct to pin. + * + * Make sure that @mm will not get freed even after the owning task + * exits. This doesn't guarantee that the associated address space + * will still exist later on and mmget_not_zero() has to be used before + * accessing it. + * + * This is a preferred way to to pin @mm for a longer/unbounded amount + * of time. + * + * Use mmdrop() to release the reference acquired by mmgrab(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + +/* mmdrop drops the mm and the page tables */ +extern void __mmdrop(struct mm_struct *); +static inline void mmdrop(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +static inline void mmdrop_async_fn(struct work_struct *work) +{ + struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); + __mmdrop(mm); +} + +static inline void mmdrop_async(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) { + INIT_WORK(&mm->async_put_work, mmdrop_async_fn); + schedule_work(&mm->async_put_work); + } +} + +/** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. + * + * Make sure that the address space of the given &struct mm_struct doesn't + * go away. This does not protect against parts of the address space being + * modified or freed, however. + * + * Never use this function to pin this address space for an + * unbounded/indefinite amount of time. + * + * Use mmput() to release the reference acquired by mmget(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. 
Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. + */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. 
+ */ +static inline gfp_t memalloc_noio_flags(gfp_t flags) +{ + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + return flags; +} + +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; +} + +#endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h new file mode 100644 index 000000000000..4995b717500b --- /dev/null +++ b/include/linux/sched/nohz.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SCHED_NOHZ_H +#define _LINUX_SCHED_NOHZ_H + +/* + * This is the interface between the scheduler and nohz/dyntics: + */ + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void nohz_balance_enter_idle(int cpu) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +#ifdef CONFIG_NO_HZ_COMMON +void calc_load_enter_idle(void); +void calc_load_exit_idle(void); +#else +static inline void calc_load_enter_idle(void) { } +static inline void calc_load_exit_idle(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_FULL +extern u64 scheduler_tick_max_deferment(void); +#endif + +#endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h new file mode 100644 index 000000000000..35d5fc77b4be --- /dev/null +++ b/include/linux/sched/numa_balancing.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_SCHED_NUMA_BALANCING_H +#define _LINUX_SCHED_NUMA_BALANCING_H + +/* + * This is the interface between the scheduler and the MM that + * implements memory access pattern based NUMA-balancing: + */ + +#include <linux/sched.h> + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif + +#endif /* _LINUX_SCHED_NUMA_BALANCING_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index d9cf5a5762d9..2cc450f6ec54 100644 --- 
a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -1,5 +1,5 @@ -#ifndef _SCHED_PRIO_H -#define _SCHED_PRIO_H +#ifndef _LINUX_SCHED_PRIO_H +#define _LINUX_SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 @@ -57,4 +57,4 @@ static inline long rlimit_to_nice(long prio) return (MAX_NICE - prio + 1); } -#endif /* _SCHED_PRIO_H */ +#endif /* _LINUX_SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a30b172df6e1..3bd668414f61 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -1,7 +1,9 @@ -#ifndef _SCHED_RT_H -#define _SCHED_RT_H +#ifndef _LINUX_SCHED_RT_H +#define _LINUX_SCHED_RT_H -#include <linux/sched/prio.h> +#include <linux/sched.h> + +struct task_struct; static inline int rt_prio(int prio) { @@ -57,4 +59,4 @@ extern void normalize_rt_tasks(void); */ #define RR_TIMESLICE (100 * HZ / 1000) -#endif /* _SCHED_RT_H */ +#endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h new file mode 100644 index 000000000000..2cf446704cd4 --- /dev/null +++ b/include/linux/sched/signal.h @@ -0,0 +1,613 @@ +#ifndef _LINUX_SCHED_SIGNAL_H +#define _LINUX_SCHED_SIGNAL_H + +#include <linux/rculist.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/sched/jobctl.h> +#include <linux/sched/task.h> +#include <linux/cred.h> + +/* + * Types defining task->signal and task->sighand and APIs using them: + */ + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +/* + * Per-process accounting stats: + */ +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +/* + * This is the atomic variant of task_cputime, which can be used for + * storing and updating task_cputime statistics without locking. + */ +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. 
+ */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + +#ifdef CONFIG_POSIX_TIMERS + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + +#endif + + struct pid *leader_pid; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. + */ + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. 
+ * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. */ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +}; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. + */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(¤t->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(¤t->sighand->siglock); + + schedule(); +} +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t); 
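The kernel_dequeue_signal() helper and the signal_pending() family gathered into this new <linux/sched/signal.h> are intended for kernel threads that opt into signal handling. Below is a minimal, hedged sketch of that usage; example_thread(), its SIGKILL-only policy and the one-second poll interval are invented for illustration and are not part of this patch.

#include <linux/kthread.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>

/* Hypothetical kthread body: drain queued signals, stop on SIGKILL. */
static int example_thread(void *unused)
{
        allow_signal(SIGKILL);          /* kthreads ignore signals unless allowed */

        while (!kthread_should_stop()) {
                if (signal_pending(current) &&
                    kernel_dequeue_signal(NULL) == SIGKILL)
                        break;          /* treat SIGKILL as a request to exit */

                schedule_timeout_interruptible(HZ);
        }
        return 0;
}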
+extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
*/ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(¤t->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = ¤t->blocked; + if (unlikely(test_restore_sigmask())) + res = ¤t->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. + */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); + +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. + */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected. 
*/ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Do to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. + */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == p->signal->leader_pid; +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* _LINUX_SCHED_SIGNAL_H */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h new file mode 100644 index 000000000000..141b74c53fad --- /dev/null +++ b/include/linux/sched/stat.h @@ -0,0 +1,40 @@ +#ifndef _LINUX_SCHED_STAT_H +#define _LINUX_SCHED_STAT_H + +#include <linux/percpu.h> + +/* + * Various counters maintained by the scheduler and fork(), + * exposed via /proc, sys.c or used by drivers via these APIs. + * + * ( Note that all these values are aquired without locking, + * so they can only be relied on in narrow circumstances. 
) + */ + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +#endif /* _LINUX_SCHED_STAT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 49308e142aae..0f5ecd4d298e 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -1,5 +1,9 @@ -#ifndef _SCHED_SYSCTL_H -#define _SCHED_SYSCTL_H +#ifndef _LINUX_SCHED_SYSCTL_H +#define _LINUX_SCHED_SYSCTL_H + +#include <linux/types.h> + +struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK extern int sysctl_hung_task_check_count; @@ -78,4 +82,4 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#endif /* _SCHED_SYSCTL_H */ +#endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h new file mode 100644 index 000000000000..a978d7189cfd --- /dev/null +++ b/include/linux/sched/task.h @@ -0,0 +1,139 @@ +#ifndef _LINUX_SCHED_TASK_H +#define _LINUX_SCHED_TASK_H + +/* + * Interface between the scheduler and various task lifetime (fork()/exit()) + * functionality: + */ + +#include <linux/sched.h> + +struct task_struct; +union thread_union; + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __noreturn do_task_dead(void); + +extern void proc_caches_init(void); + +extern void release_task(struct task_struct * p); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. 
*/ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +extern void free_task(struct task_struct *tsk); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + + +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern int arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) +#endif + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +#endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h new file mode 100644 index 000000000000..df6ea6665b31 --- /dev/null +++ b/include/linux/sched/task_stack.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_SCHED_TASK_STACK_H +#define _LINUX_SCHED_TASK_STACK_H + +/* + * task->stack (kernel stack) handling interfaces: + */ + +#include <linux/sched.h> +#include <linux/magic.h> + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. 
+ */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? + task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +#endif /* _LINUX_SCHED_TASK_STACK_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h new file mode 100644 index 000000000000..7d065abc7a47 --- /dev/null +++ b/include/linux/sched/topology.h @@ -0,0 +1,226 @@ +#ifndef _LINUX_SCHED_TOPOLOGY_H +#define _LINUX_SCHED_TOPOLOGY_H + +#include <linux/topology.h> + +#include <linux/sched/idle.h> + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP + +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +/* + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +extern int arch_asym_cpu_priority(int cpu); + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; + struct sched_group **__percpu sg; + struct sched_group_capacity **__percpu sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + +#endif /* _LINUX_SCHED_TOPOLOGY_H */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h new file mode 100644 index 000000000000..5d5415e129d4 --- /dev/null +++ b/include/linux/sched/user.h @@ -0,0 +1,61 @@ +#ifndef _LINUX_SCHED_USER_H +#define _LINUX_SCHED_USER_H + +#include <linux/uidgid.h> +#include <linux/atomic.h> + +struct key; + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) + atomic_long_t locked_vm; +#endif +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#endif /* _LINUX_SCHED_USER_H */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h new file mode 100644 index 000000000000..d03d8a9047dc --- /dev/null +++ b/include/linux/sched/wake_q.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_SCHED_WAKE_Q_H +#define _LINUX_SCHED_WAKE_Q_H + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock as been + * released. + * + * We hold reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The DEFINE_WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function. Otherwise, the list can be + * re-initialized for later re-use by wake_q_init(). + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. + */ + +#include <linux/sched.h> + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +static inline void wake_q_init(struct wake_q_head *head) +{ + head->first = WAKE_Q_TAIL; + head->lastp = &head->first; +} + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +#endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h new file mode 100644 index 000000000000..a28156a0d34a --- /dev/null +++ b/include/linux/sched/xacct.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_SCHED_XACCT_H +#define _LINUX_SCHED_XACCT_H + +/* + * Extended task accounting methods: + */ + +#include <linux/sched.h> + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#endif /* _LINUX_SCHED_XACCT_H */ diff --git a/include/linux/sem.h b/include/linux/sem.h index d0efd6e6c20a..4fc222f8755d 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -21,7 +21,7 @@ struct sem_array { struct list_head list_id; /* undo 
requests on this array */ int sem_nsems; /* no. of semaphores in array */ int complex_count; /* pending complex operations */ - bool complex_mode; /* no parallel simple ops */ + unsigned int use_global_lock;/* >0: global lock required */ }; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/signal.h b/include/linux/signal.h index 5308304993be..94ad6eea9550 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -1,32 +1,13 @@ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H -#include <linux/list.h> #include <linux/bug.h> -#include <uapi/linux/signal.h> +#include <linux/signal_types.h> struct task_struct; /* for sysctl */ extern int print_fatal_signals; -/* - * Real Time signals may be queued. - */ - -struct sigqueue { - struct list_head list; - int flags; - siginfo_t info; - struct user_struct *user; -}; - -/* flags values. */ -#define SIGQUEUE_PREALLOC 1 - -struct sigpending { - struct list_head list; - sigset_t signal; -}; #ifndef HAVE_ARCH_COPY_SIGINFO @@ -272,42 +253,6 @@ extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -struct sigaction { -#ifndef __ARCH_HAS_IRIX_SIGACTION - __sighandler_t sa_handler; - unsigned long sa_flags; -#else - unsigned int sa_flags; - __sighandler_t sa_handler; -#endif -#ifdef __ARCH_HAS_SA_RESTORER - __sigrestore_t sa_restorer; -#endif - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -#ifdef __ARCH_HAS_KA_RESTORER - __sigrestore_t ka_restorer; -#endif -}; - -#ifdef CONFIG_OLD_SIGACTION -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; -#endif - -struct ksignal { - struct k_sigaction ka; - siginfo_t info; - int sig; -}; - extern int get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h new file mode 100644 index 000000000000..16d862a3d8f3 --- /dev/null +++ b/include/linux/signal_types.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_SIGNAL_TYPES_H +#define _LINUX_SIGNAL_TYPES_H + +/* + * Basic signal handling related data type definitions: + */ + +#include <linux/list.h> +#include <uapi/linux/signal.h> + +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. 
*/ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +#endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index eadbe227c256..4985048640a7 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,7 +8,7 @@ #define _LINUX_SIGNALFD_H #include <uapi/linux/signalfd.h> - +#include <linux/sched/signal.h> #ifdef CONFIG_SIGNALFD diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 69ccd2636911..c776abd86937 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -34,6 +34,7 @@ #include <linux/dma-mapping.h> #include <linux/netdev_features.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h index 3f22932e67a4..f4199e758f97 100644 --- a/include/linux/spi/flash.h +++ b/include/linux/spi/flash.h @@ -7,7 +7,7 @@ struct mtd_partition; * struct flash_platform_data: board-specific flash data * @name: optional flash device name (eg, as used with mtdparts=) * @parts: optional array of mtd_partitions for static partitioning - * @nr_parts: number of mtd_partitions for static partitoning + * @nr_parts: number of mtd_partitions for static partitioning * @type: optional flash device type (e.g. 
m25p80 vs m25p64), for use * with chips that can't be queried for JEDEC or other IDs * diff --git a/include/linux/stat.h b/include/linux/stat.h index 075cb0c7eb2a..c76e524fb34b 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -18,20 +18,32 @@ #include <linux/time.h> #include <linux/uidgid.h> +#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE) + struct kstat { - u64 ino; - dev_t dev; + u32 result_mask; /* What fields the user got */ umode_t mode; unsigned int nlink; + uint32_t blksize; /* Preferred I/O size */ + u64 attributes; +#define KSTAT_ATTR_FS_IOC_FLAGS \ + (STATX_ATTR_COMPRESSED | \ + STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND | \ + STATX_ATTR_NODUMP | \ + STATX_ATTR_ENCRYPTED \ + )/* Attrs corresponding to FS_*_FL flags */ + u64 ino; + dev_t dev; + dev_t rdev; kuid_t uid; kgid_t gid; - dev_t rdev; loff_t size; - struct timespec atime; + struct timespec atime; struct timespec mtime; struct timespec ctime; - unsigned long blksize; - unsigned long long blocks; + struct timespec btime; /* File creation time */ + u64 blocks; }; #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index b1bc62ba20a2..8fd3504946ad 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -32,6 +32,7 @@ */ #define UNX_MAXNODENAME __NEW_UTS_LEN #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) +#define UNX_NGROUPS 16 struct rpcsec_gss_info; @@ -63,9 +64,6 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - unsigned long cr_magic; /* 0x0f4aa4f0 */ -#endif unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ atomic_t cr_count; /* ref count */ @@ -79,8 +77,6 @@ struct rpc_cred { #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 -#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 - /* rpc_auth au_flags */ #define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 8a511c0985aa..270bad0e1bed 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -63,15 +63,6 @@ struct cache_head { #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ -struct cache_detail_procfs { - struct proc_dir_entry *proc_ent; - struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; -}; - -struct cache_detail_pipefs { - struct dentry *dir; -}; - struct cache_detail { struct module * owner; int hash_size; @@ -123,9 +114,9 @@ struct cache_detail { time_t last_warn; /* when we last warned about no readers */ union { - struct cache_detail_procfs procfs; - struct cache_detail_pipefs pipefs; - } u; + struct proc_dir_entry *procfs; + struct dentry *pipefs; + }; struct net *net; }; @@ -204,8 +195,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) kref_put(&h->ref, cd->cache_put); } -static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) +static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } @@ -227,6 +221,7 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); 
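The struct kstat rework a few hunks above adds a result_mask, an attributes word and a btime (file creation time) field for the statx() plumbing. The following is a hedged sketch of how a filesystem's getattr path might fill the new fields; example_fill_kstat_extras(), its parameters and the source of the immutable flag are invented, and only the kstat fields, STATX_BTIME and STATX_ATTR_IMMUTABLE are taken from the kernel headers.

#include <linux/stat.h>
#include <linux/time.h>
#include <linux/types.h>

/* Illustrative only: report creation time and the immutable attribute. */
static void example_fill_kstat_extras(struct kstat *stat,
                                      const struct timespec *btime,
                                      bool immutable)
{
        stat->btime = *btime;                   /* file creation time */
        stat->result_mask |= STATX_BTIME;       /* mark the field as valid */

        if (immutable)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
}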
+extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ extern void *cache_seq_start(struct seq_file *file, loff_t *pos); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 333ad11b3dd9..6095ecba0dde 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -182,7 +182,6 @@ int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); -unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); @@ -202,8 +201,9 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); -void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, - unsigned long timeo); +void rpc_set_connect_timeout(struct rpc_clnt *clnt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 59a7889e15db..8da0f37f3bdc 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -20,33 +20,55 @@ extern unsigned int nfsd_debug; extern unsigned int nlm_debug; #endif -#define dprintk(args...) dfprintk(FACILITY, ## args) -#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) +#define dprintk(fmt, ...) \ + dfprintk(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_cont(fmt, ...) \ + dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu(fmt, ...) \ + dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu_cont(fmt, ...) \ + dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) -# define dfprintk(fac, args...) \ - do { \ - ifdebug(fac) \ - printk(KERN_DEFAULT args); \ - } while (0) - -# define dfprintk_rcu(fac, args...) \ - do { \ - ifdebug(fac) { \ - rcu_read_lock(); \ - printk(KERN_DEFAULT args); \ - rcu_read_unlock(); \ - } \ - } while (0) +# define dfprintk(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_rcu(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) + +# define dfprintk_rcu_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) -# define dfprintk(fac, args...) do {} while (0) -# define dfprintk_rcu(fac, args...) do {} while (0) +# define dfprintk(fac, fmt, ...) do {} while (0) +# define dfprintk_cont(fac, fmt, ...) do {} while (0) +# define dfprintk_rcu(fac, fmt, ...) 
do {} while (0) # define RPC_IFDEBUG(x) #endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index cfda6adcf33c..245fc59b7324 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -110,6 +110,15 @@ struct rpcrdma_msg { }; /* + * XDR sizes, in quads + */ +enum { + rpcrdma_fixed_maxsz = 4, + rpcrdma_segment_maxsz = 4, + rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, +}; + +/* * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks */ #define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7321ae933867..e770abeed32d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -400,10 +400,14 @@ struct svc_version { struct svc_procedure * vs_proc; /* per-procedure info */ u32 vs_xdrsize; /* xdrsize needed for this version */ - unsigned int vs_hidden : 1, /* Don't register with portmapper. - * Only used for nfsacl so far. */ - vs_rpcb_optnl:1;/* Don't care the result of register. - * Only used for nfsv4. */ + /* Don't register with rpcbind */ + bool vs_hidden; + + /* Don't care if the rpcbind registration fails */ + bool vs_rpcb_optnl; + + /* Need xprt with congestion control */ + bool vs_need_cong_ctrl; /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 757fb963696c..b105f73e3ca2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod; * completes. */ struct svc_rdma_op_ctxt { - struct list_head free; + struct list_head list; struct svc_rdma_op_ctxt *read_hdr; struct svc_rdma_fastreg_mr *frmr; int hdr_count; @@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt { struct ib_cqe cqe; struct ib_cqe reg_cqe; struct ib_cqe inv_cqe; - struct list_head dto_q; u32 byte_len; u32 position; struct svcxprt_rdma *xprt; @@ -141,7 +140,8 @@ struct svcxprt_rdma { atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ unsigned int sc_rq_depth; /* Depth of RQ */ - u32 sc_max_requests; /* Forward credits */ + __be32 sc_fc_credits; /* Forward credits */ + u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ @@ -171,7 +171,6 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; - struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ struct list_head sc_read_complete_q; struct work_struct sc_work; }; @@ -214,11 +213,7 @@ extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); -extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, - struct rpcrdma_msg *, - struct rpcrdma_msg *, - enum rpcrdma_proc); -extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); +extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp); /* svc_rdma_recvfrom.c */ extern int svc_rdma_recvfrom(struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 7440290f64ac..ddb7f94a9d06 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -67,6 +67,7 @@ struct svc_xprt { #define XPT_CACHE_AUTH 11 
/* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ +#define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h index d222f47550af..11a7536c0fd2 100644 --- a/include/linux/sunrpc/types.h +++ b/include/linux/sunrpc/types.h @@ -10,6 +10,7 @@ #define _LINUX_SUNRPC_TYPES_H_ #include <linux/timer.h> +#include <linux/sched/signal.h> #include <linux/workqueue.h> #include <linux/sunrpc/debug.h> #include <linux/list.h> diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 56c48c884a24..054c8cde18f3 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -242,6 +242,185 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, + size_t maxlen, gfp_t gfp_flags); +/** + * xdr_align_size - Calculate padded size of an object + * @n: Size of an object being XDR encoded (in bytes) + * + * Return value: + * Size (in bytes) of the object including xdr padding + */ +static inline size_t +xdr_align_size(size_t n) +{ + const size_t mask = sizeof(__u32) - 1; + + return (n + mask) & ~mask; +} + +/** + * xdr_stream_encode_u32 - Encode a 32-bit integer + * @xdr: pointer to xdr_stream + * @n: integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = cpu_to_be32(n); + return len; +} + +/** + * xdr_stream_encode_u64 - Encode a 64-bit integer + * @xdr: pointer to xdr_stream + * @n: 64-bit integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_hyper(p, n); + return len; +} + +/** + * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque_fixed(p, ptr, len); + return xdr_align_size(len); +} + +/** + * xdr_stream_encode_opaque - Encode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t 
+xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque(p, ptr, len); + return count; +} + +/** + * xdr_stream_decode_u32 - Decode a 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = be32_to_cpup(p); + return 0; +} + +/** + * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store data + * @len: size of buffer pointed to by @ptr + * + * Return values: + * On success, returns size of object stored in @ptr + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) +{ + __be32 *p = xdr_inline_decode(xdr, len); + + if (unlikely(!p)) + return -EBADMSG; + xdr_decode_opaque_fixed(p, ptr, len); + return len; +} + +/** + * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store pointer to opaque data + * @maxlen: maximum acceptable object size + * + * Note: the pointer stored in @ptr cannot be assumed valid after the XDR + * buffer has been destroyed, or even after calling xdr_inline_decode() + * on @xdr. It is therefore expected that the object it points to should + * be processed immediately. + * + * Return values: + * On success, returns size of object stored in *@ptr + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the object would exceed @maxlen + */ +static inline ssize_t +xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) +{ + __be32 *p; + __u32 len; + + *ptr = NULL; + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EBADMSG; + if (unlikely(len > maxlen)) + return -EMSGSIZE; + *ptr = p; + } + return len; +} #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a5da60b24d83..eab1c749e192 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -137,6 +137,9 @@ struct rpc_xprt_ops { void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); + void (*set_connect_timeout)(struct rpc_xprt *xprt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); @@ -221,6 +224,7 @@ struct rpc_xprt { struct timer_list timer; unsigned long last_used, idle_timeout, + connect_timeout, max_reconnect_timeout; /* diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index bef3fb0abb8f..c9959d7e3579 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -55,6 +55,8 @@ struct sock_xprt { size_t rcvsize, sndsize; + struct rpc_timeout tcp_timeout; + /* * Saved socket callback addresses */ @@ -81,6 +83,7 @@ struct sock_xprt { #define 
XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) +#define XPRT_SOCK_UPD_TIMEOUT (3) #endif /* __KERNEL__ */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 91a740f6b884..980c3c9b06f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -48,6 +48,7 @@ struct stat; struct stat64; struct statfs; struct statfs64; +struct statx; struct __sysctl_args; struct sysinfo; struct timespec; @@ -902,5 +903,7 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); +asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, + unsigned mask, struct statx __user *buffer); #endif diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index 58de6edf751f..e2a5daf8d14f 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -8,7 +8,7 @@ #define _LINUX_TASKSTATS_KERN_H #include <linux/taskstats.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #ifdef CONFIG_TASKSTATS diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e275e98bdceb..dab11f97e1c6 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -194,7 +194,7 @@ struct thermal_attr { * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @idr: &struct idr to generate unique id for this zone's cooling + * @ida: &struct ida to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list * @node: node in thermal_tz_list (in thermal_core.c) @@ -227,7 +227,7 @@ struct thermal_zone_device { struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; - struct idr idr; + struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index d2e804e15c3e..b598cbc7b576 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -8,6 +8,10 @@ void timekeeping_init(void); extern int timekeeping_suspended; +/* Architecture timer tick functions: */ +extern void update_process_times(int user); +extern void xtime_update(unsigned long ticks); + /* * Get and set timeofday */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 5a209b84fd9e..e6789b8757d5 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -61,6 +61,8 @@ struct timer_list { #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 +#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) + #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ @@ -210,7 +212,7 @@ struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -#include <linux/sysctl.h> +struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 363e0e8082a9..be765234c0a2 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -5,6 +5,9 @@ #include <linux/nsproxy.h> #include <linux/ns_common.h> 
#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/rwsem.h> +#include <linux/sysctl.h> #include <linux/err.h> #define UID_GID_MAP_MAX_EXTENTS 5 diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 26c155bb639b..8355bab175e1 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -7,6 +7,8 @@ #include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> +struct irq_affinity; + /** * virtio_config_ops - operations for configuring a virtio device * @get: read the value of a configuration field @@ -56,6 +58,7 @@ * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. + * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -68,14 +71,15 @@ struct virtio_config_ops { void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); + const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, + int index); }; /* If driver didn't advertise the feature, it will never appear. */ @@ -169,7 +173,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL); if (err < 0) return ERR_PTR(err); return vq; diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..1081db987391 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -12,7 +12,7 @@ static inline void vmacache_flush(struct task_struct *tsk) { - memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } extern void vmacache_flush_all(struct mm_struct *mm); diff --git a/include/linux/wait.h b/include/linux/wait.h index 1421132e9086..aacb1282d19a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -6,6 +6,7 @@ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> + #include <asm/current.h> #include <uapi/linux/wait.h> diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 35a4d8185b51..a786e5e8973b 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -117,6 +117,7 @@ struct watchdog_device { #define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? 
*/ #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ +#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ struct list_head deferred; }; @@ -151,6 +152,12 @@ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); } +/* Use the following function to stop the watchdog when unregistering it */ +static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) +{ + set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a26cc437293c..bde063cefd04 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -106,9 +106,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ - ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) + ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) struct delayed_work { struct work_struct work; diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index e1006b391cdc..bee1404391dd 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -174,10 +174,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. - * @p_cur: The control's current value represented via an union with + * @p_cur: The control's current value represented via a union with * provides a standard way of accessing control types * through a pointer. - * @p_new: The control's new value represented via an union with provides + * @p_new: The control's new value represented via a union with provides * a standard way of accessing control types * through a pointer. 
*/ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 574ff2ae94be..6cd94e5ee113 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -12,6 +12,7 @@ #include <linux/poll.h> #include <linux/fs.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/compiler.h> /* need __user */ #include <linux/videodev2.h> diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 90708f68cc02..95ccc1eef558 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -26,6 +26,8 @@ #define __HCI_CORE_H #include <linux/leds.h> +#include <linux/rculist.h> + #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sock.h> diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index b8d637225a07..c0452de83086 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -25,6 +25,8 @@ #define _LINUX_NET_BUSY_POLL_H #include <linux/netdevice.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <net/ip.h> #ifdef CONFIG_NET_RX_BUSY_POLL diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c92dc03c8528..ead1aa6d003e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1948,7 +1948,7 @@ struct cfg80211_deauth_request { * struct cfg80211_disassoc_request - Disassociation request data * * This structure provides information needed to complete IEEE 802.11 - * disassocation. + * disassociation. * * @bss: the BSS to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b9a08cd1d97d..a3bab3c5ecfb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3392,7 +3392,7 @@ enum ieee80211_reconfig_type { * since there won't be any time to beacon before the switch anyway. * @pre_channel_switch: This is an optional callback that is called * before a channel switch procedure is started (ie. when a STA - * gets a CSA or an userspace initiated channel-switch), allowing + * gets a CSA or a userspace initiated channel-switch), allowing * the driver to prepare for the channel switch. 
* @post_channel_switch: This is an optional callback that is called * after a channel switch procedure is completed, allowing the diff --git a/include/net/scm.h b/include/net/scm.h index 59fa93c01d2a..142ea9e7a6d0 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -3,6 +3,7 @@ #include <linux/limits.h> #include <linux/net.h> +#include <linux/cred.h> #include <linux/security.h> #include <linux/pid.h> #include <linux/nsproxy.h> diff --git a/include/rdma/ib.h b/include/rdma/ib.h index a6b93706b0fc..9b4c22a36931 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -35,6 +35,7 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/cred.h> struct ib_addr { union { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 89f5bd4e1d52..0f1813c13687 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -60,6 +60,7 @@ #include <linux/atomic.h> #include <linux/mmu_notifier.h> #include <linux/uaccess.h> +#include <linux/cgroup_rdma.h> extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -1356,6 +1357,12 @@ struct ib_fmr_attr { struct ib_umem; +struct ib_rdmacg_object { +#ifdef CONFIG_CGROUP_RDMA + struct rdma_cgroup *cg; /* owner rdma cgroup */ +#endif +}; + struct ib_ucontext { struct ib_device *device; struct list_head pd_list; @@ -1388,6 +1395,8 @@ struct ib_ucontext { struct list_head no_private_counters; int odp_mrs_count; #endif + + struct ib_rdmacg_object cg_obj; }; struct ib_uobject { @@ -1395,6 +1404,7 @@ struct ib_uobject { struct ib_ucontext *context; /* associated user context */ void *object; /* containing object */ struct list_head list; /* link to context's list */ + struct ib_rdmacg_object cg_obj; /* rdmacg object */ int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ @@ -1843,53 +1853,6 @@ struct ib_cache { struct ib_port_cache *ports; }; -struct ib_dma_mapping_ops { - int (*mapping_error)(struct ib_device *dev, - u64 dma_addr); - u64 (*map_single)(struct ib_device *dev, - void *ptr, size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - u64 (*map_page)(struct ib_device *dev, - struct page *page, unsigned long offset, - size_t size, - enum dma_data_direction direction); - void (*unmap_page)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - int (*map_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*unmap_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*sync_single_for_cpu)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void *(*alloc_coherent)(struct ib_device *dev, - size_t size, - u64 *dma_handle, - gfp_t flag); - void (*free_coherent)(struct ib_device *dev, - size_t size, void *cpu_addr, - u64 dma_handle); -}; - struct iw_cm_verbs; struct ib_port_immutable { @@ -1900,8 +1863,6 @@ struct ib_port_immutable { }; struct ib_device { - struct 
device *dma_device; - char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -2151,7 +2112,6 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; @@ -2178,6 +2138,10 @@ struct ib_device { struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; +#ifdef CONFIG_CGROUP_RDMA + struct rdmacg_device cg_device; +#endif + /** * The following mandatory functions are used only at device * registration. Keep functions such as these at the end of this @@ -3043,9 +3007,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - if (dev->dma_ops) - return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dev->dma_device, dma_addr); + return dma_mapping_error(&dev->dev, dma_addr); } /** @@ -3059,9 +3021,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); - return dma_map_single(dev->dma_device, cpu_addr, size, direction); + return dma_map_single(&dev->dev, cpu_addr, size, direction); } /** @@ -3075,28 +3035,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_single(dev, addr, size, direction); - else - dma_unmap_single(dev->dma_device, addr, size, direction); -} - -static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, - direction, dma_attrs); -} - -static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_unmap_single_attrs(dev->dma_device, addr, size, - direction, dma_attrs); + dma_unmap_single(&dev->dev, addr, size, direction); } /** @@ -3113,9 +3052,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_page(dev, page, offset, size, direction); - return dma_map_page(dev->dma_device, page, offset, size, direction); + return dma_map_page(&dev->dev, page, offset, size, direction); } /** @@ -3129,10 +3066,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_page(dev, addr, size, direction); - else - dma_unmap_page(dev->dma_device, addr, size, direction); + dma_unmap_page(&dev->dev, addr, size, direction); } /** @@ -3146,9 +3080,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_sg(dev, sg, nents, direction); - return dma_map_sg(dev->dma_device, sg, nents, direction); + return dma_map_sg(&dev->dev, sg, nents, direction); } /** @@ -3162,10 +3094,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_sg(dev, sg, nents, direction); - else - 
dma_unmap_sg(dev->dma_device, sg, nents, direction); + dma_unmap_sg(&dev->dev, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3173,12 +3102,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3186,12 +3110,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3233,10 +3152,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); - else - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); } /** @@ -3251,10 +3167,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); - else - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + dma_sync_single_for_device(&dev->dev, addr, size, dir); } /** @@ -3266,19 +3179,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, */ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, + dma_addr_t *dma_handle, gfp_t flag) { - if (dev->dma_ops) - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); - else { - dma_addr_t handle; - void *ret; - - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); - *dma_handle = handle; - return ret; - } + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); } /** @@ -3290,12 +3194,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, */ static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, - u64 dma_handle) + dma_addr_t dma_handle) { - if (dev->dma_ops) - dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); - else - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); } /** diff --git a/include/sound/control.h b/include/sound/control.h index 21d047f229a1..bd7246de58e7 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -22,6 +22,7 @@ * */ +#include <linux/wait.h> #include <sound/asound.h> #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 1277e9ba0318..ff1a4f4cd66d 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -55,8 +55,12 @@ extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct 
iscsi_cmd *, struct iscsi_scsi_req *); -extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, - struct iscsi_cmd **); +extern int +__iscsit_check_dataout_hdr(struct iscsi_conn *, void *, + struct iscsi_cmd *, u32, bool *); +extern int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd); extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, bool); extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *, @@ -125,6 +129,9 @@ extern void iscsit_release_cmd(struct iscsi_cmd *); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); +extern struct iscsi_cmd * +iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn, + itt_t init_task_tag, u32 length); /* * From iscsi_target_nego.c diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 878560e60c75..37c274e61acc 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -4,7 +4,9 @@ #include <linux/configfs.h> /* struct config_group */ #include <linux/dma-direction.h> /* enum dma_data_direction */ #include <linux/percpu_ida.h> /* struct percpu_ida */ +#include <linux/percpu-refcount.h> #include <linux/semaphore.h> /* struct semaphore */ +#include <linux/completion.h> #define TARGET_CORE_VERSION "v5.0" @@ -197,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN = 0xff, }; /* fabric independent task management response values */ @@ -397,7 +400,6 @@ struct se_tmr_req { void *fabric_tmr_ptr; struct se_cmd *task_cmd; struct se_device *tmr_dev; - struct se_lun *tmr_lun; struct list_head tmr_list; }; @@ -488,8 +490,6 @@ struct se_cmd { #define CMD_T_COMPLETE (1 << 2) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) -#define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_BUSY (1 << 9) #define CMD_T_TAS (1 << 10) #define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; @@ -732,6 +732,7 @@ struct se_lun { struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_ref_comp; + struct completion lun_shutdown_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; struct hlist_node link; @@ -767,6 +768,8 @@ struct se_device { u32 dev_index; u64 creation_time; atomic_long_t num_resets; + atomic_long_t aborts_complete; + atomic_long_t aborts_no_task; atomic_long_t num_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 358041bad1da..d7dd1427fe0d 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -47,7 +47,7 @@ struct target_core_fabric_ops { u32 (*tpg_get_inst_index)(struct se_portal_group *); /* * Optional to release struct se_cmd and fabric dependent allocated - * I/O descriptor in transport_cmd_check_stop(). + * I/O descriptor after command execution has finished. * * Returning 1 will signal a descriptor has been released. * Returning 0 will signal a descriptor has not been released. 
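The ib_verbs.h hunk above removes the per-device struct ib_dma_mapping_ops indirection, so the ib_dma_* wrappers now forward straight to the generic DMA API on &dev->dev. A minimal sketch of a caller, using only the wrapper signatures shown in that hunk; the helper names, buffer, and error value here are illustrative assumptions, not part of the patch:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper: map a kernel buffer for a device-bound transfer. */
static int example_map_buf(struct ib_device *ibdev, void *buf, size_t len,
			   u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma_addr))
		return -ENOMEM;	/* mapping failed; no resources to undo */
	return 0;
}

/* Hypothetical ULP helper: release the mapping created above. */
static void example_unmap_buf(struct ib_device *ibdev, u64 dma_addr, size_t len)
{
	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
}

Callers are unchanged by the patch; only the wrapper bodies lose the dma_ops branch, which is why the hunk can delete struct ib_dma_mapping_ops wholesale.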
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 88d18a8ceb59..a3c3cab643a9 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -184,7 +184,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, TRACE_EVENT_CONDITION(btrfs_get_extent, - TP_PROTO(struct btrfs_root *root, struct inode *inode, + TP_PROTO(struct btrfs_root *root, struct btrfs_inode *inode, struct extent_map *map), TP_ARGS(root, inode, map), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 01b3c9869a0d..c80fcad0a6c9 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -6,8 +6,8 @@ #include <linux/tracepoint.h> -#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev) -#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino TRACE_DEFINE_ENUM(NODE); TRACE_DEFINE_ENUM(DATA); @@ -55,25 +55,35 @@ TRACE_DEFINE_ENUM(CP_DISCARD); { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) -#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) -#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) - -#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ - show_bio_extra(op_flags) +#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\ + REQ_PRIO) +#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS) + +#define show_bio_type(op,op_flags) show_bio_op(op), \ + show_bio_op_flags(op_flags) + +#define show_bio_op(op) \ + __print_symbolic(op, \ + { REQ_OP_READ, "READ" }, \ + { REQ_OP_WRITE, "WRITE" }, \ + { REQ_OP_FLUSH, "FLUSH" }, \ + { REQ_OP_DISCARD, "DISCARD" }, \ + { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \ + { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ + { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ + { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ + { REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" }) #define show_bio_op_flags(flags) \ __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ - { 0, "WRITE" }, \ - { REQ_RAHEAD, "READAHEAD" }, \ - { REQ_SYNC, "REQ_SYNC" }, \ - { REQ_PREFLUSH, "REQ_PREFLUSH" }, \ - { REQ_FUA, "REQ_FUA" }) - -#define show_bio_extra(type) \ - __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ + { REQ_RAHEAD, "(RA)" }, \ + { REQ_SYNC, "(S)" }, \ + { REQ_SYNC | REQ_PRIO, "(SP)" }, \ { REQ_META, "(M)" }, \ - { REQ_PRIO, "(P)" }, \ { REQ_META | REQ_PRIO, "(MP)" }, \ + { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \ + { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \ + { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \ { 0, " \b" }) #define show_data_type(type) \ @@ -235,7 +245,7 @@ TRACE_EVENT(f2fs_sync_fs, ), TP_printk("dev = (%d,%d), superblock is %s, wait = %d", - show_dev(__entry), + show_dev(__entry->dev), __entry->dirty ? 
"dirty" : "not dirty", __entry->wait) ); @@ -305,6 +315,13 @@ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, TP_ARGS(inode, ret) ); +DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + DEFINE_EVENT(f2fs__inode, f2fs_truncate, TP_PROTO(struct inode *inode), @@ -534,7 +551,7 @@ TRACE_EVENT(f2fs_background_gc, ), TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->wait_ms, __entry->prefree, __entry->free) @@ -555,6 +572,7 @@ TRACE_EVENT(f2fs_get_victim, __field(int, alloc_mode) __field(int, gc_mode) __field(unsigned int, victim) + __field(unsigned int, cost) __field(unsigned int, ofs_unit) __field(unsigned int, pre_victim) __field(unsigned int, prefree) @@ -568,20 +586,23 @@ TRACE_EVENT(f2fs_get_victim, __entry->alloc_mode = p->alloc_mode; __entry->gc_mode = p->gc_mode; __entry->victim = p->min_segno; + __entry->cost = p->min_cost; __entry->ofs_unit = p->ofs_unit; __entry->pre_victim = pre_victim; __entry->prefree = prefree; __entry->free = free; ), - TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " - "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", - show_dev(__entry), + TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), " + "victim = %u, cost = %u, ofs_unit = %u, " + "pre_victim_secno = %d, prefree = %u, free = %u", + show_dev(__entry->dev), show_data_type(__entry->type), show_gc_type(__entry->gc_type), show_alloc_mode(__entry->alloc_mode), show_victim_policy(__entry->gc_mode), __entry->victim, + __entry->cost, __entry->ofs_unit, (int)__entry->pre_victim, __entry->prefree, @@ -713,7 +734,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks, ), TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu", - show_dev(__entry), + show_dev(__entry->dev), (unsigned int)__entry->nid, __entry->ofs_in_node, (unsigned long long)__entry->count) @@ -753,7 +774,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, (unsigned long)__entry->index, (unsigned long long)__entry->old_blkaddr, (unsigned long long)__entry->new_blkaddr, - show_bio_type(__entry->op_flags), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type)) ); @@ -775,15 +796,15 @@ DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, TP_CONDITION(page->mapping) ); -DECLARE_EVENT_CLASS(f2fs__submit_bio, +DECLARE_EVENT_CLASS(f2fs__bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_STRUCT__entry( __field(dev_t, dev) + __field(dev_t, target) __field(int, op) __field(int, op_flags) __field(int, type) @@ -793,37 +814,55 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_fast_assign( __entry->dev = sb->s_dev; - __entry->op = fio->op; - __entry->op_flags = fio->op_flags; - __entry->type = fio->type; + __entry->target = bio->bi_bdev->bd_dev; + __entry->op = bio_op(bio); + __entry->op_flags = bio->bi_opf; + __entry->type = type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), - TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u", - show_dev(__entry), - show_bio_type(__entry->op_flags), + TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u", + show_dev(__entry->target), + show_dev(__entry->dev), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type), (unsigned long long)__entry->sector, 
__entry->size) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); @@ -1082,16 +1121,16 @@ TRACE_EVENT(f2fs_write_checkpoint, ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", - show_dev(__entry), + show_dev(__entry->dev), show_cpreason(__entry->reason), __entry->msg) ); TRACE_EVENT(f2fs_issue_discard, - TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen), + TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), - TP_ARGS(sb, blkstart, blklen), + TP_ARGS(dev, blkstart, blklen), TP_STRUCT__entry( __field(dev_t, dev) @@ -1100,22 +1139,22 @@ TRACE_EVENT(f2fs_issue_discard, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; __entry->blklen = blklen; ), TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart, (unsigned long long)__entry->blklen) ); TRACE_EVENT(f2fs_issue_reset_zone, - TP_PROTO(struct super_block *sb, block_t blkstart), + TP_PROTO(struct block_device *dev, block_t blkstart), - TP_ARGS(sb, blkstart), + TP_ARGS(dev, blkstart), TP_STRUCT__entry( __field(dev_t, dev) @@ -1123,21 +1162,21 @@ TRACE_EVENT(f2fs_issue_reset_zone, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; ), TP_printk("dev = (%d,%d), reset zone at block = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart) ); TRACE_EVENT(f2fs_issue_flush, - TP_PROTO(struct super_block *sb, unsigned int nobarrier, + TP_PROTO(struct block_device *dev, unsigned int nobarrier, unsigned int flush_merge), - TP_ARGS(sb, nobarrier, flush_merge), + TP_ARGS(dev, nobarrier, flush_merge), TP_STRUCT__entry( __field(dev_t, dev) @@ -1146,13 +1185,13 @@ TRACE_EVENT(f2fs_issue_flush, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->nobarrier = nobarrier; __entry->flush_merge = flush_merge; ), TP_printk("dev = (%d,%d), %s %s", - show_dev(__entry), + show_dev(__entry->dev), __entry->nobarrier ? "skip (nobarrier)" : "issue", __entry->flush_merge ? 
" with flush_merge" : "") ); @@ -1267,7 +1306,7 @@ TRACE_EVENT(f2fs_shrink_extent_tree, ), TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->node_cnt, __entry->tree_cnt) ); @@ -1314,7 +1353,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, ), TP_printk("dev = (%d,%d), %s, dirty count = %lld", - show_dev(__entry), + show_dev(__entry->dev), show_file_type(__entry->type), __entry->count) ); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9b90c57517a9..9e3ef6c99e4b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -4,7 +4,7 @@ #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H -#include <linux/sched.h> +#include <linux/sched/numa_balancing.h> #include <linux/tracepoint.h> #include <linux/binfmts.h> diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1bca99dbb98f..80787eafba99 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init, TP_ARGS(timer) ); +#define decode_timer_flags(flags) \ + __print_flags(flags, "|", \ + { TIMER_MIGRATING, "M" }, \ + { TIMER_DEFERRABLE, "D" }, \ + { TIMER_PINNED, "P" }, \ + { TIMER_IRQSAFE, "I" }) + /** * timer_start - called when the timer is started * @timer: pointer to struct timer_list @@ -65,9 +72,12 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, - (long)__entry->expires - __entry->now, __entry->flags) + (long)__entry->expires - __entry->now, + __entry->flags & TIMER_CPUMASK, + __entry->flags >> TIMER_ARRAYSHIFT, + decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK)) ); /** diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 1c80efb67d10..dd9820b1c779 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -466,6 +466,7 @@ header-y += virtio_console.h header-y += virtio_gpu.h header-y += virtio_ids.h header-y += virtio_input.h +header-y += virtio_mmio.h header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h index 021ed331dd71..744b3d060968 100644 --- a/include/uapi/linux/auto_dev-ioctl.h +++ b/include/uapi/linux/auto_dev-ioctl.h @@ -113,17 +113,13 @@ struct autofs_dev_ioctl { static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) { - memset(in, 0, sizeof(struct autofs_dev_ioctl)); + memset(in, 0, AUTOFS_DEV_IOCTL_SIZE); in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; - in->size = sizeof(struct autofs_dev_ioctl); + in->size = AUTOFS_DEV_IOCTL_SIZE; in->ioctlfd = -1; } -/* - * If you change this make sure you make the corresponding change - * to autofs-dev-ioctl.c:lookup_ioctl() - */ enum { /* Get various version info */ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, @@ -160,8 +156,6 @@ enum { AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, }; -#define AUTOFS_IOCTL 0x93 - #define AUTOFS_DEV_IOCTL_VERSION \ _IOWR(AUTOFS_IOCTL, \ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 1bfc3ed8b284..aa63451ef20a 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -61,12 +61,23 @@ 
struct autofs_packet_expire { char name[NAME_MAX+1]; }; -#define AUTOFS_IOC_READY _IO(0x93, 0x60) -#define AUTOFS_IOC_FAIL _IO(0x93, 0x61) -#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62) -#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int) -#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t) -#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long) -#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire) +#define AUTOFS_IOCTL 0x93 + +enum { + AUTOFS_IOC_READY_CMD = 0x60, + AUTOFS_IOC_FAIL_CMD, + AUTOFS_IOC_CATATONIC_CMD, + AUTOFS_IOC_PROTOVER_CMD, + AUTOFS_IOC_SETTIMEOUT_CMD, + AUTOFS_IOC_EXPIRE_CMD, +}; + +#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD) +#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD) +#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD) +#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire) #endif /* _UAPI_LINUX_AUTO_FS_H */ diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index 8f8f1bdcca8c..7c6da423d54e 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -148,10 +148,16 @@ union autofs_v5_packet_union { autofs_packet_expire_direct_t expire_direct; }; -#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int) -#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int) -#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int) +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */ + AUTOFS_IOC_PROTOSUBVER_CMD, + AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */ +}; + +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int) +#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int) #endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index beed138bd359..813afd6eee71 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -63,5 +63,10 @@ #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ #define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ +#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */ +#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */ +#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ +#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ + #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h index 7550e9176a54..c111a91adcc0 100644 --- a/include/uapi/linux/netfilter.h +++ b/include/uapi/linux/netfilter.h @@ -3,7 +3,6 @@ #include <linux/types.h> #include <linux/compiler.h> -#include <linux/sysctl.h> #include <linux/in.h> #include <linux/in6.h> diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h index 
3efc0ca18345..79da349f1060 100644 --- a/include/uapi/linux/netfilter/xt_hashlimit.h +++ b/include/uapi/linux/netfilter/xt_hashlimit.h @@ -2,6 +2,7 @@ #define _UAPI_XT_HASHLIMIT_H #include <linux/types.h> +#include <linux/limits.h> #include <linux/if.h> /* timings are in milliseconds. */ diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 0df7bd5d2fb1..c3be256107c6 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -32,7 +32,8 @@ #define NFSEXP_ASYNC 0x0010 #define NFSEXP_GATHERED_WRITES 0x0020 #define NFSEXP_NOREADDIRPLUS 0x0040 -/* 80 100 currently unused */ +#define NFSEXP_SECURITY_LABEL 0x0080 +/* 0x100 currently unused */ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ @@ -53,7 +54,7 @@ #define NFSEXP_PNFS 0x20000 /* All flags that we claim to support. (Note we don't support NOACL.) */ -#define NFSEXP_ALLFLAGS 0x3FE7F +#define NFSEXP_ALLFLAGS 0x3FEFF /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h new file mode 100644 index 000000000000..307acbc82d80 --- /dev/null +++ b/include/uapi/linux/sched/types.h @@ -0,0 +1,74 @@ +#ifndef _UAPI_LINUX_SCHED_TYPES_H +#define _UAPI_LINUX_SCHED_TYPES_H + +#include <linux/types.h> + +struct sched_param { + int sched_priority; +}; + +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. + * + * This variant (sched_attr) is meant at describing a so-called + * sporadic time-constrained task. In such model a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there are a multiplicity of scheduling algorithms + * and policies, that can be used to ensure all the tasks will make their + * timing constraints. 
+ * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * available in the scheduling class file or in Documentation/. + */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; + +#endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 7fec7e36d921..51a6b86e3700 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_STAT_H #define _UAPI_LINUX_STAT_H +#include <linux/types.h> #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) @@ -41,5 +42,135 @@ #endif +/* + * Timestamp structure for the timestamps in struct statx. + * + * tv_sec holds the number of seconds before (negative) or after (positive) + * 00:00:00 1st January 1970 UTC. + * + * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is + * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. + * + * Note that if both tv_sec and tv_nsec are non-zero, then the two values must + * either be both positive or both negative. + * + * __reserved is held in case we need a yet finer resolution. + */ +struct statx_timestamp { + __s64 tv_sec; + __s32 tv_nsec; + __s32 __reserved; +}; + +/* + * Structures for the extended file attribute retrieval system call + * (statx()). + * + * The caller passes a mask of what they're specifically interested in as a + * parameter to statx(). What statx() actually got will be indicated in + * st_mask upon return. + * + * For each bit in the mask argument: + * + * - if the datum is not supported: + * + * - the bit will be cleared, and + * + * - the datum will be set to an appropriate fabricated value if one is + * available (eg. CIFS can take a default uid and gid), otherwise + * + * - the field will be cleared; + * + * - otherwise, if explicitly requested: + * + * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is + * set or if the datum is considered out of date, and + * + * - the field will be filled in and the bit will be set; + * + * - otherwise, if not requested, but available in approximate form without any + * effort, it will be filled in anyway, and the bit will be set upon return + * (it might not be up to date, however, and no attempt will be made to + * synchronise the internal state first); + * + * - otherwise the field and the bit will be cleared before returning. + * + * Items in STATX_BASIC_STATS may be marked unavailable on return, but they + * will have values installed for compatibility purposes so that stat() and + * co. can be emulated in userspace. 
+ */ +struct statx { + /* 0x00 */ + __u32 stx_mask; /* What results were written [uncond] */ + __u32 stx_blksize; /* Preferred general I/O size [uncond] */ + __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* 0x10 */ + __u32 stx_nlink; /* Number of hard links */ + __u32 stx_uid; /* User ID of owner */ + __u32 stx_gid; /* Group ID of owner */ + __u16 stx_mode; /* File mode */ + __u16 __spare0[1]; + /* 0x20 */ + __u64 stx_ino; /* Inode number */ + __u64 stx_size; /* File size */ + __u64 stx_blocks; /* Number of 512-byte blocks allocated */ + __u64 __spare1[1]; + /* 0x40 */ + struct statx_timestamp stx_atime; /* Last access time */ + struct statx_timestamp stx_btime; /* File creation time */ + struct statx_timestamp stx_ctime; /* Last attribute change time */ + struct statx_timestamp stx_mtime; /* Last data modification time */ + /* 0x80 */ + __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_minor; + __u32 stx_dev_major; /* ID of device containing file [uncond] */ + __u32 stx_dev_minor; + /* 0x90 */ + __u64 __spare2[14]; /* Spare space for future expansion */ + /* 0x100 */ +}; + +/* + * Flags to be stx_mask + * + * Query request/result mask for statx() and struct statx::stx_mask. + * + * These bits should be set in the mask argument of statx() to request + * particular items when calling statx(). + */ +#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */ +#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */ +#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */ +#define STATX_UID 0x00000008U /* Want/got stx_uid */ +#define STATX_GID 0x00000010U /* Want/got stx_gid */ +#define STATX_ATIME 0x00000020U /* Want/got stx_atime */ +#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */ +#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */ +#define STATX_INO 0x00000100U /* Want/got stx_ino */ +#define STATX_SIZE 0x00000200U /* Want/got stx_size */ +#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ +#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ +#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_ALL 0x00000fffU /* All currently supported flags */ + +/* + * Attributes to be found in stx_attributes + * + * These give information about the features or the state of a file that might + * be of use to ordinary userspace programs such as GUIs or ls rather than + * specialised tools. + * + * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS + * semantically. Where possible, the numerical value is picked to correspond + * also. 
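/*
 * Illustrative userspace sketch, not part of this patch: asking statx()
 * for the basic stats plus the birth time and then checking stx_mask, which
 * is the want/got protocol described above. Assumes <linux/stat.h> from
 * these headers and that SYS_statx is defined; the syscall is used directly
 * because a libc wrapper may not exist yet.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>              /* AT_FDCWD */
#include <sys/syscall.h>
#include <linux/stat.h>         /* struct statx, STATX_* */

#ifndef AT_STATX_DONT_SYNC
#define AT_STATX_DONT_SYNC 0x4000   /* from the uapi fcntl.h hunk earlier */
#endif

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/etc/hostname";
        struct statx stx;

        /* don't force a round trip to a network server for the attributes */
        if (syscall(SYS_statx, AT_FDCWD, path, AT_STATX_DONT_SYNC,
                    STATX_BASIC_STATS | STATX_BTIME, &stx) < 0) {
                perror("statx");
                return 1;
        }

        printf("size: %llu\n", (unsigned long long)stx.stx_size);

        /* a cleared bit means the filesystem did not provide that field */
        if (stx.stx_mask & STATX_BTIME)
                printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
        else
                printf("birth time not available on this filesystem\n");
        return 0;
}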
+ */ +#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */ +#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */ +#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ +#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ +#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ + +#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ + #endif /* _UAPI_LINUX_STAT_H */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index c506cddb8165..af17b4154ef6 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -105,26 +105,26 @@ struct tcmu_cmd_entry { union { struct { - uint32_t iov_cnt; - uint32_t iov_bidi_cnt; - uint32_t iov_dif_cnt; - uint64_t cdb_off; - uint64_t __pad1; - uint64_t __pad2; + __u32 iov_cnt; + __u32 iov_bidi_cnt; + __u32 iov_dif_cnt; + __u64 cdb_off; + __u64 __pad1; + __u64 __pad2; struct iovec iov[0]; } req; struct { - uint8_t scsi_status; - uint8_t __pad1; - uint16_t __pad2; - uint32_t __pad3; + __u8 scsi_status; + __u8 __pad1; + __u16 __pad2; + __u32 __pad3; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; } __packed; -#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t) +#define TCMU_OP_ALIGN_SIZE sizeof(__u64) enum tcmu_genl_cmd { TCMU_CMD_UNSPEC, diff --git a/include/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index c4b09689ab64..c4b09689ab64 100644 --- a/include/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 90007a1abcab..15b4385a2be1 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 95251512e2c4..44b587b49904 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern struct dma_map_ops *xen_dma_ops; +extern const struct dma_map_ops *xen_dma_ops; #ifdef CONFIG_XEN void __init xen_early_init(void); diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index 56806bc90c2f..7fb7112d667c 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -181,7 +181,7 @@ struct grant_entry_header { }; /* - * Version 2 of the grant entry structure, here is an union because three + * Version 2 of the grant entry structure, here is a union because three * different types are suppotted: full_page, sub_page and transitive. */ union grant_entry_v2 { diff --git a/init/Kconfig b/init/Kconfig index 8c39615165b7..a92f27da4a27 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1078,6 +1078,16 @@ config CGROUP_PIDS since the PIDs limit only affects a process's ability to fork, not to attach to a cgroup. +config CGROUP_RDMA + bool "RDMA controller" + help + Provides enforcement of RDMA resources defined by IB stack. 
+ It is fairly easy for consumers to exhaust RDMA resources, which + can result into resource unavailability to other consumers. + RDMA controller is designed to stop this from happening. + Attaching processes with active RDMA resources to the cgroup + hierarchy is allowed even if can cross the hierarchy's limit. + config CGROUP_FREEZER bool "Freezer controller" help diff --git a/init/init_task.c b/init/init_task.c index 53d4ce942a88..66787e30a419 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/sched/sysctl.h> #include <linux/sched/rt.h> +#include <linux/sched/task.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> diff --git a/init/initramfs.c b/init/initramfs.c index b32ad7d97ac9..981f286c1d16 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -18,6 +18,7 @@ #include <linux/dirent.h> #include <linux/syscalls.h> #include <linux/utime.h> +#include <linux/file.h> static ssize_t __init xwrite(int fd, const char *p, size_t count) { @@ -647,6 +648,7 @@ static int __init populate_rootfs(void) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); free_initrd(); #endif + flush_delayed_fput(); /* * Try loading default modules from initramfs. This gives * us a chance to load before device_initcalls. diff --git a/init/main.c b/init/main.c index 24ea48745061..eae2f15657c6 100644 --- a/init/main.c +++ b/init/main.c @@ -15,6 +15,7 @@ #include <linux/extable.h> #include <linux/module.h> #include <linux/proc_fs.h> +#include <linux/binfmts.h> #include <linux/kernel.h> #include <linux/syscalls.h> #include <linux/stackprotector.h> @@ -27,6 +28,7 @@ #include <linux/bootmem.h> #include <linux/acpi.h> #include <linux/tty.h> +#include <linux/nmi.h> #include <linux/percpu.h> #include <linux/kmod.h> #include <linux/vmalloc.h> @@ -61,6 +63,7 @@ #include <linux/device.h> #include <linux/kthread.h> #include <linux/sched.h> +#include <linux/sched/init.h> #include <linux/signal.h> #include <linux/idr.h> #include <linux/kgdb.h> @@ -71,11 +74,12 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/perf_event.h> -#include <linux/file.h> #include <linux/ptrace.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/sched_clock.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/context_tracking.h> #include <linux/random.h> #include <linux/list.h> @@ -83,6 +87,7 @@ #include <linux/proc_ns.h> #include <linux/io.h> #include <linux/cache.h> +#include <linux/rodata_test.h> #include <asm/io.h> #include <asm/bugs.h> @@ -554,7 +559,7 @@ asmlinkage __visible void __init start_kernel(void) if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); - idr_init_cache(); + radix_tree_init(); /* * Allow workqueue creation and work item queueing/cancelling @@ -569,7 +574,6 @@ asmlinkage __visible void __init start_kernel(void) trace_init(); context_tracking_init(); - radix_tree_init(); /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); @@ -936,9 +940,10 @@ __setup("rodata=", set_debug_rodata); #ifdef CONFIG_STRICT_KERNEL_RWX static void mark_readonly(void) { - if (rodata_enabled) + if (rodata_enabled) { mark_rodata_ro(); - else + rodata_test(); + } else pr_info("Kernel memory protection disabled.\n"); } #else @@ -960,8 +965,6 @@ static int __ref kernel_init(void *unused) system_state = SYSTEM_RUNNING; numa_default_policy(); - flush_delayed_fput(); - rcu_end_inkernel_boot(); if (ramdisk_execute_command) { diff 
--git a/ipc/mqueue.c b/ipc/mqueue.c index 7a2d8f0c8ae5..e8d41ff57241 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -35,6 +35,9 @@ #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/signal.h> +#include <linux/sched/user.h> #include <net/sock.h> #include "util.h" @@ -558,6 +561,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) + __releases(&info->lock) { int retval; signed long time; diff --git a/ipc/msg.c b/ipc/msg.c index e3e52ce01123..104926dc72be 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -30,7 +30,7 @@ #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> -#include <linux/sched.h> +#include <linux/sched/wake_q.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> diff --git a/ipc/namespace.c b/ipc/namespace.c index 0abdea496493..b4d80f9f7246 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -9,10 +9,12 @@ #include <linux/rcupdate.h> #include <linux/nsproxy.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/user_namespace.h> #include <linux/proc_ns.h> +#include <linux/sched/task.h> #include "util.h" diff --git a/ipc/sem.c b/ipc/sem.c index 3ec5742b5640..947dc2348271 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -82,6 +82,7 @@ #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> +#include <linux/sched/wake_q.h> #include <linux/uaccess.h> #include "util.h" @@ -159,22 +160,42 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* + * Switching from the mode suitable for simple ops + * to the mode for complex ops is costly. Therefore: + * use some hysteresis + */ +#define USE_GLOBAL_LOCK_HYSTERESIS 10 + +/* * Locking: * a) global sem_lock() for read/write * sem_undo.id_next, * sem_array.complex_count, - * sem_array.complex_mode * sem_array.pending{_alter,_const}, * sem_array.sem_undo * * b) global or semaphore sem_lock() for read/write: * sem_array.sem_base[i].pending_{const,alter}: - * sem_array.complex_mode (for read) * * c) special: * sem_undo_list.list_proc: * * undo_list->lock for write * * rcu for read + * use_global_lock: + * * global sem_lock() for write + * * either local or global sem_lock() for read. + * + * Memory ordering: + * Most ordering is enforced by using spin_lock() and spin_unlock(). + * The special case is use_global_lock: + * Setting it from non-zero to 0 is a RELEASE, this is ensured by + * using smp_store_release(). + * Testing if it is non-zero is an ACQUIRE, this is ensured by using + * smp_load_acquire(). + * Setting it from 0 to non-zero must be ordered with regards to + * this smp_load_acquire(), this is guaranteed because the smp_load_acquire() + * is inside a spin_lock() and after a write from 0 to non-zero a + * spin_lock()+spin_unlock() is done. */ #define sc_semmsl sem_ctls[0] @@ -273,29 +294,22 @@ static void complexmode_enter(struct sem_array *sma) int i; struct sem *sem; - if (sma->complex_mode) { - /* We are already in complex_mode. Nothing to do */ + if (sma->use_global_lock > 0) { + /* + * We are already in global lock mode. + * Nothing to do, just reset the + * counter until we return to simple mode. 
+ */ + sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; return; } - - /* We need a full barrier after seting complex_mode: - * The write to complex_mode must be visible - * before we read the first sem->lock spinlock state. - */ - smp_store_mb(sma->complex_mode, true); + sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; for (i = 0; i < sma->sem_nsems; i++) { sem = sma->sem_base + i; - spin_unlock_wait(&sem->lock); + spin_lock(&sem->lock); + spin_unlock(&sem->lock); } - /* - * spin_unlock_wait() is not a memory barriers, it is only a - * control barrier. The code must pair with spin_unlock(&sem->lock), - * thus just the control barrier is insufficient. - * - * smp_rmb() is sufficient, as writes cannot pass the control barrier. - */ - smp_rmb(); } /* @@ -310,13 +324,17 @@ static void complexmode_tryleave(struct sem_array *sma) */ return; } - /* - * Immediately after setting complex_mode to false, - * a simple op can start. Thus: all memory writes - * performed by the current operation must be visible - * before we set complex_mode to false. - */ - smp_store_release(&sma->complex_mode, false); + if (sma->use_global_lock == 1) { + /* + * Immediately after setting use_global_lock to 0, + * a simple op can start. Thus: all memory writes + * performed by the current operation must be visible + * before we set use_global_lock to 0. + */ + smp_store_release(&sma->use_global_lock, 0); + } else { + sma->use_global_lock--; + } } #define SEM_GLOBAL_LOCK (-1) @@ -346,30 +364,23 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, * Optimized locking is possible if no complex operation * is either enqueued or processed right now. * - * Both facts are tracked by complex_mode. + * Both facts are tracked by use_global_mode. */ sem = sma->sem_base + sops->sem_num; /* - * Initial check for complex_mode. Just an optimization, + * Initial check for use_global_lock. Just an optimization, * no locking, no memory barrier. */ - if (!sma->complex_mode) { + if (!sma->use_global_lock) { /* * It appears that no complex operation is around. * Acquire the per-semaphore lock. */ spin_lock(&sem->lock); - /* - * See 51d7d5205d33 - * ("powerpc: Add smp_mb() to arch_spin_is_locked()"): - * A full barrier is required: the write of sem->lock - * must be visible before the read is executed - */ - smp_mb(); - - if (!smp_load_acquire(&sma->complex_mode)) { + /* pairs with smp_store_release() */ + if (!smp_load_acquire(&sma->use_global_lock)) { /* fast path successful! */ return sops->sem_num; } @@ -379,19 +390,26 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, /* slow path: acquire the full lock */ ipc_lock_object(&sma->sem_perm); - if (sma->complex_count == 0) { - /* False alarm: - * There is no complex operation, thus we can switch - * back to the fast path. + if (sma->use_global_lock == 0) { + /* + * The use_global_lock mode ended while we waited for + * sma->sem_perm.lock. Thus we must switch to locking + * with sem->lock. + * Unlike in the fast path, there is no need to recheck + * sma->use_global_lock after we have acquired sem->lock: + * We own sma->sem_perm.lock, thus use_global_lock cannot + * change. */ spin_lock(&sem->lock); + ipc_unlock_object(&sma->sem_perm); return sops->sem_num; } else { - /* Not a false alarm, thus complete the sequence for a - * full lock. + /* + * Not a false alarm, thus continue to use the global lock + * mode. No need for complexmode_enter(), this was done by + * the caller that has set use_global_mode to non-zero. 
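/*
 * Simplified, compile-only userspace analogue, not the ipc/sem.c code: the
 * use_global_lock acquire/release pairing documented above, with C11
 * atomics standing in for smp_load_acquire()/smp_store_release(). As in
 * the kernel code, enter_complex()/leave_complex() are assumed to run under
 * the global lock, so only the final 1 -> 0 store needs release semantics.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define GLOBAL_LOCK_HYSTERESIS  10

static atomic_int use_global_lock;      /* 0: per-semaphore locking allowed */

/* complex op: force global-lock mode for the next few operations */
static void enter_complex(void)
{
        atomic_store_explicit(&use_global_lock, GLOBAL_LOCK_HYSTERESIS,
                              memory_order_relaxed);
}

/* complex-op exit: count down; only the final transition to 0 is a RELEASE,
 * so every write done in global mode is visible to the simple op below */
static void leave_complex(void)
{
        int v = atomic_load_explicit(&use_global_lock, memory_order_relaxed);

        if (v == 1)
                atomic_store_explicit(&use_global_lock, 0,
                                      memory_order_release);
        else
                atomic_store_explicit(&use_global_lock, v - 1,
                                      memory_order_relaxed);
}

/* simple-op fast path: after taking its per-slot lock (omitted here), it may
 * skip the global lock only if it observes 0 with ACQUIRE ordering */
static bool simple_op_may_skip_global_lock(void)
{
        return atomic_load_explicit(&use_global_lock,
                                    memory_order_acquire) == 0;
}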
*/ - complexmode_enter(sma); return SEM_GLOBAL_LOCK; } } @@ -495,7 +513,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) } sma->complex_count = 0; - sma->complex_mode = true; /* dropped by sem_unlock below */ + sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; INIT_LIST_HEAD(&sma->pending_alter); INIT_LIST_HEAD(&sma->pending_const); INIT_LIST_HEAD(&sma->list_id); diff --git a/ipc/shm.c b/ipc/shm.c index d7805acb44fd..481d2a9c298a 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -423,7 +423,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma) if (ret) return ret; - ret = sfd->file->f_op->mmap(sfd->file, vma); + ret = call_mmap(sfd->file, vma); if (ret) { shm_close(vma); return ret; @@ -452,7 +452,7 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) if (!sfd->file->f_op->fsync) return -EINVAL; - return sfd->file->f_op->fsync(sfd->file, start, end, datasync); + return call_fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, @@ -1091,8 +1091,8 @@ out_unlock1: * "raddr" thing points to kernel space, and there has to be a wrapper around * this. */ -long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, - unsigned long shmlba) +long do_shmat(int shmid, char __user *shmaddr, int shmflg, + ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; @@ -1113,8 +1113,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { - if (shmflg & SHM_RND) - addr &= ~(shmlba - 1); /* round down */ + /* + * Round down to the nearest multiple of shmlba. + * For sane do_mmap_pgoff() parameters, avoid + * round downs that trigger nil-page and MAP_FIXED. 
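/*
 * Illustrative userspace sketch, not part of this patch, of the rule in the
 * comment above: with SHM_RND, an attach address smaller than SHMLBA used
 * to round down to the NULL page; after this change the request is refused
 * instead. Assumes an architecture where SHMLBA is larger than the address
 * used below and __ARCH_FORCE_SHMLBA is not set (e.g. x86).
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        void *p;

        if (id < 0) {
                perror("shmget");
                return 1;
        }

        /* 64 < SHMLBA, so SHM_RND would have rounded this down to 0 */
        p = shmat(id, (void *)64, SHM_RND);
        if (p == (void *)-1)
                perror("shmat");        /* EINVAL expected with this change */
        else
                shmdt(p);

        shmctl(id, IPC_RMID, NULL);
        return 0;
}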
+ */ + if ((shmflg & SHM_RND) && addr >= shmlba) + addr &= ~(shmlba - 1); else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) diff --git a/kernel/Makefile b/kernel/Makefile index 12c679f769c6..b302b4731d16 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -64,10 +64,7 @@ obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o obj-$(CONFIG_COMPAT) += compat.o -obj-$(CONFIG_CGROUPS) += cgroup.o -obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o -obj-$(CONFIG_CGROUP_PIDS) += cgroup_pids.o -obj-$(CONFIG_CPUSETS) += cpuset.o +obj-$(CONFIG_CGROUPS) += cgroup/ obj-$(CONFIG_UTS_NS) += utsname.o obj-$(CONFIG_USER_NS) += user_namespace.o obj-$(CONFIG_PID_NS) += pid_namespace.o diff --git a/kernel/acct.c b/kernel/acct.c index ca9cb55b5855..5b1284370367 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -56,6 +56,8 @@ #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/uaccess.h> +#include <linux/sched/cputime.h> + #include <asm/div64.h> #include <linux/blkdev.h> /* sector_div */ #include <linux/pid_namespace.h> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 461eb1e66a0f..7af0dcc5d755 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -13,6 +13,7 @@ #include <linux/bpf_trace.h> #include <linux/syscalls.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <linux/mmzone.h> #include <linux/anon_inodes.h> diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile new file mode 100644 index 000000000000..387348a40c64 --- /dev/null +++ b/kernel/cgroup/Makefile @@ -0,0 +1,6 @@ +obj-y := cgroup.o namespace.o cgroup-v1.o + +obj-$(CONFIG_CGROUP_FREEZER) += freezer.o +obj-$(CONFIG_CGROUP_PIDS) += pids.o +obj-$(CONFIG_CGROUP_RDMA) += rdma.o +obj-$(CONFIG_CPUSETS) += cpuset.o diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h new file mode 100644 index 000000000000..9203bfb05603 --- /dev/null +++ b/kernel/cgroup/cgroup-internal.h @@ -0,0 +1,214 @@ +#ifndef __CGROUP_INTERNAL_H +#define __CGROUP_INTERNAL_H + +#include <linux/cgroup.h> +#include <linux/kernfs.h> +#include <linux/workqueue.h> +#include <linux/list.h> + +/* + * A cgroup can be associated with multiple css_sets as different tasks may + * belong to different cgroups on different hierarchies. In the other + * direction, a css_set is naturally associated with multiple cgroups. + * This M:N relationship is represented by the following link structure + * which exists for each association and allows traversing the associations + * from both sides. + */ +struct cgrp_cset_link { + /* the cgroup and css_set this link associates */ + struct cgroup *cgrp; + struct css_set *cset; + + /* list of cgrp_cset_links anchored at cgrp->cset_links */ + struct list_head cset_link; + + /* list of cgrp_cset_links anchored at css_set->cgrp_links */ + struct list_head cgrp_link; +}; + +/* used to track tasks and csets during migration */ +struct cgroup_taskset { + /* the src and dst cset list running through cset->mg_node */ + struct list_head src_csets; + struct list_head dst_csets; + + /* the subsys currently being processed */ + int ssid; + + /* + * Fields for cgroup_taskset_*() iteration. + * + * Before migration is committed, the target migration tasks are on + * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of + * the csets on ->dst_csets. ->csets point to either ->src_csets + * or ->dst_csets depending on whether migration is committed. 
+ * + * ->cur_csets and ->cur_task point to the current task position + * during iteration. + */ + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +/* migration context also tracks preloading */ +struct cgroup_mgctx { + /* + * Preloaded source and destination csets. Used to guarantee + * atomic success or failure on actual migration. + */ + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + + /* tasks and csets to migrate */ + struct cgroup_taskset tset; + + /* subsystems affected by migration */ + u16 ss_mask; +}; + +#define CGROUP_TASKSET_INIT(tset) \ +{ \ + .src_csets = LIST_HEAD_INIT(tset.src_csets), \ + .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ + .csets = &tset.src_csets, \ +} + +#define CGROUP_MGCTX_INIT(name) \ +{ \ + LIST_HEAD_INIT(name.preloaded_src_csets), \ + LIST_HEAD_INIT(name.preloaded_dst_csets), \ + CGROUP_TASKSET_INIT(name.tset), \ +} + +#define DEFINE_CGROUP_MGCTX(name) \ + struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) + +struct cgroup_sb_opts { + u16 subsys_mask; + unsigned int flags; + char *release_agent; + bool cpuset_clone_children; + char *name; + /* User explicitly requested empty subsystem */ + bool none; +}; + +extern struct mutex cgroup_mutex; +extern spinlock_t css_set_lock; +extern struct cgroup_subsys *cgroup_subsys[]; +extern struct list_head cgroup_roots; +extern struct file_system_type cgroup_fs_type; + +/* iterate across the hierarchies */ +#define for_each_root(root) \ + list_for_each_entry((root), &cgroup_roots, root_list) + +/** + * for_each_subsys - iterate all enabled cgroup subsystems + * @ss: the iteration cursor + * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end + */ +#define for_each_subsys(ss, ssid) \ + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ + (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) + +static inline bool cgroup_is_dead(const struct cgroup *cgrp) +{ + return !(cgrp->self.flags & CSS_ONLINE); +} + +static inline bool notify_on_release(const struct cgroup *cgrp) +{ + return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); +} + +void put_css_set_locked(struct css_set *cset); + +static inline void put_css_set(struct css_set *cset) +{ + unsigned long flags; + + /* + * Ensure that the refcount doesn't hit zero while any readers + * can see it. 
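/*
 * Generic userspace sketch, not the cgroup code: the same "drop a reference
 * lock-free unless it could hit zero, otherwise take the lock first" shape
 * that put_css_set() uses, so no reader ever observes a zero count outside
 * the lock. The object, lock and teardown hook are illustrative only.
 */
#include <stdatomic.h>
#include <pthread.h>

struct obj {
        atomic_int refcount;
        /* ... payload ... */
};

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* final put and teardown happen with obj_lock held */
static void obj_put_locked(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                /* last reference: safe to tear the object down here */
        }
}

static void obj_put(struct obj *o)
{
        int v = atomic_load(&o->refcount);

        /* fast path: decrement as long as the count cannot reach zero */
        while (v > 1)
                if (atomic_compare_exchange_weak(&o->refcount, &v, v - 1))
                        return;

        /* slow path: the count may hit zero, so serialise with readers */
        pthread_mutex_lock(&obj_lock);
        obj_put_locked(o);
        pthread_mutex_unlock(&obj_lock);
}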
Similar to atomic_dec_and_lock(), but for an + * rwlock + */ + if (atomic_add_unless(&cset->refcount, -1, 1)) + return; + + spin_lock_irqsave(&css_set_lock, flags); + put_css_set_locked(cset); + spin_unlock_irqrestore(&css_set_lock, flags); +} + +/* + * refcounted get/put for css_set objects + */ +static inline void get_css_set(struct css_set *cset) +{ + atomic_inc(&cset->refcount); +} + +bool cgroup_ssid_enabled(int ssid); +bool cgroup_on_dfl(const struct cgroup *cgrp); + +struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root); +struct cgroup *task_cgroup_from_root(struct task_struct *task, + struct cgroup_root *root); +struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline); +void cgroup_kn_unlock(struct kernfs_node *kn); +int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, + struct cgroup_namespace *ns); + +void cgroup_free_root(struct cgroup_root *root); +void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts); +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); +int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); +struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, + struct cgroup_root *root, unsigned long magic, + struct cgroup_namespace *ns); + +bool cgroup_may_migrate_to(struct cgroup *dst_cgrp); +void cgroup_migrate_finish(struct cgroup_mgctx *mgctx); +void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp, + struct cgroup_mgctx *mgctx); +int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx); +int cgroup_migrate(struct task_struct *leader, bool threadgroup, + struct cgroup_mgctx *mgctx); + +int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, + bool threadgroup); +ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off, bool threadgroup); +ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes, + loff_t off); + +void cgroup_lock_and_drain_offline(struct cgroup *cgrp); + +int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode); +int cgroup_rmdir(struct kernfs_node *kn); +int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, + struct kernfs_root *kf_root); + +/* + * namespace.c + */ +extern const struct proc_ns_operations cgroupns_operations; + +/* + * cgroup-v1.c + */ +extern struct cftype cgroup1_base_files[]; +extern const struct file_operations proc_cgroupstats_operations; +extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops; + +bool cgroup1_ssid_disabled(int ssid); +void cgroup1_pidlist_destroy_all(struct cgroup *cgrp); +void cgroup1_release_agent(struct work_struct *work); +void cgroup1_check_for_release(struct cgroup *cgrp); +struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, + void *data, unsigned long magic, + struct cgroup_namespace *ns); + +#endif /* __CGROUP_INTERNAL_H */ diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c new file mode 100644 index 000000000000..56eba9caa632 --- /dev/null +++ b/kernel/cgroup/cgroup-v1.c @@ -0,0 +1,1398 @@ +#include "cgroup-internal.h" + +#include <linux/ctype.h> +#include <linux/kmod.h> +#include <linux/sort.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/magic.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/delayacct.h> +#include <linux/pid_namespace.h> +#include <linux/cgroupstats.h> + +#include 
<trace/events/cgroup.h> + +/* + * pidlists linger the following amount before being destroyed. The goal + * is avoiding frequent destruction in the middle of consecutive read calls + * Expiring in the middle is a performance problem not a correctness one. + * 1 sec should be enough. + */ +#define CGROUP_PIDLIST_DESTROY_DELAY HZ + +/* Controllers blocked by the commandline in v1 */ +static u16 cgroup_no_v1_mask; + +/* + * pidlist destructions need to be flushed on cgroup destruction. Use a + * separate workqueue as flush domain. + */ +static struct workqueue_struct *cgroup_pidlist_destroy_wq; + +/* + * Protects cgroup_subsys->release_agent_path. Modifying it also requires + * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. + */ +static DEFINE_SPINLOCK(release_agent_path_lock); + +bool cgroup1_ssid_disabled(int ssid) +{ + return cgroup_no_v1_mask & (1 << ssid); +} + +/** + * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' + * @from: attach to all cgroups of a given task + * @tsk: the task to be attached + */ +int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) +{ + struct cgroup_root *root; + int retval = 0; + + mutex_lock(&cgroup_mutex); + percpu_down_write(&cgroup_threadgroup_rwsem); + for_each_root(root) { + struct cgroup *from_cgrp; + + if (root == &cgrp_dfl_root) + continue; + + spin_lock_irq(&css_set_lock); + from_cgrp = task_cgroup_from_root(from, root); + spin_unlock_irq(&css_set_lock); + + retval = cgroup_attach_task(from_cgrp, tsk, false); + if (retval) + break; + } + percpu_up_write(&cgroup_threadgroup_rwsem); + mutex_unlock(&cgroup_mutex); + + return retval; +} +EXPORT_SYMBOL_GPL(cgroup_attach_task_all); + +/** + * cgroup_trasnsfer_tasks - move tasks from one cgroup to another + * @to: cgroup to which the tasks will be moved + * @from: cgroup in which the tasks currently reside + * + * Locking rules between cgroup_post_fork() and the migration path + * guarantee that, if a task is forking while being migrated, the new child + * is guaranteed to be either visible in the source cgroup after the + * parent's migration is complete or put into the target cgroup. No task + * can slip out of migration through forking. + */ +int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) +{ + DEFINE_CGROUP_MGCTX(mgctx); + struct cgrp_cset_link *link; + struct css_task_iter it; + struct task_struct *task; + int ret; + + if (cgroup_on_dfl(to)) + return -EINVAL; + + if (!cgroup_may_migrate_to(to)) + return -EBUSY; + + mutex_lock(&cgroup_mutex); + + percpu_down_write(&cgroup_threadgroup_rwsem); + + /* all tasks in @from are being moved, all csets are source */ + spin_lock_irq(&css_set_lock); + list_for_each_entry(link, &from->cset_links, cset_link) + cgroup_migrate_add_src(link->cset, to, &mgctx); + spin_unlock_irq(&css_set_lock); + + ret = cgroup_migrate_prepare_dst(&mgctx); + if (ret) + goto out_err; + + /* + * Migrate tasks one-by-one until @from is empty. This fails iff + * ->can_attach() fails. + */ + do { + css_task_iter_start(&from->self, &it); + task = css_task_iter_next(&it); + if (task) + get_task_struct(task); + css_task_iter_end(&it); + + if (task) { + ret = cgroup_migrate(task, false, &mgctx); + if (!ret) + trace_cgroup_transfer_tasks(to, task, false); + put_task_struct(task); + } + } while (task && !ret); +out_err: + cgroup_migrate_finish(&mgctx); + percpu_up_write(&cgroup_threadgroup_rwsem); + mutex_unlock(&cgroup_mutex); + return ret; +} + +/* + * Stuff for reading the 'tasks'/'procs' files. 
+ * + * Reading this file can return large amounts of data if a cgroup has + * *lots* of attached tasks. So it may need several calls to read(), + * but we cannot guarantee that the information we produce is correct + * unless we produce it entirely atomically. + * + */ + +/* which pidlist file are we talking about? */ +enum cgroup_filetype { + CGROUP_FILE_PROCS, + CGROUP_FILE_TASKS, +}; + +/* + * A pidlist is a list of pids that virtually represents the contents of one + * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists, + * a pair (one each for procs, tasks) for each pid namespace that's relevant + * to the cgroup. + */ +struct cgroup_pidlist { + /* + * used to find which pidlist is wanted. doesn't change as long as + * this particular list stays in the list. + */ + struct { enum cgroup_filetype type; struct pid_namespace *ns; } key; + /* array of xids */ + pid_t *list; + /* how many elements the above list has */ + int length; + /* each of these stored in a list by its cgroup */ + struct list_head links; + /* pointer to the cgroup we belong to, for list removal purposes */ + struct cgroup *owner; + /* for delayed destruction */ + struct delayed_work destroy_dwork; +}; + +/* + * The following two functions "fix" the issue where there are more pids + * than kmalloc will give memory for; in such cases, we use vmalloc/vfree. + * TODO: replace with a kernel-wide solution to this problem + */ +#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2)) +static void *pidlist_allocate(int count) +{ + if (PIDLIST_TOO_LARGE(count)) + return vmalloc(count * sizeof(pid_t)); + else + return kmalloc(count * sizeof(pid_t), GFP_KERNEL); +} + +static void pidlist_free(void *p) +{ + kvfree(p); +} + +/* + * Used to destroy all pidlists lingering waiting for destroy timer. None + * should be left afterwards. + */ +void cgroup1_pidlist_destroy_all(struct cgroup *cgrp) +{ + struct cgroup_pidlist *l, *tmp_l; + + mutex_lock(&cgrp->pidlist_mutex); + list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links) + mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0); + mutex_unlock(&cgrp->pidlist_mutex); + + flush_workqueue(cgroup_pidlist_destroy_wq); + BUG_ON(!list_empty(&cgrp->pidlists)); +} + +static void cgroup_pidlist_destroy_work_fn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist, + destroy_dwork); + struct cgroup_pidlist *tofree = NULL; + + mutex_lock(&l->owner->pidlist_mutex); + + /* + * Destroy iff we didn't get queued again. The state won't change + * as destroy_dwork can only be queued while locked. + */ + if (!delayed_work_pending(dwork)) { + list_del(&l->links); + pidlist_free(l->list); + put_pid_ns(l->key.ns); + tofree = l; + } + + mutex_unlock(&l->owner->pidlist_mutex); + kfree(tofree); +} + +/* + * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries + * Returns the number of unique elements. + */ +static int pidlist_uniq(pid_t *list, int length) +{ + int src, dest = 1; + + /* + * we presume the 0th element is unique, so i starts at 1. 
trivial + * edge cases first; no work needs to be done for either + */ + if (length == 0 || length == 1) + return length; + /* src and dest walk down the list; dest counts unique elements */ + for (src = 1; src < length; src++) { + /* find next unique element */ + while (list[src] == list[src-1]) { + src++; + if (src == length) + goto after; + } + /* dest always points to where the next unique element goes */ + list[dest] = list[src]; + dest++; + } +after: + return dest; +} + +/* + * The two pid files - task and cgroup.procs - guaranteed that the result + * is sorted, which forced this whole pidlist fiasco. As pid order is + * different per namespace, each namespace needs differently sorted list, + * making it impossible to use, for example, single rbtree of member tasks + * sorted by task pointer. As pidlists can be fairly large, allocating one + * per open file is dangerous, so cgroup had to implement shared pool of + * pidlists keyed by cgroup and namespace. + */ +static int cmppid(const void *a, const void *b) +{ + return *(pid_t *)a - *(pid_t *)b; +} + +static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, + enum cgroup_filetype type) +{ + struct cgroup_pidlist *l; + /* don't need task_nsproxy() if we're looking at ourself */ + struct pid_namespace *ns = task_active_pid_ns(current); + + lockdep_assert_held(&cgrp->pidlist_mutex); + + list_for_each_entry(l, &cgrp->pidlists, links) + if (l->key.type == type && l->key.ns == ns) + return l; + return NULL; +} + +/* + * find the appropriate pidlist for our purpose (given procs vs tasks) + * returns with the lock on that pidlist already held, and takes care + * of the use count, or returns NULL with no locks held if we're out of + * memory. + */ +static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, + enum cgroup_filetype type) +{ + struct cgroup_pidlist *l; + + lockdep_assert_held(&cgrp->pidlist_mutex); + + l = cgroup_pidlist_find(cgrp, type); + if (l) + return l; + + /* entry not found; create a new one */ + l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); + if (!l) + return l; + + INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn); + l->key.type = type; + /* don't need task_nsproxy() if we're looking at ourself */ + l->key.ns = get_pid_ns(task_active_pid_ns(current)); + l->owner = cgrp; + list_add(&l->links, &cgrp->pidlists); + return l; +} + +/** + * cgroup_task_count - count the number of tasks in a cgroup. + * @cgrp: the cgroup in question + * + * Return the number of tasks in the cgroup. The returned number can be + * higher than the actual number of tasks due to css_set references from + * namespace roots and temporary usages. + */ +static int cgroup_task_count(const struct cgroup *cgrp) +{ + int count = 0; + struct cgrp_cset_link *link; + + spin_lock_irq(&css_set_lock); + list_for_each_entry(link, &cgrp->cset_links, cset_link) + count += atomic_read(&link->cset->refcount); + spin_unlock_irq(&css_set_lock); + return count; +} + +/* + * Load a cgroup's pidarray with either procs' tgids or tasks' pids + */ +static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, + struct cgroup_pidlist **lp) +{ + pid_t *array; + int length; + int pid, n = 0; /* used for populating the array */ + struct css_task_iter it; + struct task_struct *tsk; + struct cgroup_pidlist *l; + + lockdep_assert_held(&cgrp->pidlist_mutex); + + /* + * If cgroup gets more users after we read count, we won't have + * enough space - tough. 
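/*
 * Standalone userspace analogue, not part of this patch: the sort-then-uniq
 * step used when a pidlist is (re)built, with qsort() in place of the
 * kernel's sort() and an equivalent in-place deduplication of the sorted
 * array, as pidlist_uniq() above does.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static int cmppid(const void *a, const void *b)
{
        return *(const pid_t *)a - *(const pid_t *)b;
}

/* strip duplicates from a sorted array, returning the new length */
static int uniq(pid_t *list, int length)
{
        int src, dest = 1;

        if (length <= 1)
                return length;
        for (src = 1; src < length; src++)
                if (list[src] != list[dest - 1])
                        list[dest++] = list[src];
        return dest;
}

int main(void)
{
        pid_t pids[] = { 42, 7, 42, 7, 7, 100 };
        int i, n = sizeof(pids) / sizeof(pids[0]);

        qsort(pids, n, sizeof(pid_t), cmppid);
        n = uniq(pids, n);                      /* leaves 7, 42, 100 */
        for (i = 0; i < n; i++)
                printf("%d\n", (int)pids[i]);
        return 0;
}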
This race is indistinguishable to the + * caller from the case that the additional cgroup users didn't + * show up until sometime later on. + */ + length = cgroup_task_count(cgrp); + array = pidlist_allocate(length); + if (!array) + return -ENOMEM; + /* now, populate the array */ + css_task_iter_start(&cgrp->self, &it); + while ((tsk = css_task_iter_next(&it))) { + if (unlikely(n == length)) + break; + /* get tgid or pid for procs or tasks file respectively */ + if (type == CGROUP_FILE_PROCS) + pid = task_tgid_vnr(tsk); + else + pid = task_pid_vnr(tsk); + if (pid > 0) /* make sure to only use valid results */ + array[n++] = pid; + } + css_task_iter_end(&it); + length = n; + /* now sort & (if procs) strip out duplicates */ + sort(array, length, sizeof(pid_t), cmppid, NULL); + if (type == CGROUP_FILE_PROCS) + length = pidlist_uniq(array, length); + + l = cgroup_pidlist_find_create(cgrp, type); + if (!l) { + pidlist_free(array); + return -ENOMEM; + } + + /* store array, freeing old if necessary */ + pidlist_free(l->list); + l->list = array; + l->length = length; + *lp = l; + return 0; +} + +/* + * seq_file methods for the tasks/procs files. The seq_file position is the + * next pid to display; the seq_file iterator is a pointer to the pid + * in the cgroup->l->list array. + */ + +static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) +{ + /* + * Initially we receive a position value that corresponds to + * one more than the last pid shown (or 0 on the first call or + * after a seek to the start). Use a binary-search to find the + * next pid to display, if any + */ + struct kernfs_open_file *of = s->private; + struct cgroup *cgrp = seq_css(s)->cgroup; + struct cgroup_pidlist *l; + enum cgroup_filetype type = seq_cft(s)->private; + int index = 0, pid = *pos; + int *iter, ret; + + mutex_lock(&cgrp->pidlist_mutex); + + /* + * !NULL @of->priv indicates that this isn't the first start() + * after open. If the matching pidlist is around, we can use that. + * Look for it. Note that @of->priv can't be used directly. It + * could already have been destroyed. + */ + if (of->priv) + of->priv = cgroup_pidlist_find(cgrp, type); + + /* + * Either this is the first start() after open or the matching + * pidlist has been destroyed inbetween. Create a new one. + */ + if (!of->priv) { + ret = pidlist_array_load(cgrp, type, + (struct cgroup_pidlist **)&of->priv); + if (ret) + return ERR_PTR(ret); + } + l = of->priv; + + if (pid) { + int end = l->length; + + while (index < end) { + int mid = (index + end) / 2; + if (l->list[mid] == pid) { + index = mid; + break; + } else if (l->list[mid] <= pid) + index = mid + 1; + else + end = mid; + } + } + /* If we're off the end of the array, we're done */ + if (index >= l->length) + return NULL; + /* Update the abstract position to be the actual pid that we found */ + iter = l->list + index; + *pos = *iter; + return iter; +} + +static void cgroup_pidlist_stop(struct seq_file *s, void *v) +{ + struct kernfs_open_file *of = s->private; + struct cgroup_pidlist *l = of->priv; + + if (l) + mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, + CGROUP_PIDLIST_DESTROY_DELAY); + mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex); +} + +static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct kernfs_open_file *of = s->private; + struct cgroup_pidlist *l = of->priv; + pid_t *p = v; + pid_t *end = l->list + l->length; + /* + * Advance to the next pid in the array. 
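/*
 * Standalone sketch, not part of this patch, of the resume logic used by
 * cgroup_pidlist_start() above: given the pid a reader stopped at, a binary
 * search returns its slot if it is still in the (sorted) list, otherwise
 * the slot of the first larger pid, or the array length when nothing is
 * left to show.
 */
#include <stdio.h>
#include <sys/types.h>

static int resume_index(const pid_t *list, int len, pid_t pid)
{
        int index = 0, end = len;

        while (index < end) {
                int mid = (index + end) / 2;

                if (list[mid] == pid)
                        return mid;
                else if (list[mid] < pid)
                        index = mid + 1;
                else
                        end = mid;
        }
        return index;
}

int main(void)
{
        const pid_t pids[] = { 5, 9, 23, 51, 80 };

        printf("%d\n", resume_index(pids, 5, 23)); /* 2: pid still listed  */
        printf("%d\n", resume_index(pids, 5, 24)); /* 3: first larger pid  */
        printf("%d\n", resume_index(pids, 5, 99)); /* 5: off the end, done */
        return 0;
}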
If this goes off the + * end, we're done + */ + p++; + if (p >= end) { + return NULL; + } else { + *pos = *p; + return p; + } +} + +static int cgroup_pidlist_show(struct seq_file *s, void *v) +{ + seq_printf(s, "%d\n", *(int *)v); + + return 0; +} + +static ssize_t cgroup_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + return __cgroup_procs_write(of, buf, nbytes, off, false); +} + +static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct cgroup *cgrp; + + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENODEV; + spin_lock(&release_agent_path_lock); + strlcpy(cgrp->root->release_agent_path, strstrip(buf), + sizeof(cgrp->root->release_agent_path)); + spin_unlock(&release_agent_path_lock); + cgroup_kn_unlock(of->kn); + return nbytes; +} + +static int cgroup_release_agent_show(struct seq_file *seq, void *v) +{ + struct cgroup *cgrp = seq_css(seq)->cgroup; + + spin_lock(&release_agent_path_lock); + seq_puts(seq, cgrp->root->release_agent_path); + spin_unlock(&release_agent_path_lock); + seq_putc(seq, '\n'); + return 0; +} + +static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) +{ + seq_puts(seq, "0\n"); + return 0; +} + +static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return notify_on_release(css->cgroup); +} + +static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + if (val) + set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags); + else + clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags); + return 0; +} + +static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); +} + +static int cgroup_clone_children_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + if (val) + set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); + else + clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); + return 0; +} + +/* cgroup core interface files for the legacy hierarchies */ +struct cftype cgroup1_base_files[] = { + { + .name = "cgroup.procs", + .seq_start = cgroup_pidlist_start, + .seq_next = cgroup_pidlist_next, + .seq_stop = cgroup_pidlist_stop, + .seq_show = cgroup_pidlist_show, + .private = CGROUP_FILE_PROCS, + .write = cgroup_procs_write, + }, + { + .name = "cgroup.clone_children", + .read_u64 = cgroup_clone_children_read, + .write_u64 = cgroup_clone_children_write, + }, + { + .name = "cgroup.sane_behavior", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = cgroup_sane_behavior_show, + }, + { + .name = "tasks", + .seq_start = cgroup_pidlist_start, + .seq_next = cgroup_pidlist_next, + .seq_stop = cgroup_pidlist_stop, + .seq_show = cgroup_pidlist_show, + .private = CGROUP_FILE_TASKS, + .write = cgroup_tasks_write, + }, + { + .name = "notify_on_release", + .read_u64 = cgroup_read_notify_on_release, + .write_u64 = cgroup_write_notify_on_release, + }, + { + .name = "release_agent", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = cgroup_release_agent_show, + .write = cgroup_release_agent_write, + .max_write_len = PATH_MAX - 1, + }, + { } /* terminate */ +}; + +/* Display information about each subsystem and each hierarchy */ +static int proc_cgroupstats_show(struct seq_file *m, void *v) +{ + struct cgroup_subsys *ss; + int i; + + seq_puts(m, 
"#subsys_name\thierarchy\tnum_cgroups\tenabled\n"); + /* + * ideally we don't want subsystems moving around while we do this. + * cgroup_mutex is also necessary to guarantee an atomic snapshot of + * subsys/hierarchy state. + */ + mutex_lock(&cgroup_mutex); + + for_each_subsys(ss, i) + seq_printf(m, "%s\t%d\t%d\t%d\n", + ss->legacy_name, ss->root->hierarchy_id, + atomic_read(&ss->root->nr_cgrps), + cgroup_ssid_enabled(i)); + + mutex_unlock(&cgroup_mutex); + return 0; +} + +static int cgroupstats_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cgroupstats_show, NULL); +} + +const struct file_operations proc_cgroupstats_operations = { + .open = cgroupstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * cgroupstats_build - build and fill cgroupstats + * @stats: cgroupstats to fill information into + * @dentry: A dentry entry belonging to the cgroup for which stats have + * been requested. + * + * Build and fill cgroupstats so that taskstats can export it to user + * space. + */ +int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) +{ + struct kernfs_node *kn = kernfs_node_from_dentry(dentry); + struct cgroup *cgrp; + struct css_task_iter it; + struct task_struct *tsk; + + /* it should be kernfs_node belonging to cgroupfs and is a directory */ + if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || + kernfs_type(kn) != KERNFS_DIR) + return -EINVAL; + + mutex_lock(&cgroup_mutex); + + /* + * We aren't being called from kernfs and there's no guarantee on + * @kn->priv's validity. For this and css_tryget_online_from_dir(), + * @kn->priv is RCU safe. Let's do the RCU dancing. + */ + rcu_read_lock(); + cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); + if (!cgrp || cgroup_is_dead(cgrp)) { + rcu_read_unlock(); + mutex_unlock(&cgroup_mutex); + return -ENOENT; + } + rcu_read_unlock(); + + css_task_iter_start(&cgrp->self, &it); + while ((tsk = css_task_iter_next(&it))) { + switch (tsk->state) { + case TASK_RUNNING: + stats->nr_running++; + break; + case TASK_INTERRUPTIBLE: + stats->nr_sleeping++; + break; + case TASK_UNINTERRUPTIBLE: + stats->nr_uninterruptible++; + break; + case TASK_STOPPED: + stats->nr_stopped++; + break; + default: + if (delayacct_is_task_waiting_on_io(tsk)) + stats->nr_io_wait++; + break; + } + } + css_task_iter_end(&it); + + mutex_unlock(&cgroup_mutex); + return 0; +} + +void cgroup1_check_for_release(struct cgroup *cgrp) +{ + if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) && + !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp)) + schedule_work(&cgrp->release_agent_work); +} + +/* + * Notify userspace when a cgroup is released, by running the + * configured release agent with the name of the cgroup (path + * relative to the root of cgroup file system) as the argument. + * + * Most likely, this user command will try to rmdir this cgroup. + * + * This races with the possibility that some other task will be + * attached to this cgroup before it is removed, or that some other + * user task will 'mkdir' a child cgroup of this cgroup. That's ok. + * The presumed 'rmdir' will fail quietly if this cgroup is no longer + * unused, and this cgroup will be reprieved from its death sentence, + * to continue to serve a useful existence. Next time it's released, + * we will get notified again, if it still has 'notify_on_release' set. + * + * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which + * means only wait until the task is successfully execve()'d. 
The + * separate release agent task is forked by call_usermodehelper(), + * then control in this thread returns here, without waiting for the + * release agent task. We don't bother to wait because the caller of + * this routine has no use for the exit status of the release agent + * task, so no sense holding our caller up for that. + */ +void cgroup1_release_agent(struct work_struct *work) +{ + struct cgroup *cgrp = + container_of(work, struct cgroup, release_agent_work); + char *pathbuf = NULL, *agentbuf = NULL; + char *argv[3], *envp[3]; + int ret; + + mutex_lock(&cgroup_mutex); + + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); + agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); + if (!pathbuf || !agentbuf) + goto out; + + spin_lock_irq(&css_set_lock); + ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns); + spin_unlock_irq(&css_set_lock); + if (ret < 0 || ret >= PATH_MAX) + goto out; + + argv[0] = agentbuf; + argv[1] = pathbuf; + argv[2] = NULL; + + /* minimal command environment */ + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + envp[2] = NULL; + + mutex_unlock(&cgroup_mutex); + call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); + goto out_free; +out: + mutex_unlock(&cgroup_mutex); +out_free: + kfree(agentbuf); + kfree(pathbuf); +} + +/* + * cgroup_rename - Only allow simple rename of directories in place. + */ +static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, + const char *new_name_str) +{ + struct cgroup *cgrp = kn->priv; + int ret; + + if (kernfs_type(kn) != KERNFS_DIR) + return -ENOTDIR; + if (kn->parent != new_parent) + return -EIO; + + /* + * We're gonna grab cgroup_mutex which nests outside kernfs + * active_ref. kernfs_rename() doesn't require active_ref + * protection. Break them before grabbing cgroup_mutex. 
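/*
 * Illustrative sketch, not part of this patch, of the other side of
 * cgroup1_release_agent() above: the configured helper is exec'd with the
 * released cgroup's path (relative to the hierarchy root) as its only
 * argument and just HOME/PATH in the environment. A minimal agent might
 * only log the event; a real one would typically also rmdir the cgroup.
 * It is wired up by writing the helper's absolute path to the hierarchy's
 * release_agent file and setting notify_on_release to 1 (both files are
 * part of cgroup1_base_files[] above).
 */
#include <stdio.h>
#include <syslog.h>

int main(int argc, char **argv)         /* argv[1] = released cgroup path */
{
        if (argc < 2)
                return 1;

        openlog("cg-release-agent", LOG_PID, LOG_DAEMON);
        syslog(LOG_INFO, "cgroup released: %s", argv[1]);
        closelog();
        return 0;
}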
+ */ + kernfs_break_active_protection(new_parent); + kernfs_break_active_protection(kn); + + mutex_lock(&cgroup_mutex); + + ret = kernfs_rename(kn, new_parent, new_name_str); + if (!ret) + trace_cgroup_rename(cgrp); + + mutex_unlock(&cgroup_mutex); + + kernfs_unbreak_active_protection(kn); + kernfs_unbreak_active_protection(new_parent); + return ret; +} + +static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root) +{ + struct cgroup_root *root = cgroup_root_from_kf(kf_root); + struct cgroup_subsys *ss; + int ssid; + + for_each_subsys(ss, ssid) + if (root->subsys_mask & (1 << ssid)) + seq_show_option(seq, ss->legacy_name, NULL); + if (root->flags & CGRP_ROOT_NOPREFIX) + seq_puts(seq, ",noprefix"); + if (root->flags & CGRP_ROOT_XATTR) + seq_puts(seq, ",xattr"); + + spin_lock(&release_agent_path_lock); + if (strlen(root->release_agent_path)) + seq_show_option(seq, "release_agent", + root->release_agent_path); + spin_unlock(&release_agent_path_lock); + + if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) + seq_puts(seq, ",clone_children"); + if (strlen(root->name)) + seq_show_option(seq, "name", root->name); + return 0; +} + +static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) +{ + char *token, *o = data; + bool all_ss = false, one_ss = false; + u16 mask = U16_MAX; + struct cgroup_subsys *ss; + int nr_opts = 0; + int i; + +#ifdef CONFIG_CPUSETS + mask = ~((u16)1 << cpuset_cgrp_id); +#endif + + memset(opts, 0, sizeof(*opts)); + + while ((token = strsep(&o, ",")) != NULL) { + nr_opts++; + + if (!*token) + return -EINVAL; + if (!strcmp(token, "none")) { + /* Explicitly have no subsystems */ + opts->none = true; + continue; + } + if (!strcmp(token, "all")) { + /* Mutually exclusive option 'all' + subsystem name */ + if (one_ss) + return -EINVAL; + all_ss = true; + continue; + } + if (!strcmp(token, "noprefix")) { + opts->flags |= CGRP_ROOT_NOPREFIX; + continue; + } + if (!strcmp(token, "clone_children")) { + opts->cpuset_clone_children = true; + continue; + } + if (!strcmp(token, "xattr")) { + opts->flags |= CGRP_ROOT_XATTR; + continue; + } + if (!strncmp(token, "release_agent=", 14)) { + /* Specifying two release agents is forbidden */ + if (opts->release_agent) + return -EINVAL; + opts->release_agent = + kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); + if (!opts->release_agent) + return -ENOMEM; + continue; + } + if (!strncmp(token, "name=", 5)) { + const char *name = token + 5; + /* Can't specify an empty name */ + if (!strlen(name)) + return -EINVAL; + /* Must match [\w.-]+ */ + for (i = 0; i < strlen(name); i++) { + char c = name[i]; + if (isalnum(c)) + continue; + if ((c == '.') || (c == '-') || (c == '_')) + continue; + return -EINVAL; + } + /* Specifying two names is forbidden */ + if (opts->name) + return -EINVAL; + opts->name = kstrndup(name, + MAX_CGROUP_ROOT_NAMELEN - 1, + GFP_KERNEL); + if (!opts->name) + return -ENOMEM; + + continue; + } + + for_each_subsys(ss, i) { + if (strcmp(token, ss->legacy_name)) + continue; + if (!cgroup_ssid_enabled(i)) + continue; + if (cgroup1_ssid_disabled(i)) + continue; + + /* Mutually exclusive option 'all' + subsystem name */ + if (all_ss) + return -EINVAL; + opts->subsys_mask |= (1 << i); + one_ss = true; + + break; + } + if (i == CGROUP_SUBSYS_COUNT) + return -ENOENT; + } + + /* + * If the 'all' option was specified select all the subsystems, + * otherwise if 'none', 'name=' and a subsystem name options were + * not specified, let's default to 'all' + */ + if (all_ss || (!one_ss && 
!opts->none && !opts->name)) + for_each_subsys(ss, i) + if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i)) + opts->subsys_mask |= (1 << i); + + /* + * We either have to specify by name or by subsystems. (So all + * empty hierarchies must have a name). + */ + if (!opts->subsys_mask && !opts->name) + return -EINVAL; + + /* + * Option noprefix was introduced just for backward compatibility + * with the old cpuset, so we allow noprefix only if mounting just + * the cpuset subsystem. + */ + if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask)) + return -EINVAL; + + /* Can't specify "none" and some subsystems */ + if (opts->subsys_mask && opts->none) + return -EINVAL; + + return 0; +} + +static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data) +{ + int ret = 0; + struct cgroup_root *root = cgroup_root_from_kf(kf_root); + struct cgroup_sb_opts opts; + u16 added_mask, removed_mask; + + cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); + + /* See what subsystems are wanted */ + ret = parse_cgroupfs_options(data, &opts); + if (ret) + goto out_unlock; + + if (opts.subsys_mask != root->subsys_mask || opts.release_agent) + pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n", + task_tgid_nr(current), current->comm); + + added_mask = opts.subsys_mask & ~root->subsys_mask; + removed_mask = root->subsys_mask & ~opts.subsys_mask; + + /* Don't allow flags or name to change at remount */ + if ((opts.flags ^ root->flags) || + (opts.name && strcmp(opts.name, root->name))) { + pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n", + opts.flags, opts.name ?: "", root->flags, root->name); + ret = -EINVAL; + goto out_unlock; + } + + /* remounting is not allowed for populated hierarchies */ + if (!list_empty(&root->cgrp.self.children)) { + ret = -EBUSY; + goto out_unlock; + } + + ret = rebind_subsystems(root, added_mask); + if (ret) + goto out_unlock; + + WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask)); + + if (opts.release_agent) { + spin_lock(&release_agent_path_lock); + strcpy(root->release_agent_path, opts.release_agent); + spin_unlock(&release_agent_path_lock); + } + + trace_cgroup_remount(root); + + out_unlock: + kfree(opts.release_agent); + kfree(opts.name); + mutex_unlock(&cgroup_mutex); + return ret; +} + +struct kernfs_syscall_ops cgroup1_kf_syscall_ops = { + .rename = cgroup1_rename, + .show_options = cgroup1_show_options, + .remount_fs = cgroup1_remount, + .mkdir = cgroup_mkdir, + .rmdir = cgroup_rmdir, + .show_path = cgroup_show_path, +}; + +struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, + void *data, unsigned long magic, + struct cgroup_namespace *ns) +{ + struct super_block *pinned_sb = NULL; + struct cgroup_sb_opts opts; + struct cgroup_root *root; + struct cgroup_subsys *ss; + struct dentry *dentry; + int i, ret; + + cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); + + /* First find the desired set of subsystems */ + ret = parse_cgroupfs_options(data, &opts); + if (ret) + goto out_unlock; + + /* + * Destruction of cgroup root is asynchronous, so subsystems may + * still be dying after the previous unmount. Let's drain the + * dying subsystems. We just need to ensure that the ones + * unmounted previously finish dying and don't care about new ones + * starting. Testing ref liveliness is good enough. 
+	 */
+	for_each_subsys(ss, i) {
+		if (!(opts.subsys_mask & (1 << i)) ||
+		    ss->root == &cgrp_dfl_root)
+			continue;
+
+		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+			mutex_unlock(&cgroup_mutex);
+			msleep(10);
+			ret = restart_syscall();
+			goto out_free;
+		}
+		cgroup_put(&ss->root->cgrp);
+	}
+
+	for_each_root(root) {
+		bool name_match = false;
+
+		if (root == &cgrp_dfl_root)
+			continue;
+
+		/*
+		 * If we asked for a name then it must match. Also, if
+		 * name matches but subsys_mask doesn't, we should fail.
+		 * Remember whether name matched.
+		 */
+		if (opts.name) {
+			if (strcmp(opts.name, root->name))
+				continue;
+			name_match = true;
+		}
+
+		/*
+		 * If we asked for subsystems (or explicitly for no
+		 * subsystems) then they must match.
+		 */
+		if ((opts.subsys_mask || opts.none) &&
+		    (opts.subsys_mask != root->subsys_mask)) {
+			if (!name_match)
+				continue;
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+
+		if (root->flags ^ opts.flags)
+			pr_warn("new mount options do not match the existing superblock, will be ignored\n");
+
+		/*
+		 * We want to reuse @root whose lifetime is governed by its
+		 * ->cgrp. Let's check whether @root is alive and keep it
+		 * that way. As cgroup_kill_sb() can happen anytime, we
+		 * want to block it by pinning the sb so that @root doesn't
+		 * get killed before mount is complete.
+		 *
+		 * With the sb pinned, tryget_live can reliably indicate
+		 * whether @root can be reused. If it's being killed,
+		 * drain it. We can use wait_queue for the wait but this
+		 * path is super cold. Let's just sleep a bit and retry.
+		 */
+		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
+		if (IS_ERR(pinned_sb) ||
+		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+			mutex_unlock(&cgroup_mutex);
+			if (!IS_ERR_OR_NULL(pinned_sb))
+				deactivate_super(pinned_sb);
+			msleep(10);
+			ret = restart_syscall();
+			goto out_free;
+		}
+
+		ret = 0;
+		goto out_unlock;
+	}
+
+	/*
+	 * No such thing, create a new one. name= matching without subsys
+	 * specification is allowed for already existing hierarchies but we
+	 * can't create a new one without subsys specification.
+	 */
+	if (!opts.subsys_mask && !opts.none) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Hierarchies may only be created in the initial cgroup namespace. */
+	if (ns != &init_cgroup_ns) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	init_cgroup_root(root, &opts);
+
+	ret = cgroup_setup_root(root, opts.subsys_mask);
+	if (ret)
+		cgroup_free_root(root);
+
+out_unlock:
+	mutex_unlock(&cgroup_mutex);
+out_free:
+	kfree(opts.release_agent);
+	kfree(opts.name);
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
+				 CGROUP_SUPER_MAGIC, ns);
+
+	/*
+	 * If @pinned_sb, we're reusing an existing root and holding an
+	 * extra ref on its sb. Mount is complete. Put the extra ref.
+	 */
+	if (pinned_sb)
+		deactivate_super(pinned_sb);
+
+	return dentry;
+}
+
+static int __init cgroup1_wq_init(void)
+{
+	/*
+	 * Used to destroy pidlists and separate to serve as flush domain.
+	 * Cap @max_active to 1 too.
+ */ + cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy", + 0, 1); + BUG_ON(!cgroup_pidlist_destroy_wq); + return 0; +} +core_initcall(cgroup1_wq_init); + +static int __init cgroup_no_v1(char *str) +{ + struct cgroup_subsys *ss; + char *token; + int i; + + while ((token = strsep(&str, ",")) != NULL) { + if (!*token) + continue; + + if (!strcmp(token, "all")) { + cgroup_no_v1_mask = U16_MAX; + break; + } + + for_each_subsys(ss, i) { + if (strcmp(token, ss->name) && + strcmp(token, ss->legacy_name)) + continue; + + cgroup_no_v1_mask |= 1 << i; + } + } + return 1; +} +__setup("cgroup_no_v1=", cgroup_no_v1); + + +#ifdef CONFIG_CGROUP_DEBUG +static struct cgroup_subsys_state * +debug_css_alloc(struct cgroup_subsys_state *parent_css) +{ + struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); + + if (!css) + return ERR_PTR(-ENOMEM); + + return css; +} + +static void debug_css_free(struct cgroup_subsys_state *css) +{ + kfree(css); +} + +static u64 debug_taskcount_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return cgroup_task_count(css->cgroup); +} + +static u64 current_css_set_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return (u64)(unsigned long)current->cgroups; +} + +static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + u64 count; + + rcu_read_lock(); + count = atomic_read(&task_css_set(current)->refcount); + rcu_read_unlock(); + return count; +} + +static int current_css_set_cg_links_read(struct seq_file *seq, void *v) +{ + struct cgrp_cset_link *link; + struct css_set *cset; + char *name_buf; + + name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); + if (!name_buf) + return -ENOMEM; + + spin_lock_irq(&css_set_lock); + rcu_read_lock(); + cset = rcu_dereference(current->cgroups); + list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { + struct cgroup *c = link->cgrp; + + cgroup_name(c, name_buf, NAME_MAX + 1); + seq_printf(seq, "Root %d group %s\n", + c->root->hierarchy_id, name_buf); + } + rcu_read_unlock(); + spin_unlock_irq(&css_set_lock); + kfree(name_buf); + return 0; +} + +#define MAX_TASKS_SHOWN_PER_CSS 25 +static int cgroup_css_links_read(struct seq_file *seq, void *v) +{ + struct cgroup_subsys_state *css = seq_css(seq); + struct cgrp_cset_link *link; + + spin_lock_irq(&css_set_lock); + list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { + struct css_set *cset = link->cset; + struct task_struct *task; + int count = 0; + + seq_printf(seq, "css_set %p\n", cset); + + list_for_each_entry(task, &cset->tasks, cg_list) { + if (count++ > MAX_TASKS_SHOWN_PER_CSS) + goto overflow; + seq_printf(seq, " task %d\n", task_pid_vnr(task)); + } + + list_for_each_entry(task, &cset->mg_tasks, cg_list) { + if (count++ > MAX_TASKS_SHOWN_PER_CSS) + goto overflow; + seq_printf(seq, " task %d\n", task_pid_vnr(task)); + } + continue; + overflow: + seq_puts(seq, " ...\n"); + } + spin_unlock_irq(&css_set_lock); + return 0; +} + +static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + return (!cgroup_is_populated(css->cgroup) && + !css_has_online_children(&css->cgroup->self)); +} + +static struct cftype debug_files[] = { + { + .name = "taskcount", + .read_u64 = debug_taskcount_read, + }, + + { + .name = "current_css_set", + .read_u64 = current_css_set_read, + }, + + { + .name = "current_css_set_refcount", + .read_u64 = current_css_set_refcount_read, + }, + + { + .name = "current_css_set_cg_links", + .seq_show = current_css_set_cg_links_read, + }, + + { + 
.name = "cgroup_css_links", + .seq_show = cgroup_css_links_read, + }, + + { + .name = "releasable", + .read_u64 = releasable_read, + }, + + { } /* terminate */ +}; + +struct cgroup_subsys debug_cgrp_subsys = { + .css_alloc = debug_css_alloc, + .css_free = debug_css_free, + .legacy_cftypes = debug_files, +}; +#endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cgroup.c b/kernel/cgroup/cgroup.c index 53bbca7c4859..0125589c7428 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -28,35 +28,27 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/cgroup.h> +#include "cgroup-internal.h" + #include <linux/cred.h> -#include <linux/ctype.h> #include <linux/errno.h> #include <linux/init_task.h> #include <linux/kernel.h> -#include <linux/list.h> #include <linux/magic.h> -#include <linux/mm.h> #include <linux/mutex.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/percpu-rwsem.h> #include <linux/string.h> -#include <linux/sort.h> -#include <linux/kmod.h> -#include <linux/delayacct.h> -#include <linux/cgroupstats.h> #include <linux/hashtable.h> -#include <linux/pid_namespace.h> #include <linux/idr.h> -#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ #include <linux/kthread.h> -#include <linux/delay.h> #include <linux/atomic.h> #include <linux/cpuset.h> #include <linux/proc_ns.h> @@ -67,14 +59,6 @@ #define CREATE_TRACE_POINTS #include <trace/events/cgroup.h> -/* - * pidlists linger the following amount before being destroyed. The goal - * is avoiding frequent destruction in the middle of consecutive read calls - * Expiring in the middle is a performance problem not a correctness one. - * 1 sec should be enough. - */ -#define CGROUP_PIDLIST_DESTROY_DELAY HZ - #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \ MAX_CFTYPE_NAME + 2) @@ -88,14 +72,12 @@ * These locks are exported if CONFIG_PROVE_RCU so that accessors in * cgroup.h can use them for lockdep annotations. */ -#ifdef CONFIG_PROVE_RCU DEFINE_MUTEX(cgroup_mutex); DEFINE_SPINLOCK(css_set_lock); + +#ifdef CONFIG_PROVE_RCU EXPORT_SYMBOL_GPL(cgroup_mutex); EXPORT_SYMBOL_GPL(css_set_lock); -#else -static DEFINE_MUTEX(cgroup_mutex); -static DEFINE_SPINLOCK(css_set_lock); #endif /* @@ -110,12 +92,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock); */ static DEFINE_SPINLOCK(cgroup_file_kn_lock); -/* - * Protects cgroup_subsys->release_agent_path. Modifying it also requires - * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. - */ -static DEFINE_SPINLOCK(release_agent_path_lock); - struct percpu_rw_semaphore cgroup_threadgroup_rwsem; #define cgroup_assert_mutex_or_rcu_locked() \ @@ -131,15 +107,9 @@ struct percpu_rw_semaphore cgroup_threadgroup_rwsem; */ static struct workqueue_struct *cgroup_destroy_wq; -/* - * pidlist destructions need to be flushed on cgroup destruction. Use a - * separate workqueue as flush domain. 
- */ -static struct workqueue_struct *cgroup_pidlist_destroy_wq; - /* generate an array of cgroup subsystem pointers */ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, -static struct cgroup_subsys *cgroup_subsys[] = { +struct cgroup_subsys *cgroup_subsys[] = { #include <linux/cgroup_subsys.h> }; #undef SUBSYS @@ -186,18 +156,14 @@ EXPORT_SYMBOL_GPL(cgrp_dfl_root); */ static bool cgrp_dfl_visible; -/* Controllers blocked by the commandline in v1 */ -static u16 cgroup_no_v1_mask; - /* some controllers are not supported in the default hierarchy */ static u16 cgrp_dfl_inhibit_ss_mask; /* some controllers are implicitly enabled on the default hierarchy */ -static unsigned long cgrp_dfl_implicit_ss_mask; +static u16 cgrp_dfl_implicit_ss_mask; /* The list of hierarchy roots */ - -static LIST_HEAD(cgroup_roots); +LIST_HEAD(cgroup_roots); static int cgroup_root_count; /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ @@ -213,13 +179,13 @@ static DEFINE_IDR(cgroup_hierarchy_idr); static u64 css_serial_nr_next = 1; /* - * These bitmask flags indicate whether tasks in the fork and exit paths have - * fork/exit handlers to call. This avoids us having to do extra work in the - * fork/exit path to check which subsystems have fork/exit callbacks. + * These bitmasks identify subsystems with specific features to avoid + * having to do iterative checks repeatedly. */ static u16 have_fork_callback __read_mostly; static u16 have_exit_callback __read_mostly; static u16 have_free_callback __read_mostly; +static u16 have_canfork_callback __read_mostly; /* cgroup namespace for init task */ struct cgroup_namespace init_cgroup_ns = { @@ -230,15 +196,9 @@ struct cgroup_namespace init_cgroup_ns = { .root_cset = &init_css_set, }; -/* Ditto for the can_fork callback. */ -static u16 have_canfork_callback __read_mostly; - static struct file_system_type cgroup2_fs_type; -static struct cftype cgroup_dfl_base_files[]; -static struct cftype cgroup_legacy_base_files[]; +static struct cftype cgroup_base_files[]; -static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); -static void cgroup_lock_and_drain_offline(struct cgroup *cgrp); static int cgroup_apply_control(struct cgroup *cgrp); static void cgroup_finalize_control(struct cgroup *cgrp, int ret); static void css_task_iter_advance(struct css_task_iter *it); @@ -259,7 +219,7 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css, * is fine for individual subsystems but unsuitable for cgroup core. This * is slower static_key_enabled() based test indexed by @ssid. */ -static bool cgroup_ssid_enabled(int ssid) +bool cgroup_ssid_enabled(int ssid) { if (CGROUP_SUBSYS_COUNT == 0) return false; @@ -267,11 +227,6 @@ static bool cgroup_ssid_enabled(int ssid) return static_key_enabled(cgroup_subsys_enabled_key[ssid]); } -static bool cgroup_ssid_no_v1(int ssid) -{ - return cgroup_no_v1_mask & (1 << ssid); -} - /** * cgroup_on_dfl - test whether a cgroup is on the default hierarchy * @cgrp: the cgroup of interest @@ -325,7 +280,7 @@ static bool cgroup_ssid_no_v1(int ssid) * * - debug: disallowed on the default hierarchy. 
*/ -static bool cgroup_on_dfl(const struct cgroup *cgrp) +bool cgroup_on_dfl(const struct cgroup *cgrp) { return cgrp->root == &cgrp_dfl_root; } @@ -481,12 +436,6 @@ out_unlock: return css; } -/* convenient tests for these bits */ -static inline bool cgroup_is_dead(const struct cgroup *cgrp) -{ - return !(cgrp->self.flags & CSS_ONLINE); -} - static void cgroup_get(struct cgroup *cgrp) { WARN_ON_ONCE(cgroup_is_dead(cgrp)); @@ -518,11 +467,6 @@ struct cgroup_subsys_state *of_css(struct kernfs_open_file *of) } EXPORT_SYMBOL_GPL(of_css); -static int notify_on_release(const struct cgroup *cgrp) -{ - return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); -} - /** * for_each_css - iterate all css's of a cgroup * @css: the iteration cursor @@ -553,15 +497,6 @@ static int notify_on_release(const struct cgroup *cgrp) else /** - * for_each_subsys - iterate all enabled cgroup subsystems - * @ss: the iteration cursor - * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end - */ -#define for_each_subsys(ss, ssid) \ - for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ - (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) - -/** * do_each_subsys_mask - filter for_each_subsys with a bitmask * @ss: the iteration cursor * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end @@ -585,10 +520,6 @@ static int notify_on_release(const struct cgroup *cgrp) } \ } while (false) -/* iterate across the hierarchies */ -#define for_each_root(root) \ - list_for_each_entry((root), &cgroup_roots, root_list) - /* iterate over child cgrps, lock should be held throughout iteration */ #define cgroup_for_each_live_child(child, cgrp) \ list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \ @@ -615,29 +546,6 @@ static int notify_on_release(const struct cgroup *cgrp) ; \ else -static void cgroup_release_agent(struct work_struct *work); -static void check_for_release(struct cgroup *cgrp); - -/* - * A cgroup can be associated with multiple css_sets as different tasks may - * belong to different cgroups on different hierarchies. In the other - * direction, a css_set is naturally associated with multiple cgroups. - * This M:N relationship is represented by the following link structure - * which exists for each association and allows traversing the associations - * from both sides. - */ -struct cgrp_cset_link { - /* the cgroup and css_set this link associates */ - struct cgroup *cgrp; - struct css_set *cset; - - /* list of cgrp_cset_links anchored at cgrp->cset_links */ - struct list_head cset_link; - - /* list of cgrp_cset_links anchored at css_set->cgrp_links */ - struct list_head cgrp_link; -}; - /* * The default css_set - used by init and its children prior to any * hierarchies being mounted. 
It contains a pointer to the root state @@ -647,12 +555,12 @@ struct cgrp_cset_link { */ struct css_set init_css_set = { .refcount = ATOMIC_INIT(1), - .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), .tasks = LIST_HEAD_INIT(init_css_set.tasks), .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), + .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), + .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), - .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), }; static int css_set_count = 1; /* 1 for init_css_set */ @@ -699,7 +607,7 @@ static void cgroup_update_populated(struct cgroup *cgrp, bool populated) if (!trigger) break; - check_for_release(cgrp); + cgroup1_check_for_release(cgrp); cgroup_file_notify(&cgrp->events_file); cgrp = cgroup_parent(cgrp); @@ -808,7 +716,7 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) return key; } -static void put_css_set_locked(struct css_set *cset) +void put_css_set_locked(struct css_set *cset) { struct cgrp_cset_link *link, *tmp_link; struct cgroup_subsys *ss; @@ -838,31 +746,6 @@ static void put_css_set_locked(struct css_set *cset) kfree_rcu(cset, rcu_head); } -static void put_css_set(struct css_set *cset) -{ - unsigned long flags; - - /* - * Ensure that the refcount doesn't hit zero while any readers - * can see it. Similar to atomic_dec_and_lock(), but for an - * rwlock - */ - if (atomic_add_unless(&cset->refcount, -1, 1)) - return; - - spin_lock_irqsave(&css_set_lock, flags); - put_css_set_locked(cset); - spin_unlock_irqrestore(&css_set_lock, flags); -} - -/* - * refcounted get/put for css_set objects - */ -static inline void get_css_set(struct css_set *cset) -{ - atomic_inc(&cset->refcount); -} - /** * compare_css_sets - helper function for find_existing_css_set(). * @cset: candidate css_set being tested @@ -1095,13 +978,13 @@ static struct css_set *find_css_set(struct css_set *old_cset, } atomic_set(&cset->refcount, 1); - INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->tasks); INIT_LIST_HEAD(&cset->mg_tasks); - INIT_LIST_HEAD(&cset->mg_preload_node); - INIT_LIST_HEAD(&cset->mg_node); INIT_LIST_HEAD(&cset->task_iters); INIT_HLIST_NODE(&cset->hlist); + INIT_LIST_HEAD(&cset->cgrp_links); + INIT_LIST_HEAD(&cset->mg_preload_node); + INIT_LIST_HEAD(&cset->mg_node); /* Copy the set of subsystem state objects generated in * find_existing_css_set() */ @@ -1138,7 +1021,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } -static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) +struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) { struct cgroup *root_cgrp = kf_root->kn->priv; @@ -1166,7 +1049,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root) idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); } -static void cgroup_free_root(struct cgroup_root *root) +void cgroup_free_root(struct cgroup_root *root) { if (root) { idr_destroy(&root->cgroup_idr); @@ -1283,8 +1166,8 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, * Return the cgroup for "task" from the given hierarchy. Must be * called with cgroup_mutex and css_set_lock held. 
*/ -static struct cgroup *task_cgroup_from_root(struct task_struct *task, - struct cgroup_root *root) +struct cgroup *task_cgroup_from_root(struct task_struct *task, + struct cgroup_root *root) { /* * No need to lock the task - since we hold cgroup_mutex the @@ -1321,7 +1204,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, */ static struct kernfs_syscall_ops cgroup_kf_syscall_ops; -static const struct file_operations proc_cgroupstats_operations; static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, char *buf) @@ -1415,7 +1297,7 @@ static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask) * inaccessible any time. If the caller intends to continue to access the * cgroup, it should pin it before invoking this function. */ -static void cgroup_kn_unlock(struct kernfs_node *kn) +void cgroup_kn_unlock(struct kernfs_node *kn) { struct cgroup *cgrp; @@ -1447,8 +1329,7 @@ static void cgroup_kn_unlock(struct kernfs_node *kn) * locking under kernfs active protection and allows all kernfs operations * including self-removal. */ -static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, - bool drain_offline) +struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline) { struct cgroup *cgrp; @@ -1532,9 +1413,9 @@ static int css_populate_dir(struct cgroup_subsys_state *css) if (!css->ss) { if (cgroup_on_dfl(cgrp)) - cfts = cgroup_dfl_base_files; + cfts = cgroup_base_files; else - cfts = cgroup_legacy_base_files; + cfts = cgroup1_base_files; return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true); } @@ -1559,7 +1440,7 @@ err: return ret; } -static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) +int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) { struct cgroup *dcgrp = &dst_root->cgrp; struct cgroup_subsys *ss; @@ -1629,8 +1510,8 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) return 0; } -static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, - struct kernfs_root *kf_root) +int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, + struct kernfs_root *kf_root) { int len = 0; char *buf = NULL; @@ -1656,237 +1537,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, return len; } -static int cgroup_show_options(struct seq_file *seq, - struct kernfs_root *kf_root) -{ - struct cgroup_root *root = cgroup_root_from_kf(kf_root); - struct cgroup_subsys *ss; - int ssid; - - if (root != &cgrp_dfl_root) - for_each_subsys(ss, ssid) - if (root->subsys_mask & (1 << ssid)) - seq_show_option(seq, ss->legacy_name, NULL); - if (root->flags & CGRP_ROOT_NOPREFIX) - seq_puts(seq, ",noprefix"); - if (root->flags & CGRP_ROOT_XATTR) - seq_puts(seq, ",xattr"); - - spin_lock(&release_agent_path_lock); - if (strlen(root->release_agent_path)) - seq_show_option(seq, "release_agent", - root->release_agent_path); - spin_unlock(&release_agent_path_lock); - - if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) - seq_puts(seq, ",clone_children"); - if (strlen(root->name)) - seq_show_option(seq, "name", root->name); - return 0; -} - -struct cgroup_sb_opts { - u16 subsys_mask; - unsigned int flags; - char *release_agent; - bool cpuset_clone_children; - char *name; - /* User explicitly requested empty subsystem */ - bool none; -}; - -static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) -{ - char *token, *o = data; - bool all_ss = false, one_ss = false; - u16 mask = U16_MAX; - struct 
cgroup_subsys *ss; - int nr_opts = 0; - int i; - -#ifdef CONFIG_CPUSETS - mask = ~((u16)1 << cpuset_cgrp_id); -#endif - - memset(opts, 0, sizeof(*opts)); - - while ((token = strsep(&o, ",")) != NULL) { - nr_opts++; - - if (!*token) - return -EINVAL; - if (!strcmp(token, "none")) { - /* Explicitly have no subsystems */ - opts->none = true; - continue; - } - if (!strcmp(token, "all")) { - /* Mutually exclusive option 'all' + subsystem name */ - if (one_ss) - return -EINVAL; - all_ss = true; - continue; - } - if (!strcmp(token, "noprefix")) { - opts->flags |= CGRP_ROOT_NOPREFIX; - continue; - } - if (!strcmp(token, "clone_children")) { - opts->cpuset_clone_children = true; - continue; - } - if (!strcmp(token, "xattr")) { - opts->flags |= CGRP_ROOT_XATTR; - continue; - } - if (!strncmp(token, "release_agent=", 14)) { - /* Specifying two release agents is forbidden */ - if (opts->release_agent) - return -EINVAL; - opts->release_agent = - kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); - if (!opts->release_agent) - return -ENOMEM; - continue; - } - if (!strncmp(token, "name=", 5)) { - const char *name = token + 5; - /* Can't specify an empty name */ - if (!strlen(name)) - return -EINVAL; - /* Must match [\w.-]+ */ - for (i = 0; i < strlen(name); i++) { - char c = name[i]; - if (isalnum(c)) - continue; - if ((c == '.') || (c == '-') || (c == '_')) - continue; - return -EINVAL; - } - /* Specifying two names is forbidden */ - if (opts->name) - return -EINVAL; - opts->name = kstrndup(name, - MAX_CGROUP_ROOT_NAMELEN - 1, - GFP_KERNEL); - if (!opts->name) - return -ENOMEM; - - continue; - } - - for_each_subsys(ss, i) { - if (strcmp(token, ss->legacy_name)) - continue; - if (!cgroup_ssid_enabled(i)) - continue; - if (cgroup_ssid_no_v1(i)) - continue; - - /* Mutually exclusive option 'all' + subsystem name */ - if (all_ss) - return -EINVAL; - opts->subsys_mask |= (1 << i); - one_ss = true; - - break; - } - if (i == CGROUP_SUBSYS_COUNT) - return -ENOENT; - } - - /* - * If the 'all' option was specified select all the subsystems, - * otherwise if 'none', 'name=' and a subsystem name options were - * not specified, let's default to 'all' - */ - if (all_ss || (!one_ss && !opts->none && !opts->name)) - for_each_subsys(ss, i) - if (cgroup_ssid_enabled(i) && !cgroup_ssid_no_v1(i)) - opts->subsys_mask |= (1 << i); - - /* - * We either have to specify by name or by subsystems. (So all - * empty hierarchies must have a name). - */ - if (!opts->subsys_mask && !opts->name) - return -EINVAL; - - /* - * Option noprefix was introduced just for backward compatibility - * with the old cpuset, so we allow noprefix only if mounting just - * the cpuset subsystem. 
- */ - if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask)) - return -EINVAL; - - /* Can't specify "none" and some subsystems */ - if (opts->subsys_mask && opts->none) - return -EINVAL; - - return 0; -} - static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) { - int ret = 0; - struct cgroup_root *root = cgroup_root_from_kf(kf_root); - struct cgroup_sb_opts opts; - u16 added_mask, removed_mask; - - if (root == &cgrp_dfl_root) { - pr_err("remount is not allowed\n"); - return -EINVAL; - } - - cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); - - /* See what subsystems are wanted */ - ret = parse_cgroupfs_options(data, &opts); - if (ret) - goto out_unlock; - - if (opts.subsys_mask != root->subsys_mask || opts.release_agent) - pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n", - task_tgid_nr(current), current->comm); - - added_mask = opts.subsys_mask & ~root->subsys_mask; - removed_mask = root->subsys_mask & ~opts.subsys_mask; - - /* Don't allow flags or name to change at remount */ - if ((opts.flags ^ root->flags) || - (opts.name && strcmp(opts.name, root->name))) { - pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n", - opts.flags, opts.name ?: "", root->flags, root->name); - ret = -EINVAL; - goto out_unlock; - } - - /* remounting is not allowed for populated hierarchies */ - if (!list_empty(&root->cgrp.self.children)) { - ret = -EBUSY; - goto out_unlock; - } - - ret = rebind_subsystems(root, added_mask); - if (ret) - goto out_unlock; - - WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask)); - - if (opts.release_agent) { - spin_lock(&release_agent_path_lock); - strcpy(root->release_agent_path, opts.release_agent); - spin_unlock(&release_agent_path_lock); - } - - trace_cgroup_remount(root); - - out_unlock: - kfree(opts.release_agent); - kfree(opts.name); - mutex_unlock(&cgroup_mutex); - return ret; + pr_err("remount is not allowed\n"); + return -EINVAL; } /* @@ -1964,11 +1618,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->e_csets[ssid]); init_waitqueue_head(&cgrp->offline_waitq); - INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent); + INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent); } -static void init_cgroup_root(struct cgroup_root *root, - struct cgroup_sb_opts *opts) +void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) { struct cgroup *cgrp = &root->cgrp; @@ -1987,10 +1640,11 @@ static void init_cgroup_root(struct cgroup_root *root, set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } -static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) { LIST_HEAD(tmp_links); struct cgroup *root_cgrp = &root->cgrp; + struct kernfs_syscall_ops *kf_sops; struct css_set *cset; int i, ret; @@ -2022,7 +1676,10 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) if (ret) goto cancel_ref; - root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops, + kf_sops = root == &cgrp_dfl_root ? 
+ &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; + + root->kf_root = kernfs_create_root(kf_sops, KERNFS_ROOT_CREATE_DEACTIVATED, root_cgrp); if (IS_ERR(root->kf_root)) { @@ -2080,182 +1737,18 @@ out: return ret; } -static struct dentry *cgroup_mount(struct file_system_type *fs_type, - int flags, const char *unused_dev_name, - void *data) +struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, + struct cgroup_root *root, unsigned long magic, + struct cgroup_namespace *ns) { - bool is_v2 = fs_type == &cgroup2_fs_type; - struct super_block *pinned_sb = NULL; - struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; - struct cgroup_subsys *ss; - struct cgroup_root *root; - struct cgroup_sb_opts opts; struct dentry *dentry; - int ret; - int i; bool new_sb; - get_cgroup_ns(ns); - - /* Check if the caller has permission to mount. */ - if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) { - put_cgroup_ns(ns); - return ERR_PTR(-EPERM); - } - - /* - * The first time anyone tries to mount a cgroup, enable the list - * linking each css_set to its tasks and fix up all existing tasks. - */ - if (!use_task_css_set_links) - cgroup_enable_task_cg_lists(); - - if (is_v2) { - if (data) { - pr_err("cgroup2: unknown option \"%s\"\n", (char *)data); - put_cgroup_ns(ns); - return ERR_PTR(-EINVAL); - } - cgrp_dfl_visible = true; - root = &cgrp_dfl_root; - cgroup_get(&root->cgrp); - goto out_mount; - } - - cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); - - /* First find the desired set of subsystems */ - ret = parse_cgroupfs_options(data, &opts); - if (ret) - goto out_unlock; - - /* - * Destruction of cgroup root is asynchronous, so subsystems may - * still be dying after the previous unmount. Let's drain the - * dying subsystems. We just need to ensure that the ones - * unmounted previously finish dying and don't care about new ones - * starting. Testing ref liveliness is good enough. - */ - for_each_subsys(ss, i) { - if (!(opts.subsys_mask & (1 << i)) || - ss->root == &cgrp_dfl_root) - continue; - - if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) { - mutex_unlock(&cgroup_mutex); - msleep(10); - ret = restart_syscall(); - goto out_free; - } - cgroup_put(&ss->root->cgrp); - } - - for_each_root(root) { - bool name_match = false; - - if (root == &cgrp_dfl_root) - continue; - - /* - * If we asked for a name then it must match. Also, if - * name matches but sybsys_mask doesn't, we should fail. - * Remember whether name matched. - */ - if (opts.name) { - if (strcmp(opts.name, root->name)) - continue; - name_match = true; - } - - /* - * If we asked for subsystems (or explicitly for no - * subsystems) then they must match. - */ - if ((opts.subsys_mask || opts.none) && - (opts.subsys_mask != root->subsys_mask)) { - if (!name_match) - continue; - ret = -EBUSY; - goto out_unlock; - } - - if (root->flags ^ opts.flags) - pr_warn("new mount options do not match the existing superblock, will be ignored\n"); - - /* - * We want to reuse @root whose lifetime is governed by its - * ->cgrp. Let's check whether @root is alive and keep it - * that way. As cgroup_kill_sb() can happen anytime, we - * want to block it by pinning the sb so that @root doesn't - * get killed before mount is complete. - * - * With the sb pinned, tryget_live can reliably indicate - * whether @root can be reused. If it's being killed, - * drain it. We can use wait_queue for the wait but this - * path is super cold. Let's just sleep a bit and retry. 
- */ - pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR(pinned_sb) || - !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { - mutex_unlock(&cgroup_mutex); - if (!IS_ERR_OR_NULL(pinned_sb)) - deactivate_super(pinned_sb); - msleep(10); - ret = restart_syscall(); - goto out_free; - } - - ret = 0; - goto out_unlock; - } + dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb); /* - * No such thing, create a new one. name= matching without subsys - * specification is allowed for already existing hierarchies but we - * can't create new one without subsys specification. - */ - if (!opts.subsys_mask && !opts.none) { - ret = -EINVAL; - goto out_unlock; - } - - /* Hierarchies may only be created in the initial cgroup namespace. */ - if (ns != &init_cgroup_ns) { - ret = -EPERM; - goto out_unlock; - } - - root = kzalloc(sizeof(*root), GFP_KERNEL); - if (!root) { - ret = -ENOMEM; - goto out_unlock; - } - - init_cgroup_root(root, &opts); - - ret = cgroup_setup_root(root, opts.subsys_mask); - if (ret) - cgroup_free_root(root); - -out_unlock: - mutex_unlock(&cgroup_mutex); -out_free: - kfree(opts.release_agent); - kfree(opts.name); - - if (ret) { - put_cgroup_ns(ns); - return ERR_PTR(ret); - } -out_mount: - dentry = kernfs_mount(fs_type, flags, root->kf_root, - is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC, - &new_sb); - - /* - * In non-init cgroup namespace, instead of root cgroup's - * dentry, we return the dentry corresponding to the - * cgroupns->root_cgrp. + * In non-init cgroup namespace, instead of root cgroup's dentry, + * we return the dentry corresponding to the cgroupns->root_cgrp. */ if (!IS_ERR(dentry) && ns != &init_cgroup_ns) { struct dentry *nsdentry; @@ -2277,13 +1770,45 @@ out_mount: if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); + return dentry; +} + +static struct dentry *cgroup_mount(struct file_system_type *fs_type, + int flags, const char *unused_dev_name, + void *data) +{ + struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; + struct dentry *dentry; + + get_cgroup_ns(ns); + + /* Check if the caller has permission to mount. */ + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) { + put_cgroup_ns(ns); + return ERR_PTR(-EPERM); + } + /* - * If @pinned_sb, we're reusing an existing root and holding an - * extra ref on its sb. Mount is complete. Put the extra ref. + * The first time anyone tries to mount a cgroup, enable the list + * linking each css_set to its tasks and fix up all existing tasks. 
*/ - if (pinned_sb) { - WARN_ON(new_sb); - deactivate_super(pinned_sb); + if (!use_task_css_set_links) + cgroup_enable_task_cg_lists(); + + if (fs_type == &cgroup2_fs_type) { + if (data) { + pr_err("cgroup2: unknown option \"%s\"\n", (char *)data); + put_cgroup_ns(ns); + return ERR_PTR(-EINVAL); + } + cgrp_dfl_visible = true; + cgroup_get(&cgrp_dfl_root.cgrp); + + dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root, + CGROUP2_SUPER_MAGIC, ns); + } else { + dentry = cgroup1_mount(&cgroup_fs_type, flags, data, + CGROUP_SUPER_MAGIC, ns); } put_cgroup_ns(ns); @@ -2311,7 +1836,7 @@ static void cgroup_kill_sb(struct super_block *sb) kernfs_kill_sb(sb); } -static struct file_system_type cgroup_fs_type = { +struct file_system_type cgroup_fs_type = { .name = "cgroup", .mount = cgroup_mount, .kill_sb = cgroup_kill_sb, @@ -2325,8 +1850,8 @@ static struct file_system_type cgroup2_fs_type = { .fs_flags = FS_USERNS_MOUNT, }; -static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, - struct cgroup_namespace *ns) +int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, + struct cgroup_namespace *ns) { struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root); @@ -2389,49 +1914,18 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) } EXPORT_SYMBOL_GPL(task_cgroup_path); -/* used to track tasks and other necessary states during migration */ -struct cgroup_taskset { - /* the src and dst cset list running through cset->mg_node */ - struct list_head src_csets; - struct list_head dst_csets; - - /* the subsys currently being processed */ - int ssid; - - /* - * Fields for cgroup_taskset_*() iteration. - * - * Before migration is committed, the target migration tasks are on - * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of - * the csets on ->dst_csets. ->csets point to either ->src_csets - * or ->dst_csets depending on whether migration is committed. - * - * ->cur_csets and ->cur_task point to the current task position - * during iteration. - */ - struct list_head *csets; - struct css_set *cur_cset; - struct task_struct *cur_task; -}; - -#define CGROUP_TASKSET_INIT(tset) (struct cgroup_taskset){ \ - .src_csets = LIST_HEAD_INIT(tset.src_csets), \ - .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ - .csets = &tset.src_csets, \ -} - /** - * cgroup_taskset_add - try to add a migration target task to a taskset + * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task - * @tset: target taskset + * @mgctx: target migration context * - * Add @task, which is a migration target, to @tset. This function becomes - * noop if @task doesn't need to be migrated. @task's css_set should have - * been added as a migration source and @task->cg_list will be moved from - * the css_set's tasks list to mg_tasks one. + * Add @task, which is a migration target, to @mgctx->tset. This function + * becomes noop if @task doesn't need to be migrated. @task's css_set + * should have been added as a migration source and @task->cg_list will be + * moved from the css_set's tasks list to mg_tasks one. 
*/ -static void cgroup_taskset_add(struct task_struct *task, - struct cgroup_taskset *tset) +static void cgroup_migrate_add_task(struct task_struct *task, + struct cgroup_mgctx *mgctx) { struct css_set *cset; @@ -2451,10 +1945,11 @@ static void cgroup_taskset_add(struct task_struct *task, list_move_tail(&task->cg_list, &cset->mg_tasks); if (list_empty(&cset->mg_node)) - list_add_tail(&cset->mg_node, &tset->src_csets); + list_add_tail(&cset->mg_node, + &mgctx->tset.src_csets); if (list_empty(&cset->mg_dst_cset->mg_node)) - list_move_tail(&cset->mg_dst_cset->mg_node, - &tset->dst_csets); + list_add_tail(&cset->mg_dst_cset->mg_node, + &mgctx->tset.dst_csets); } /** @@ -2521,17 +2016,16 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, /** * cgroup_taskset_migrate - migrate a taskset - * @tset: taget taskset - * @root: cgroup root the migration is taking place on + * @mgctx: migration context * - * Migrate tasks in @tset as setup by migration preparation functions. + * Migrate tasks in @mgctx as setup by migration preparation functions. * This function fails iff one of the ->can_attach callbacks fails and - * guarantees that either all or none of the tasks in @tset are migrated. - * @tset is consumed regardless of success. + * guarantees that either all or none of the tasks in @mgctx are migrated. + * @mgctx is consumed regardless of success. */ -static int cgroup_taskset_migrate(struct cgroup_taskset *tset, - struct cgroup_root *root) +static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) { + struct cgroup_taskset *tset = &mgctx->tset; struct cgroup_subsys *ss; struct task_struct *task, *tmp_task; struct css_set *cset, *tmp_cset; @@ -2542,7 +2036,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset, return 0; /* check that we can legitimately attach to the cgroup */ - do_each_subsys_mask(ss, ssid, root->subsys_mask) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->can_attach) { tset->ssid = ssid; ret = ss->can_attach(tset); @@ -2578,7 +2072,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset, */ tset->csets = &tset->dst_csets; - do_each_subsys_mask(ss, ssid, root->subsys_mask) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->attach) { tset->ssid = ssid; ss->attach(tset); @@ -2589,7 +2083,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset, goto out_release_tset; out_cancel_attach: - do_each_subsys_mask(ss, ssid, root->subsys_mask) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ssid == failed_ssid) break; if (ss->cancel_attach) { @@ -2616,7 +2110,7 @@ out_release_tset: * zero for migration destination cgroups with tasks so that child cgroups * don't compete against tasks. */ -static bool cgroup_may_migrate_to(struct cgroup *dst_cgrp) +bool cgroup_may_migrate_to(struct cgroup *dst_cgrp) { return !cgroup_on_dfl(dst_cgrp) || !cgroup_parent(dst_cgrp) || !dst_cgrp->subtree_control; @@ -2624,25 +2118,31 @@ static bool cgroup_may_migrate_to(struct cgroup *dst_cgrp) /** * cgroup_migrate_finish - cleanup after attach - * @preloaded_csets: list of preloaded css_sets + * @mgctx: migration context * * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See * those functions for details. 
*/ -static void cgroup_migrate_finish(struct list_head *preloaded_csets) +void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) { + LIST_HEAD(preloaded); struct css_set *cset, *tmp_cset; lockdep_assert_held(&cgroup_mutex); spin_lock_irq(&css_set_lock); - list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { + + list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded); + list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded); + + list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) { cset->mg_src_cgrp = NULL; cset->mg_dst_cgrp = NULL; cset->mg_dst_cset = NULL; list_del_init(&cset->mg_preload_node); put_css_set_locked(cset); } + spin_unlock_irq(&css_set_lock); } @@ -2650,10 +2150,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets) * cgroup_migrate_add_src - add a migration source css_set * @src_cset: the source css_set to add * @dst_cgrp: the destination cgroup - * @preloaded_csets: list of preloaded css_sets + * @mgctx: migration context * * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin - * @src_cset and add it to @preloaded_csets, which should later be cleaned + * @src_cset and add it to @mgctx->src_csets, which should later be cleaned * up by cgroup_migrate_finish(). * * This function may be called without holding cgroup_threadgroup_rwsem @@ -2662,9 +2162,9 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets) * into play and the preloaded css_sets are guaranteed to cover all * migrations. */ -static void cgroup_migrate_add_src(struct css_set *src_cset, - struct cgroup *dst_cgrp, - struct list_head *preloaded_csets) +void cgroup_migrate_add_src(struct css_set *src_cset, + struct cgroup *dst_cgrp, + struct cgroup_mgctx *mgctx) { struct cgroup *src_cgrp; @@ -2692,33 +2192,35 @@ static void cgroup_migrate_add_src(struct css_set *src_cset, src_cset->mg_src_cgrp = src_cgrp; src_cset->mg_dst_cgrp = dst_cgrp; get_css_set(src_cset); - list_add(&src_cset->mg_preload_node, preloaded_csets); + list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets); } /** * cgroup_migrate_prepare_dst - prepare destination css_sets for migration - * @preloaded_csets: list of preloaded source css_sets + * @mgctx: migration context * * Tasks are about to be moved and all the source css_sets have been - * preloaded to @preloaded_csets. This function looks up and pins all - * destination css_sets, links each to its source, and append them to - * @preloaded_csets. + * preloaded to @mgctx->preloaded_src_csets. This function looks up and + * pins all destination css_sets, links each to its source, and append them + * to @mgctx->preloaded_dst_csets. * * This function must be called after cgroup_migrate_add_src() has been * called on each migration source css_set. After migration is performed * using cgroup_migrate(), cgroup_migrate_finish() must be called on - * @preloaded_csets. + * @mgctx. 
*/ -static int cgroup_migrate_prepare_dst(struct list_head *preloaded_csets) +int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) { - LIST_HEAD(csets); struct css_set *src_cset, *tmp_cset; lockdep_assert_held(&cgroup_mutex); /* look up the dst cset for each src cset and link it to src */ - list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) { + list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, + mg_preload_node) { struct css_set *dst_cset; + struct cgroup_subsys *ss; + int ssid; dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); if (!dst_cset) @@ -2743,15 +2245,19 @@ static int cgroup_migrate_prepare_dst(struct list_head *preloaded_csets) src_cset->mg_dst_cset = dst_cset; if (list_empty(&dst_cset->mg_preload_node)) - list_add(&dst_cset->mg_preload_node, &csets); + list_add_tail(&dst_cset->mg_preload_node, + &mgctx->preloaded_dst_csets); else put_css_set(dst_cset); + + for_each_subsys(ss, ssid) + if (src_cset->subsys[ssid] != dst_cset->subsys[ssid]) + mgctx->ss_mask |= 1 << ssid; } - list_splice_tail(&csets, preloaded_csets); return 0; err: - cgroup_migrate_finish(&csets); + cgroup_migrate_finish(mgctx); return -ENOMEM; } @@ -2759,7 +2265,7 @@ err: * cgroup_migrate - migrate a process or task to a cgroup * @leader: the leader of the process or the task to migrate * @threadgroup: whether @leader points to the whole process or a single task - * @root: cgroup root migration is taking place on + * @mgctx: migration context * * Migrate a process or task denoted by @leader. If migrating a process, * the caller must be holding cgroup_threadgroup_rwsem. The caller is also @@ -2773,10 +2279,9 @@ err: * decided for all targets by invoking group_migrate_prepare_dst() before * actually starting migrating. */ -static int cgroup_migrate(struct task_struct *leader, bool threadgroup, - struct cgroup_root *root) +int cgroup_migrate(struct task_struct *leader, bool threadgroup, + struct cgroup_mgctx *mgctx) { - struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset); struct task_struct *task; /* @@ -2788,14 +2293,14 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup, rcu_read_lock(); task = leader; do { - cgroup_taskset_add(task, &tset); + cgroup_migrate_add_task(task, mgctx); if (!threadgroup) break; } while_each_thread(leader, task); rcu_read_unlock(); spin_unlock_irq(&css_set_lock); - return cgroup_taskset_migrate(&tset, root); + return cgroup_migrate_execute(mgctx); } /** @@ -2806,10 +2311,10 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup, * * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. 
*/ -static int cgroup_attach_task(struct cgroup *dst_cgrp, - struct task_struct *leader, bool threadgroup) +int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, + bool threadgroup) { - LIST_HEAD(preloaded_csets); + DEFINE_CGROUP_MGCTX(mgctx); struct task_struct *task; int ret; @@ -2821,8 +2326,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp, rcu_read_lock(); task = leader; do { - cgroup_migrate_add_src(task_css_set(task), dst_cgrp, - &preloaded_csets); + cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx); if (!threadgroup) break; } while_each_thread(leader, task); @@ -2830,11 +2334,11 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp, spin_unlock_irq(&css_set_lock); /* prepare dst csets and commit */ - ret = cgroup_migrate_prepare_dst(&preloaded_csets); + ret = cgroup_migrate_prepare_dst(&mgctx); if (!ret) - ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root); + ret = cgroup_migrate(leader, threadgroup, &mgctx); - cgroup_migrate_finish(&preloaded_csets); + cgroup_migrate_finish(&mgctx); if (!ret) trace_cgroup_attach_task(dst_cgrp, leader, threadgroup); @@ -2846,20 +2350,9 @@ static int cgroup_procs_write_permission(struct task_struct *task, struct cgroup *dst_cgrp, struct kernfs_open_file *of) { - const struct cred *cred = current_cred(); - const struct cred *tcred = get_task_cred(task); int ret = 0; - /* - * even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. - */ - if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) - ret = -EACCES; - - if (!ret && cgroup_on_dfl(dst_cgrp)) { + if (cgroup_on_dfl(dst_cgrp)) { struct super_block *sb = of->file->f_path.dentry->d_sb; struct cgroup *cgrp; struct inode *inode; @@ -2877,9 +2370,21 @@ static int cgroup_procs_write_permission(struct task_struct *task, ret = inode_permission(inode, MAY_WRITE); iput(inode); } + } else { + const struct cred *cred = current_cred(); + const struct cred *tcred = get_task_cred(task); + + /* + * even if we're attaching all tasks in the thread group, + * we only need to check permissions on one of them. + */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) + ret = -EACCES; + put_cred(tcred); } - put_cred(tcred); return ret; } @@ -2888,8 +2393,8 @@ static int cgroup_procs_write_permission(struct task_struct *task, * function to attach either it or all tasks in its threadgroup. Will lock * cgroup_mutex and threadgroup. 
*/ -static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, - size_t nbytes, loff_t off, bool threadgroup) +ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off, bool threadgroup) { struct task_struct *tsk; struct cgroup_subsys *ss; @@ -2950,86 +2455,12 @@ out_unlock_threadgroup: return ret ?: nbytes; } -/** - * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' - * @from: attach to all cgroups of a given task - * @tsk: the task to be attached - */ -int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) -{ - struct cgroup_root *root; - int retval = 0; - - mutex_lock(&cgroup_mutex); - percpu_down_write(&cgroup_threadgroup_rwsem); - for_each_root(root) { - struct cgroup *from_cgrp; - - if (root == &cgrp_dfl_root) - continue; - - spin_lock_irq(&css_set_lock); - from_cgrp = task_cgroup_from_root(from, root); - spin_unlock_irq(&css_set_lock); - - retval = cgroup_attach_task(from_cgrp, tsk, false); - if (retval) - break; - } - percpu_up_write(&cgroup_threadgroup_rwsem); - mutex_unlock(&cgroup_mutex); - - return retval; -} -EXPORT_SYMBOL_GPL(cgroup_attach_task_all); - -static ssize_t cgroup_tasks_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - return __cgroup_procs_write(of, buf, nbytes, off, false); -} - -static ssize_t cgroup_procs_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) +ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes, + loff_t off) { return __cgroup_procs_write(of, buf, nbytes, off, true); } -static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct cgroup *cgrp; - - BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); - - cgrp = cgroup_kn_lock_live(of->kn, false); - if (!cgrp) - return -ENODEV; - spin_lock(&release_agent_path_lock); - strlcpy(cgrp->root->release_agent_path, strstrip(buf), - sizeof(cgrp->root->release_agent_path)); - spin_unlock(&release_agent_path_lock); - cgroup_kn_unlock(of->kn); - return nbytes; -} - -static int cgroup_release_agent_show(struct seq_file *seq, void *v) -{ - struct cgroup *cgrp = seq_css(seq)->cgroup; - - spin_lock(&release_agent_path_lock); - seq_puts(seq, cgrp->root->release_agent_path); - spin_unlock(&release_agent_path_lock); - seq_putc(seq, '\n'); - return 0; -} - -static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) -{ - seq_puts(seq, "0\n"); - return 0; -} - static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask) { struct cgroup_subsys *ss; @@ -3075,8 +2506,7 @@ static int cgroup_subtree_control_show(struct seq_file *seq, void *v) */ static int cgroup_update_dfl_csses(struct cgroup *cgrp) { - LIST_HEAD(preloaded_csets); - struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset); + DEFINE_CGROUP_MGCTX(mgctx); struct cgroup_subsys_state *d_css; struct cgroup *dsct; struct css_set *src_cset; @@ -3092,33 +2522,28 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) struct cgrp_cset_link *link; list_for_each_entry(link, &dsct->cset_links, cset_link) - cgroup_migrate_add_src(link->cset, dsct, - &preloaded_csets); + cgroup_migrate_add_src(link->cset, dsct, &mgctx); } spin_unlock_irq(&css_set_lock); /* NULL dst indicates self on default hierarchy */ - ret = cgroup_migrate_prepare_dst(&preloaded_csets); + ret = cgroup_migrate_prepare_dst(&mgctx); if (ret) goto out_finish; spin_lock_irq(&css_set_lock); - list_for_each_entry(src_cset, 
&preloaded_csets, mg_preload_node) { + list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) { struct task_struct *task, *ntask; - /* src_csets precede dst_csets, break on the first dst_cset */ - if (!src_cset->mg_src_cgrp) - break; - /* all tasks in src_csets need to be migrated */ list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) - cgroup_taskset_add(task, &tset); + cgroup_migrate_add_task(task, &mgctx); } spin_unlock_irq(&css_set_lock); - ret = cgroup_taskset_migrate(&tset, cgrp->root); + ret = cgroup_migrate_execute(&mgctx); out_finish: - cgroup_migrate_finish(&preloaded_csets); + cgroup_migrate_finish(&mgctx); percpu_up_write(&cgroup_threadgroup_rwsem); return ret; } @@ -3131,7 +2556,7 @@ out_finish: * controller while the previous css is still around. This function grabs * cgroup_mutex and drains the previous css instances of @cgrp's subtree. */ -static void cgroup_lock_and_drain_offline(struct cgroup *cgrp) +void cgroup_lock_and_drain_offline(struct cgroup *cgrp) __acquires(&cgroup_mutex) { struct cgroup *dsct; @@ -3503,6 +2928,23 @@ static int cgroup_events_show(struct seq_file *seq, void *v) return 0; } +static int cgroup_file_open(struct kernfs_open_file *of) +{ + struct cftype *cft = of->kn->priv; + + if (cft->open) + return cft->open(of); + return 0; +} + +static void cgroup_file_release(struct kernfs_open_file *of) +{ + struct cftype *cft = of->kn->priv; + + if (cft->release) + cft->release(of); +} + static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -3553,7 +2995,8 @@ static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) static void cgroup_seqfile_stop(struct seq_file *seq, void *v) { - seq_cft(seq)->seq_stop(seq, v); + if (seq_cft(seq)->seq_stop) + seq_cft(seq)->seq_stop(seq, v); } static int cgroup_seqfile_show(struct seq_file *m, void *arg) @@ -3575,12 +3018,16 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) static struct kernfs_ops cgroup_kf_single_ops = { .atomic_write_len = PAGE_SIZE, + .open = cgroup_file_open, + .release = cgroup_file_release, .write = cgroup_file_write, .seq_show = cgroup_seqfile_show, }; static struct kernfs_ops cgroup_kf_ops = { .atomic_write_len = PAGE_SIZE, + .open = cgroup_file_open, + .release = cgroup_file_release, .write = cgroup_file_write, .seq_start = cgroup_seqfile_start, .seq_next = cgroup_seqfile_next, @@ -3588,48 +3035,6 @@ static struct kernfs_ops cgroup_kf_ops = { .seq_show = cgroup_seqfile_show, }; -/* - * cgroup_rename - Only allow simple rename of directories in place. - */ -static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, - const char *new_name_str) -{ - struct cgroup *cgrp = kn->priv; - int ret; - - if (kernfs_type(kn) != KERNFS_DIR) - return -ENOTDIR; - if (kn->parent != new_parent) - return -EIO; - - /* - * This isn't a proper migration and its usefulness is very - * limited. Disallow on the default hierarchy. - */ - if (cgroup_on_dfl(cgrp)) - return -EPERM; - - /* - * We're gonna grab cgroup_mutex which nests outside kernfs - * active_ref. kernfs_rename() doesn't require active_ref - * protection. Break them before grabbing cgroup_mutex. 
- */ - kernfs_break_active_protection(new_parent); - kernfs_break_active_protection(kn); - - mutex_lock(&cgroup_mutex); - - ret = kernfs_rename(kn, new_parent, new_name_str); - if (!ret) - trace_cgroup_rename(cgrp); - - mutex_unlock(&cgroup_mutex); - - kernfs_unbreak_active_protection(kn); - kernfs_unbreak_active_protection(new_parent); - return ret; -} - /* set uid and gid of cgroup dirs and files to that of the creator */ static int cgroup_kn_set_ugid(struct kernfs_node *kn) { @@ -3926,26 +3331,6 @@ void cgroup_file_notify(struct cgroup_file *cfile) } /** - * cgroup_task_count - count the number of tasks in a cgroup. - * @cgrp: the cgroup in question - * - * Return the number of tasks in the cgroup. The returned number can be - * higher than the actual number of tasks due to css_set references from - * namespace roots and temporary usages. - */ -static int cgroup_task_count(const struct cgroup *cgrp) -{ - int count = 0; - struct cgrp_cset_link *link; - - spin_lock_irq(&css_set_lock); - list_for_each_entry(link, &cgrp->cset_links, cset_link) - count += atomic_read(&link->cset->refcount); - spin_unlock_irq(&css_set_lock); - return count; -} - -/** * css_next_child - find the next child of a given css * @pos: the current position (%NULL to initiate traversal) * @parent: css whose children to walk @@ -4343,560 +3728,69 @@ void css_task_iter_end(struct css_task_iter *it) put_task_struct(it->cur_task); } -/** - * cgroup_trasnsfer_tasks - move tasks from one cgroup to another - * @to: cgroup to which the tasks will be moved - * @from: cgroup in which the tasks currently reside - * - * Locking rules between cgroup_post_fork() and the migration path - * guarantee that, if a task is forking while being migrated, the new child - * is guaranteed to be either visible in the source cgroup after the - * parent's migration is complete or put into the target cgroup. No task - * can slip out of migration through forking. - */ -int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) -{ - LIST_HEAD(preloaded_csets); - struct cgrp_cset_link *link; - struct css_task_iter it; - struct task_struct *task; - int ret; - - if (!cgroup_may_migrate_to(to)) - return -EBUSY; - - mutex_lock(&cgroup_mutex); - - percpu_down_write(&cgroup_threadgroup_rwsem); - - /* all tasks in @from are being moved, all csets are source */ - spin_lock_irq(&css_set_lock); - list_for_each_entry(link, &from->cset_links, cset_link) - cgroup_migrate_add_src(link->cset, to, &preloaded_csets); - spin_unlock_irq(&css_set_lock); - - ret = cgroup_migrate_prepare_dst(&preloaded_csets); - if (ret) - goto out_err; - - /* - * Migrate tasks one-by-one until @from is empty. This fails iff - * ->can_attach() fails. - */ - do { - css_task_iter_start(&from->self, &it); - task = css_task_iter_next(&it); - if (task) - get_task_struct(task); - css_task_iter_end(&it); - - if (task) { - ret = cgroup_migrate(task, false, to->root); - if (!ret) - trace_cgroup_transfer_tasks(to, task, false); - put_task_struct(task); - } - } while (task && !ret); -out_err: - cgroup_migrate_finish(&preloaded_csets); - percpu_up_write(&cgroup_threadgroup_rwsem); - mutex_unlock(&cgroup_mutex); - return ret; -} - -/* - * Stuff for reading the 'tasks'/'procs' files. - * - * Reading this file can return large amounts of data if a cgroup has - * *lots* of attached tasks. So it may need several calls to read(), - * but we cannot guarantee that the information we produce is correct - * unless we produce it entirely atomically. 
- * - */ - -/* which pidlist file are we talking about? */ -enum cgroup_filetype { - CGROUP_FILE_PROCS, - CGROUP_FILE_TASKS, -}; - -/* - * A pidlist is a list of pids that virtually represents the contents of one - * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists, - * a pair (one each for procs, tasks) for each pid namespace that's relevant - * to the cgroup. - */ -struct cgroup_pidlist { - /* - * used to find which pidlist is wanted. doesn't change as long as - * this particular list stays in the list. - */ - struct { enum cgroup_filetype type; struct pid_namespace *ns; } key; - /* array of xids */ - pid_t *list; - /* how many elements the above list has */ - int length; - /* each of these stored in a list by its cgroup */ - struct list_head links; - /* pointer to the cgroup we belong to, for list removal purposes */ - struct cgroup *owner; - /* for delayed destruction */ - struct delayed_work destroy_dwork; -}; - -/* - * The following two functions "fix" the issue where there are more pids - * than kmalloc will give memory for; in such cases, we use vmalloc/vfree. - * TODO: replace with a kernel-wide solution to this problem - */ -#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2)) -static void *pidlist_allocate(int count) -{ - if (PIDLIST_TOO_LARGE(count)) - return vmalloc(count * sizeof(pid_t)); - else - return kmalloc(count * sizeof(pid_t), GFP_KERNEL); -} - -static void pidlist_free(void *p) -{ - kvfree(p); -} - -/* - * Used to destroy all pidlists lingering waiting for destroy timer. None - * should be left afterwards. - */ -static void cgroup_pidlist_destroy_all(struct cgroup *cgrp) -{ - struct cgroup_pidlist *l, *tmp_l; - - mutex_lock(&cgrp->pidlist_mutex); - list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links) - mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0); - mutex_unlock(&cgrp->pidlist_mutex); - - flush_workqueue(cgroup_pidlist_destroy_wq); - BUG_ON(!list_empty(&cgrp->pidlists)); -} - -static void cgroup_pidlist_destroy_work_fn(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist, - destroy_dwork); - struct cgroup_pidlist *tofree = NULL; - - mutex_lock(&l->owner->pidlist_mutex); - - /* - * Destroy iff we didn't get queued again. The state won't change - * as destroy_dwork can only be queued while locked. - */ - if (!delayed_work_pending(dwork)) { - list_del(&l->links); - pidlist_free(l->list); - put_pid_ns(l->key.ns); - tofree = l; - } - - mutex_unlock(&l->owner->pidlist_mutex); - kfree(tofree); -} - -/* - * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries - * Returns the number of unique elements. - */ -static int pidlist_uniq(pid_t *list, int length) -{ - int src, dest = 1; - - /* - * we presume the 0th element is unique, so i starts at 1. trivial - * edge cases first; no work needs to be done for either - */ - if (length == 0 || length == 1) - return length; - /* src and dest walk down the list; dest counts unique elements */ - for (src = 1; src < length; src++) { - /* find next unique element */ - while (list[src] == list[src-1]) { - src++; - if (src == length) - goto after; - } - /* dest always points to where the next unique element goes */ - list[dest] = list[src]; - dest++; - } -after: - return dest; -} - -/* - * The two pid files - task and cgroup.procs - guaranteed that the result - * is sorted, which forced this whole pidlist fiasco. 
As pid order is - * different per namespace, each namespace needs differently sorted list, - * making it impossible to use, for example, single rbtree of member tasks - * sorted by task pointer. As pidlists can be fairly large, allocating one - * per open file is dangerous, so cgroup had to implement shared pool of - * pidlists keyed by cgroup and namespace. - * - * All this extra complexity was caused by the original implementation - * committing to an entirely unnecessary property. In the long term, we - * want to do away with it. Explicitly scramble sort order if on the - * default hierarchy so that no such expectation exists in the new - * interface. - * - * Scrambling is done by swapping every two consecutive bits, which is - * non-identity one-to-one mapping which disturbs sort order sufficiently. - */ -static pid_t pid_fry(pid_t pid) +static void cgroup_procs_release(struct kernfs_open_file *of) { - unsigned a = pid & 0x55555555; - unsigned b = pid & 0xAAAAAAAA; - - return (a << 1) | (b >> 1); -} - -static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid) -{ - if (cgroup_on_dfl(cgrp)) - return pid_fry(pid); - else - return pid; -} - -static int cmppid(const void *a, const void *b) -{ - return *(pid_t *)a - *(pid_t *)b; -} - -static int fried_cmppid(const void *a, const void *b) -{ - return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b); -} - -static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, - enum cgroup_filetype type) -{ - struct cgroup_pidlist *l; - /* don't need task_nsproxy() if we're looking at ourself */ - struct pid_namespace *ns = task_active_pid_ns(current); - - lockdep_assert_held(&cgrp->pidlist_mutex); - - list_for_each_entry(l, &cgrp->pidlists, links) - if (l->key.type == type && l->key.ns == ns) - return l; - return NULL; -} - -/* - * find the appropriate pidlist for our purpose (given procs vs tasks) - * returns with the lock on that pidlist already held, and takes care - * of the use count, or returns NULL with no locks held if we're out of - * memory. - */ -static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, - enum cgroup_filetype type) -{ - struct cgroup_pidlist *l; - - lockdep_assert_held(&cgrp->pidlist_mutex); - - l = cgroup_pidlist_find(cgrp, type); - if (l) - return l; - - /* entry not found; create a new one */ - l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); - if (!l) - return l; - - INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn); - l->key.type = type; - /* don't need task_nsproxy() if we're looking at ourself */ - l->key.ns = get_pid_ns(task_active_pid_ns(current)); - l->owner = cgrp; - list_add(&l->links, &cgrp->pidlists); - return l; -} - -/* - * Load a cgroup's pidarray with either procs' tgids or tasks' pids - */ -static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, - struct cgroup_pidlist **lp) -{ - pid_t *array; - int length; - int pid, n = 0; /* used for populating the array */ - struct css_task_iter it; - struct task_struct *tsk; - struct cgroup_pidlist *l; - - lockdep_assert_held(&cgrp->pidlist_mutex); - - /* - * If cgroup gets more users after we read count, we won't have - * enough space - tough. This race is indistinguishable to the - * caller from the case that the additional cgroup users didn't - * show up until sometime later on. 
- */ - length = cgroup_task_count(cgrp); - array = pidlist_allocate(length); - if (!array) - return -ENOMEM; - /* now, populate the array */ - css_task_iter_start(&cgrp->self, &it); - while ((tsk = css_task_iter_next(&it))) { - if (unlikely(n == length)) - break; - /* get tgid or pid for procs or tasks file respectively */ - if (type == CGROUP_FILE_PROCS) - pid = task_tgid_vnr(tsk); - else - pid = task_pid_vnr(tsk); - if (pid > 0) /* make sure to only use valid results */ - array[n++] = pid; - } - css_task_iter_end(&it); - length = n; - /* now sort & (if procs) strip out duplicates */ - if (cgroup_on_dfl(cgrp)) - sort(array, length, sizeof(pid_t), fried_cmppid, NULL); - else - sort(array, length, sizeof(pid_t), cmppid, NULL); - if (type == CGROUP_FILE_PROCS) - length = pidlist_uniq(array, length); - - l = cgroup_pidlist_find_create(cgrp, type); - if (!l) { - pidlist_free(array); - return -ENOMEM; + if (of->priv) { + css_task_iter_end(of->priv); + kfree(of->priv); } - - /* store array, freeing old if necessary */ - pidlist_free(l->list); - l->list = array; - l->length = length; - *lp = l; - return 0; } -/** - * cgroupstats_build - build and fill cgroupstats - * @stats: cgroupstats to fill information into - * @dentry: A dentry entry belonging to the cgroup for which stats have - * been requested. - * - * Build and fill cgroupstats so that taskstats can export it to user - * space. - */ -int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) +static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) { - struct kernfs_node *kn = kernfs_node_from_dentry(dentry); - struct cgroup *cgrp; - struct css_task_iter it; - struct task_struct *tsk; - - /* it should be kernfs_node belonging to cgroupfs and is a directory */ - if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || - kernfs_type(kn) != KERNFS_DIR) - return -EINVAL; - - mutex_lock(&cgroup_mutex); - - /* - * We aren't being called from kernfs and there's no guarantee on - * @kn->priv's validity. For this and css_tryget_online_from_dir(), - * @kn->priv is RCU safe. Let's do the RCU dancing. - */ - rcu_read_lock(); - cgrp = rcu_dereference(kn->priv); - if (!cgrp || cgroup_is_dead(cgrp)) { - rcu_read_unlock(); - mutex_unlock(&cgroup_mutex); - return -ENOENT; - } - rcu_read_unlock(); + struct kernfs_open_file *of = s->private; + struct css_task_iter *it = of->priv; + struct task_struct *task; - css_task_iter_start(&cgrp->self, &it); - while ((tsk = css_task_iter_next(&it))) { - switch (tsk->state) { - case TASK_RUNNING: - stats->nr_running++; - break; - case TASK_INTERRUPTIBLE: - stats->nr_sleeping++; - break; - case TASK_UNINTERRUPTIBLE: - stats->nr_uninterruptible++; - break; - case TASK_STOPPED: - stats->nr_stopped++; - break; - default: - if (delayacct_is_task_waiting_on_io(tsk)) - stats->nr_io_wait++; - break; - } - } - css_task_iter_end(&it); + do { + task = css_task_iter_next(it); + } while (task && !thread_group_leader(task)); - mutex_unlock(&cgroup_mutex); - return 0; + return task; } - -/* - * seq_file methods for the tasks/procs files. The seq_file position is the - * next pid to display; the seq_file iterator is a pointer to the pid - * in the cgroup->l->list array. - */ - -static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) +static void *cgroup_procs_start(struct seq_file *s, loff_t *pos) { - /* - * Initially we receive a position value that corresponds to - * one more than the last pid shown (or 0 on the first call or - * after a seek to the start). 
Use a binary-search to find the - * next pid to display, if any - */ struct kernfs_open_file *of = s->private; struct cgroup *cgrp = seq_css(s)->cgroup; - struct cgroup_pidlist *l; - enum cgroup_filetype type = seq_cft(s)->private; - int index = 0, pid = *pos; - int *iter, ret; - - mutex_lock(&cgrp->pidlist_mutex); + struct css_task_iter *it = of->priv; /* - * !NULL @of->priv indicates that this isn't the first start() - * after open. If the matching pidlist is around, we can use that. - * Look for it. Note that @of->priv can't be used directly. It - * could already have been destroyed. + * When a seq_file is seeked, it's always traversed sequentially + * from position 0, so we can simply keep iterating on !0 *pos. */ - if (of->priv) - of->priv = cgroup_pidlist_find(cgrp, type); - - /* - * Either this is the first start() after open or the matching - * pidlist has been destroyed inbetween. Create a new one. - */ - if (!of->priv) { - ret = pidlist_array_load(cgrp, type, - (struct cgroup_pidlist **)&of->priv); - if (ret) - return ERR_PTR(ret); - } - l = of->priv; - - if (pid) { - int end = l->length; - - while (index < end) { - int mid = (index + end) / 2; - if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) { - index = mid; - break; - } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid) - index = mid + 1; - else - end = mid; - } - } - /* If we're off the end of the array, we're done */ - if (index >= l->length) - return NULL; - /* Update the abstract position to be the actual pid that we found */ - iter = l->list + index; - *pos = cgroup_pid_fry(cgrp, *iter); - return iter; -} - -static void cgroup_pidlist_stop(struct seq_file *s, void *v) -{ - struct kernfs_open_file *of = s->private; - struct cgroup_pidlist *l = of->priv; - - if (l) - mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, - CGROUP_PIDLIST_DESTROY_DELAY); - mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex); -} + if (!it) { + if (WARN_ON_ONCE((*pos)++)) + return ERR_PTR(-EINVAL); -static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) -{ - struct kernfs_open_file *of = s->private; - struct cgroup_pidlist *l = of->priv; - pid_t *p = v; - pid_t *end = l->list + l->length; - /* - * Advance to the next pid in the array. 
If this goes off the - * end, we're done - */ - p++; - if (p >= end) { - return NULL; - } else { - *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p); - return p; + it = kzalloc(sizeof(*it), GFP_KERNEL); + if (!it) + return ERR_PTR(-ENOMEM); + of->priv = it; + css_task_iter_start(&cgrp->self, it); + } else if (!(*pos)++) { + css_task_iter_end(it); + css_task_iter_start(&cgrp->self, it); } -} - -static int cgroup_pidlist_show(struct seq_file *s, void *v) -{ - seq_printf(s, "%d\n", *(int *)v); - return 0; + return cgroup_procs_next(s, NULL, NULL); } -static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css, - struct cftype *cft) +static int cgroup_procs_show(struct seq_file *s, void *v) { - return notify_on_release(css->cgroup); -} - -static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, - struct cftype *cft, u64 val) -{ - if (val) - set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags); - else - clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags); - return 0; -} - -static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); -} - -static int cgroup_clone_children_write(struct cgroup_subsys_state *css, - struct cftype *cft, u64 val) -{ - if (val) - set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); - else - clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags); + seq_printf(s, "%d\n", task_tgid_vnr(v)); return 0; } /* cgroup core interface files for the default hierarchy */ -static struct cftype cgroup_dfl_base_files[] = { +static struct cftype cgroup_base_files[] = { { .name = "cgroup.procs", .file_offset = offsetof(struct cgroup, procs_file), - .seq_start = cgroup_pidlist_start, - .seq_next = cgroup_pidlist_next, - .seq_stop = cgroup_pidlist_stop, - .seq_show = cgroup_pidlist_show, - .private = CGROUP_FILE_PROCS, + .release = cgroup_procs_release, + .seq_start = cgroup_procs_start, + .seq_next = cgroup_procs_next, + .seq_show = cgroup_procs_show, .write = cgroup_procs_write, }, { @@ -4917,51 +3811,6 @@ static struct cftype cgroup_dfl_base_files[] = { { } /* terminate */ }; -/* cgroup core interface files for the legacy hierarchies */ -static struct cftype cgroup_legacy_base_files[] = { - { - .name = "cgroup.procs", - .seq_start = cgroup_pidlist_start, - .seq_next = cgroup_pidlist_next, - .seq_stop = cgroup_pidlist_stop, - .seq_show = cgroup_pidlist_show, - .private = CGROUP_FILE_PROCS, - .write = cgroup_procs_write, - }, - { - .name = "cgroup.clone_children", - .read_u64 = cgroup_clone_children_read, - .write_u64 = cgroup_clone_children_write, - }, - { - .name = "cgroup.sane_behavior", - .flags = CFTYPE_ONLY_ON_ROOT, - .seq_show = cgroup_sane_behavior_show, - }, - { - .name = "tasks", - .seq_start = cgroup_pidlist_start, - .seq_next = cgroup_pidlist_next, - .seq_stop = cgroup_pidlist_stop, - .seq_show = cgroup_pidlist_show, - .private = CGROUP_FILE_TASKS, - .write = cgroup_tasks_write, - }, - { - .name = "notify_on_release", - .read_u64 = cgroup_read_notify_on_release, - .write_u64 = cgroup_write_notify_on_release, - }, - { - .name = "release_agent", - .flags = CFTYPE_ONLY_ON_ROOT, - .seq_show = cgroup_release_agent_show, - .write = cgroup_release_agent_write, - .max_write_len = PATH_MAX - 1, - }, - { } /* terminate */ -}; - /* * css destruction is four-stage process. 
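With the pidlist machinery gone on the default hierarchy, cgroup.procs is backed directly by css_task_iter: reads emit one thread-group-leader TGID per line in no particular order (a TGID may repeat if tasks move while the file is being read), and writing a PID migrates that whole thread group. A minimal userspace sketch of the resulting interface, assuming an illustrative cgroup2 mount at /sys/fs/cgroup and a child group named "test":

#include <stdio.h>
#include <unistd.h>

/*
 * Sketch only: list the thread-group leaders currently in a cgroup2 group,
 * then migrate the calling process into it.  Mount point and group name
 * are assumptions for illustration.
 */
int main(void)
{
	const char *procs = "/sys/fs/cgroup/test/cgroup.procs";
	char line[64];
	FILE *f;

	f = fopen(procs, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* one TGID per line, unsorted */
		printf("member tgid: %s", line);
	fclose(f);

	f = fopen(procs, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", getpid());		/* move this whole thread group */
	fclose(f);
	return 0;
}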
* @@ -5007,7 +3856,7 @@ static void css_free_work_fn(struct work_struct *work) } else { /* cgroup free path */ atomic_dec(&cgrp->root->nr_cgrps); - cgroup_pidlist_destroy_all(cgrp); + cgroup1_pidlist_destroy_all(cgrp); cancel_work_sync(&cgrp->release_agent_work); if (cgroup_parent(cgrp)) { @@ -5302,8 +4151,7 @@ out_free_cgrp: return ERR_PTR(ret); } -static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, - umode_t mode) +int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) { struct cgroup *parent, *cgrp; struct kernfs_node *kn; @@ -5507,7 +4355,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) */ kernfs_remove(cgrp->kn); - check_for_release(cgroup_parent(cgrp)); + cgroup1_check_for_release(cgroup_parent(cgrp)); /* put the base reference */ percpu_ref_kill(&cgrp->self.refcnt); @@ -5515,7 +4363,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) return 0; }; -static int cgroup_rmdir(struct kernfs_node *kn) +int cgroup_rmdir(struct kernfs_node *kn) { struct cgroup *cgrp; int ret = 0; @@ -5535,10 +4383,8 @@ static int cgroup_rmdir(struct kernfs_node *kn) static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { .remount_fs = cgroup_remount, - .show_options = cgroup_show_options, .mkdir = cgroup_mkdir, .rmdir = cgroup_rmdir, - .rename = cgroup_rename, .show_path = cgroup_show_path, }; @@ -5646,8 +4492,8 @@ int __init cgroup_init(void) BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16); BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); - BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); - BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); + BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); + BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files)); /* * The latency of the synchronize_sched() is too high for cgroups, @@ -5697,7 +4543,7 @@ int __init cgroup_init(void) continue; } - if (cgroup_ssid_no_v1(ssid)) + if (cgroup1_ssid_disabled(ssid)) printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n", ss->name); @@ -5744,15 +4590,6 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); - - /* - * Used to destroy pidlists and separate to serve as flush domain. - * Cap @max_active to 1 too. - */ - cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy", - 0, 1); - BUG_ON(!cgroup_pidlist_destroy_wq); - return 0; } core_initcall(cgroup_wq_init); @@ -5835,42 +4672,6 @@ out: return retval; } -/* Display information about each subsystem and each hierarchy */ -static int proc_cgroupstats_show(struct seq_file *m, void *v) -{ - struct cgroup_subsys *ss; - int i; - - seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n"); - /* - * ideally we don't want subsystems moving around while we do this. - * cgroup_mutex is also necessary to guarantee an atomic snapshot of - * subsys/hierarchy state. 
- */ - mutex_lock(&cgroup_mutex); - - for_each_subsys(ss, i) - seq_printf(m, "%s\t%d\t%d\t%d\n", - ss->legacy_name, ss->root->hierarchy_id, - atomic_read(&ss->root->nr_cgrps), - cgroup_ssid_enabled(i)); - - mutex_unlock(&cgroup_mutex); - return 0; -} - -static int cgroupstats_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_cgroupstats_show, NULL); -} - -static const struct file_operations proc_cgroupstats_operations = { - .open = cgroupstats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /** * cgroup_fork - initialize cgroup related fields during copy_process() * @child: pointer to task_struct of forking parent process. @@ -6050,76 +4851,6 @@ void cgroup_free(struct task_struct *task) put_css_set(cset); } -static void check_for_release(struct cgroup *cgrp) -{ - if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) && - !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp)) - schedule_work(&cgrp->release_agent_work); -} - -/* - * Notify userspace when a cgroup is released, by running the - * configured release agent with the name of the cgroup (path - * relative to the root of cgroup file system) as the argument. - * - * Most likely, this user command will try to rmdir this cgroup. - * - * This races with the possibility that some other task will be - * attached to this cgroup before it is removed, or that some other - * user task will 'mkdir' a child cgroup of this cgroup. That's ok. - * The presumed 'rmdir' will fail quietly if this cgroup is no longer - * unused, and this cgroup will be reprieved from its death sentence, - * to continue to serve a useful existence. Next time it's released, - * we will get notified again, if it still has 'notify_on_release' set. - * - * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which - * means only wait until the task is successfully execve()'d. The - * separate release agent task is forked by call_usermodehelper(), - * then control in this thread returns here, without waiting for the - * release agent task. We don't bother to wait because the caller of - * this routine has no use for the exit status of the release agent - * task, so no sense holding our caller up for that. 
- */ -static void cgroup_release_agent(struct work_struct *work) -{ - struct cgroup *cgrp = - container_of(work, struct cgroup, release_agent_work); - char *pathbuf = NULL, *agentbuf = NULL; - char *argv[3], *envp[3]; - int ret; - - mutex_lock(&cgroup_mutex); - - pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); - agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); - if (!pathbuf || !agentbuf) - goto out; - - spin_lock_irq(&css_set_lock); - ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns); - spin_unlock_irq(&css_set_lock); - if (ret < 0 || ret >= PATH_MAX) - goto out; - - argv[0] = agentbuf; - argv[1] = pathbuf; - argv[2] = NULL; - - /* minimal command environment */ - envp[0] = "HOME=/"; - envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; - envp[2] = NULL; - - mutex_unlock(&cgroup_mutex); - call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); - goto out_free; -out: - mutex_unlock(&cgroup_mutex); -out_free: - kfree(agentbuf); - kfree(pathbuf); -} - static int __init cgroup_disable(char *str) { struct cgroup_subsys *ss; @@ -6141,33 +4872,6 @@ static int __init cgroup_disable(char *str) } __setup("cgroup_disable=", cgroup_disable); -static int __init cgroup_no_v1(char *str) -{ - struct cgroup_subsys *ss; - char *token; - int i; - - while ((token = strsep(&str, ",")) != NULL) { - if (!*token) - continue; - - if (!strcmp(token, "all")) { - cgroup_no_v1_mask = U16_MAX; - break; - } - - for_each_subsys(ss, i) { - if (strcmp(token, ss->name) && - strcmp(token, ss->legacy_name)) - continue; - - cgroup_no_v1_mask |= 1 << i; - } - } - return 1; -} -__setup("cgroup_no_v1=", cgroup_no_v1); - /** * css_tryget_online_from_dir - get corresponding css from a cgroup dentry * @dentry: directory dentry of interest @@ -6197,7 +4901,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, * have been or be removed at any point. @kn->priv is RCU * protected for this access. See css_release_work_fn() for details. */ - cgrp = rcu_dereference(kn->priv); + cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); if (cgrp) css = cgroup_css(cgrp, ss); @@ -6349,154 +5053,6 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd) #endif /* CONFIG_SOCK_CGROUP_DATA */ -/* cgroup namespaces */ - -static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns) -{ - return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES); -} - -static void dec_cgroup_namespaces(struct ucounts *ucounts) -{ - dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES); -} - -static struct cgroup_namespace *alloc_cgroup_ns(void) -{ - struct cgroup_namespace *new_ns; - int ret; - - new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL); - if (!new_ns) - return ERR_PTR(-ENOMEM); - ret = ns_alloc_inum(&new_ns->ns); - if (ret) { - kfree(new_ns); - return ERR_PTR(ret); - } - atomic_set(&new_ns->count, 1); - new_ns->ns.ops = &cgroupns_operations; - return new_ns; -} - -void free_cgroup_ns(struct cgroup_namespace *ns) -{ - put_css_set(ns->root_cset); - dec_cgroup_namespaces(ns->ucounts); - put_user_ns(ns->user_ns); - ns_free_inum(&ns->ns); - kfree(ns); -} -EXPORT_SYMBOL(free_cgroup_ns); - -struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, - struct user_namespace *user_ns, - struct cgroup_namespace *old_ns) -{ - struct cgroup_namespace *new_ns; - struct ucounts *ucounts; - struct css_set *cset; - - BUG_ON(!old_ns); - - if (!(flags & CLONE_NEWCGROUP)) { - get_cgroup_ns(old_ns); - return old_ns; - } - - /* Allow only sysadmin to create cgroup namespace. 
*/ - if (!ns_capable(user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); - - ucounts = inc_cgroup_namespaces(user_ns); - if (!ucounts) - return ERR_PTR(-ENOSPC); - - /* It is not safe to take cgroup_mutex here */ - spin_lock_irq(&css_set_lock); - cset = task_css_set(current); - get_css_set(cset); - spin_unlock_irq(&css_set_lock); - - new_ns = alloc_cgroup_ns(); - if (IS_ERR(new_ns)) { - put_css_set(cset); - dec_cgroup_namespaces(ucounts); - return new_ns; - } - - new_ns->user_ns = get_user_ns(user_ns); - new_ns->ucounts = ucounts; - new_ns->root_cset = cset; - - return new_ns; -} - -static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns) -{ - return container_of(ns, struct cgroup_namespace, ns); -} - -static int cgroupns_install(struct nsproxy *nsproxy, struct ns_common *ns) -{ - struct cgroup_namespace *cgroup_ns = to_cg_ns(ns); - - if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN) || - !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN)) - return -EPERM; - - /* Don't need to do anything if we are attaching to our own cgroupns. */ - if (cgroup_ns == nsproxy->cgroup_ns) - return 0; - - get_cgroup_ns(cgroup_ns); - put_cgroup_ns(nsproxy->cgroup_ns); - nsproxy->cgroup_ns = cgroup_ns; - - return 0; -} - -static struct ns_common *cgroupns_get(struct task_struct *task) -{ - struct cgroup_namespace *ns = NULL; - struct nsproxy *nsproxy; - - task_lock(task); - nsproxy = task->nsproxy; - if (nsproxy) { - ns = nsproxy->cgroup_ns; - get_cgroup_ns(ns); - } - task_unlock(task); - - return ns ? &ns->ns : NULL; -} - -static void cgroupns_put(struct ns_common *ns) -{ - put_cgroup_ns(to_cg_ns(ns)); -} - -static struct user_namespace *cgroupns_owner(struct ns_common *ns) -{ - return to_cg_ns(ns)->user_ns; -} - -const struct proc_ns_operations cgroupns_operations = { - .name = "cgroup", - .type = CLONE_NEWCGROUP, - .get = cgroupns_get, - .put = cgroupns_put, - .install = cgroupns_install, - .owner = cgroupns_owner, -}; - -static __init int cgroup_namespaces_init(void) -{ - return 0; -} -subsys_initcall(cgroup_namespaces_init); - #ifdef CONFIG_CGROUP_BPF int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, bool overridable) @@ -6510,149 +5066,3 @@ int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog, return ret; } #endif /* CONFIG_CGROUP_BPF */ - -#ifdef CONFIG_CGROUP_DEBUG -static struct cgroup_subsys_state * -debug_css_alloc(struct cgroup_subsys_state *parent_css) -{ - struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); - - if (!css) - return ERR_PTR(-ENOMEM); - - return css; -} - -static void debug_css_free(struct cgroup_subsys_state *css) -{ - kfree(css); -} - -static u64 debug_taskcount_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - return cgroup_task_count(css->cgroup); -} - -static u64 current_css_set_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - return (u64)(unsigned long)current->cgroups; -} - -static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css, - struct cftype *cft) -{ - u64 count; - - rcu_read_lock(); - count = atomic_read(&task_css_set(current)->refcount); - rcu_read_unlock(); - return count; -} - -static int current_css_set_cg_links_read(struct seq_file *seq, void *v) -{ - struct cgrp_cset_link *link; - struct css_set *cset; - char *name_buf; - - name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); - if (!name_buf) - return -ENOMEM; - - spin_lock_irq(&css_set_lock); - rcu_read_lock(); - cset = rcu_dereference(current->cgroups); - list_for_each_entry(link, 
&cset->cgrp_links, cgrp_link) { - struct cgroup *c = link->cgrp; - - cgroup_name(c, name_buf, NAME_MAX + 1); - seq_printf(seq, "Root %d group %s\n", - c->root->hierarchy_id, name_buf); - } - rcu_read_unlock(); - spin_unlock_irq(&css_set_lock); - kfree(name_buf); - return 0; -} - -#define MAX_TASKS_SHOWN_PER_CSS 25 -static int cgroup_css_links_read(struct seq_file *seq, void *v) -{ - struct cgroup_subsys_state *css = seq_css(seq); - struct cgrp_cset_link *link; - - spin_lock_irq(&css_set_lock); - list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { - struct css_set *cset = link->cset; - struct task_struct *task; - int count = 0; - - seq_printf(seq, "css_set %p\n", cset); - - list_for_each_entry(task, &cset->tasks, cg_list) { - if (count++ > MAX_TASKS_SHOWN_PER_CSS) - goto overflow; - seq_printf(seq, " task %d\n", task_pid_vnr(task)); - } - - list_for_each_entry(task, &cset->mg_tasks, cg_list) { - if (count++ > MAX_TASKS_SHOWN_PER_CSS) - goto overflow; - seq_printf(seq, " task %d\n", task_pid_vnr(task)); - } - continue; - overflow: - seq_puts(seq, " ...\n"); - } - spin_unlock_irq(&css_set_lock); - return 0; -} - -static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft) -{ - return (!cgroup_is_populated(css->cgroup) && - !css_has_online_children(&css->cgroup->self)); -} - -static struct cftype debug_files[] = { - { - .name = "taskcount", - .read_u64 = debug_taskcount_read, - }, - - { - .name = "current_css_set", - .read_u64 = current_css_set_read, - }, - - { - .name = "current_css_set_refcount", - .read_u64 = current_css_set_refcount_read, - }, - - { - .name = "current_css_set_cg_links", - .seq_show = current_css_set_cg_links_read, - }, - - { - .name = "cgroup_css_links", - .seq_show = cgroup_css_links_read, - }, - - { - .name = "releasable", - .read_u64 = releasable_read, - }, - - { } /* terminate */ -}; - -struct cgroup_subsys debug_cgrp_subsys = { - .css_alloc = debug_css_alloc, - .css_free = debug_css_free, - .legacy_cftypes = debug_files, -}; -#endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cpuset.c b/kernel/cgroup/cpuset.c index b3088886cd37..0f41292be0fb 100644 --- a/kernel/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -44,6 +44,8 @@ #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/seq_file.h> #include <linux/security.h> #include <linux/slab.h> diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup/freezer.c index 1b72d56edce5..1b72d56edce5 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup/freezer.c diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c new file mode 100644 index 000000000000..96d38dab6fb2 --- /dev/null +++ b/kernel/cgroup/namespace.c @@ -0,0 +1,155 @@ +#include "cgroup-internal.h" + +#include <linux/sched/task.h> +#include <linux/slab.h> +#include <linux/nsproxy.h> +#include <linux/proc_ns.h> + + +/* cgroup namespaces */ + +static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns) +{ + return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES); +} + +static void dec_cgroup_namespaces(struct ucounts *ucounts) +{ + dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES); +} + +static struct cgroup_namespace *alloc_cgroup_ns(void) +{ + struct cgroup_namespace *new_ns; + int ret; + + new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL); + if (!new_ns) + return ERR_PTR(-ENOMEM); + ret = ns_alloc_inum(&new_ns->ns); + if (ret) { + kfree(new_ns); + return ERR_PTR(ret); + } + atomic_set(&new_ns->count, 
1); + new_ns->ns.ops = &cgroupns_operations; + return new_ns; +} + +void free_cgroup_ns(struct cgroup_namespace *ns) +{ + put_css_set(ns->root_cset); + dec_cgroup_namespaces(ns->ucounts); + put_user_ns(ns->user_ns); + ns_free_inum(&ns->ns); + kfree(ns); +} +EXPORT_SYMBOL(free_cgroup_ns); + +struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, + struct user_namespace *user_ns, + struct cgroup_namespace *old_ns) +{ + struct cgroup_namespace *new_ns; + struct ucounts *ucounts; + struct css_set *cset; + + BUG_ON(!old_ns); + + if (!(flags & CLONE_NEWCGROUP)) { + get_cgroup_ns(old_ns); + return old_ns; + } + + /* Allow only sysadmin to create cgroup namespace. */ + if (!ns_capable(user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + ucounts = inc_cgroup_namespaces(user_ns); + if (!ucounts) + return ERR_PTR(-ENOSPC); + + /* It is not safe to take cgroup_mutex here */ + spin_lock_irq(&css_set_lock); + cset = task_css_set(current); + get_css_set(cset); + spin_unlock_irq(&css_set_lock); + + new_ns = alloc_cgroup_ns(); + if (IS_ERR(new_ns)) { + put_css_set(cset); + dec_cgroup_namespaces(ucounts); + return new_ns; + } + + new_ns->user_ns = get_user_ns(user_ns); + new_ns->ucounts = ucounts; + new_ns->root_cset = cset; + + return new_ns; +} + +static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns) +{ + return container_of(ns, struct cgroup_namespace, ns); +} + +static int cgroupns_install(struct nsproxy *nsproxy, struct ns_common *ns) +{ + struct cgroup_namespace *cgroup_ns = to_cg_ns(ns); + + if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN) || + !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN)) + return -EPERM; + + /* Don't need to do anything if we are attaching to our own cgroupns. */ + if (cgroup_ns == nsproxy->cgroup_ns) + return 0; + + get_cgroup_ns(cgroup_ns); + put_cgroup_ns(nsproxy->cgroup_ns); + nsproxy->cgroup_ns = cgroup_ns; + + return 0; +} + +static struct ns_common *cgroupns_get(struct task_struct *task) +{ + struct cgroup_namespace *ns = NULL; + struct nsproxy *nsproxy; + + task_lock(task); + nsproxy = task->nsproxy; + if (nsproxy) { + ns = nsproxy->cgroup_ns; + get_cgroup_ns(ns); + } + task_unlock(task); + + return ns ? &ns->ns : NULL; +} + +static void cgroupns_put(struct ns_common *ns) +{ + put_cgroup_ns(to_cg_ns(ns)); +} + +static struct user_namespace *cgroupns_owner(struct ns_common *ns) +{ + return to_cg_ns(ns)->user_ns; +} + +const struct proc_ns_operations cgroupns_operations = { + .name = "cgroup", + .type = CLONE_NEWCGROUP, + .get = cgroupns_get, + .put = cgroupns_put, + .install = cgroupns_install, + .owner = cgroupns_owner, +}; + +static __init int cgroup_namespaces_init(void) +{ + return 0; +} +subsys_initcall(cgroup_namespaces_init); diff --git a/kernel/cgroup_pids.c b/kernel/cgroup/pids.c index 2bd673783f1a..e756dae49300 100644 --- a/kernel/cgroup_pids.c +++ b/kernel/cgroup/pids.c @@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset) /* * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies - * on threadgroup_change_begin() held by the copy_process(). + * on cgroup_threadgroup_change_begin() held by the copy_process(). */ static int pids_can_fork(struct task_struct *task) { diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c new file mode 100644 index 000000000000..defad3c5e7dc --- /dev/null +++ b/kernel/cgroup/rdma.c @@ -0,0 +1,619 @@ +/* + * RDMA resource limiting controller for cgroups. 
+ * + * Used to allow a cgroup hierarchy to stop processes from consuming + * additional RDMA resources after a certain limit is reached. + * + * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/cgroup.h> +#include <linux/parser.h> +#include <linux/cgroup_rdma.h> + +#define RDMACG_MAX_STR "max" + +/* + * Protects list of resource pools maintained on per cgroup basis + * and rdma device list. + */ +static DEFINE_MUTEX(rdmacg_mutex); +static LIST_HEAD(rdmacg_devices); + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX, + RDMACG_RESOURCE_TYPE_STAT, +}; + +/* + * resource table definition as to be seen by the user. + * Need to add entries to it when more resources are + * added/defined at IB verb/core layer. + */ +static char const *rdmacg_resource_names[] = { + [RDMACG_RESOURCE_HCA_HANDLE] = "hca_handle", + [RDMACG_RESOURCE_HCA_OBJECT] = "hca_object", +}; + +/* resource tracker for each resource of rdma cgroup */ +struct rdmacg_resource { + int max; + int usage; +}; + +/* + * resource pool object which represents per cgroup, per device + * resources. There are multiple instances of this object per cgroup, + * therefore it cannot be embedded within rdma_cgroup structure. It + * is maintained as list. + */ +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[RDMACG_RESOURCE_MAX]; + + struct list_head cg_node; + struct list_head dev_node; + + /* count active user tasks of this pool */ + u64 usage_sum; + /* total number counts which are set to max */ + int num_max_cnt; +}; + +static struct rdma_cgroup *css_rdmacg(struct cgroup_subsys_state *css) +{ + return container_of(css, struct rdma_cgroup, css); +} + +static struct rdma_cgroup *parent_rdmacg(struct rdma_cgroup *cg) +{ + return css_rdmacg(cg->css.parent); +} + +static inline struct rdma_cgroup *get_current_rdmacg(void) +{ + return css_rdmacg(task_get_css(current, rdma_cgrp_id)); +} + +static void set_resource_limit(struct rdmacg_resource_pool *rpool, + int index, int new_max) +{ + if (new_max == S32_MAX) { + if (rpool->resources[index].max != S32_MAX) + rpool->num_max_cnt++; + } else { + if (rpool->resources[index].max == S32_MAX) + rpool->num_max_cnt--; + } + rpool->resources[index].max = new_max; +} + +static void set_all_resource_max_limit(struct rdmacg_resource_pool *rpool) +{ + int i; + + for (i = 0; i < RDMACG_RESOURCE_MAX; i++) + set_resource_limit(rpool, i, S32_MAX); +} + +static void free_cg_rpool_locked(struct rdmacg_resource_pool *rpool) +{ + lockdep_assert_held(&rdmacg_mutex); + + list_del(&rpool->cg_node); + list_del(&rpool->dev_node); + kfree(rpool); +} + +static struct rdmacg_resource_pool * +find_cg_rpool_locked(struct rdma_cgroup *cg, + struct rdmacg_device *device) + +{ + struct rdmacg_resource_pool *pool; + + lockdep_assert_held(&rdmacg_mutex); + + list_for_each_entry(pool, &cg->rpools, cg_node) + if (pool->device == device) + return pool; + + return NULL; +} + +static struct rdmacg_resource_pool * +get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device) +{ + struct rdmacg_resource_pool *rpool; + + rpool = find_cg_rpool_locked(cg, device); + if (rpool) + return rpool; + + rpool = kzalloc(sizeof(*rpool), GFP_KERNEL); + if (!rpool) + return 
ERR_PTR(-ENOMEM); + + rpool->device = device; + set_all_resource_max_limit(rpool); + + INIT_LIST_HEAD(&rpool->cg_node); + INIT_LIST_HEAD(&rpool->dev_node); + list_add_tail(&rpool->cg_node, &cg->rpools); + list_add_tail(&rpool->dev_node, &device->rpools); + return rpool; +} + +/** + * uncharge_cg_locked - uncharge resource for rdma cgroup + * @cg: pointer to cg to uncharge and all parents in hierarchy + * @device: pointer to rdmacg device + * @index: index of the resource to uncharge in cg (resource pool) + * + * It also frees the resource pool which was created as part of + * charging operation when there are no resources attached to + * resource pool. + */ +static void +uncharge_cg_locked(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index) +{ + struct rdmacg_resource_pool *rpool; + + rpool = find_cg_rpool_locked(cg, device); + + /* + * rpool cannot be null at this stage. Let kernel operate in case + * if there a bug in IB stack or rdma controller, instead of crashing + * the system. + */ + if (unlikely(!rpool)) { + pr_warn("Invalid device %p or rdma cgroup %p\n", cg, device); + return; + } + + rpool->resources[index].usage--; + + /* + * A negative count (or overflow) is invalid, + * it indicates a bug in the rdma controller. + */ + WARN_ON_ONCE(rpool->resources[index].usage < 0); + rpool->usage_sum--; + if (rpool->usage_sum == 0 && + rpool->num_max_cnt == RDMACG_RESOURCE_MAX) { + /* + * No user of the rpool and all entries are set to max, so + * safe to delete this rpool. + */ + free_cg_rpool_locked(rpool); + } +} + +/** + * rdmacg_uncharge_hierarchy - hierarchically uncharge rdma resource count + * @device: pointer to rdmacg device + * @stop_cg: while traversing hirerchy, when meet with stop_cg cgroup + * stop uncharging + * @index: index of the resource to uncharge in cg in given resource pool + */ +static void rdmacg_uncharge_hierarchy(struct rdma_cgroup *cg, + struct rdmacg_device *device, + struct rdma_cgroup *stop_cg, + enum rdmacg_resource_type index) +{ + struct rdma_cgroup *p; + + mutex_lock(&rdmacg_mutex); + + for (p = cg; p != stop_cg; p = parent_rdmacg(p)) + uncharge_cg_locked(p, device, index); + + mutex_unlock(&rdmacg_mutex); + + css_put(&cg->css); +} + +/** + * rdmacg_uncharge - hierarchically uncharge rdma resource count + * @device: pointer to rdmacg device + * @index: index of the resource to uncharge in cgroup in given resource pool + */ +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index) +{ + if (index >= RDMACG_RESOURCE_MAX) + return; + + rdmacg_uncharge_hierarchy(cg, device, NULL, index); +} +EXPORT_SYMBOL(rdmacg_uncharge); + +/** + * rdmacg_try_charge - hierarchically try to charge the rdma resource + * @rdmacg: pointer to rdma cgroup which will own this resource + * @device: pointer to rdmacg device + * @index: index of the resource to charge in cgroup (resource pool) + * + * This function follows charging resource in hierarchical way. + * It will fail if the charge would cause the new value to exceed the + * hierarchical limit. + * Returns 0 if the charge succeded, otherwise -EAGAIN, -ENOMEM or -EINVAL. + * Returns pointer to rdmacg for this resource when charging is successful. + * + * Charger needs to account resources on two criteria. + * (a) per cgroup & (b) per device resource usage. + * Per cgroup resource usage ensures that tasks of cgroup doesn't cross + * the configured limits. Per device provides granular configuration + * in multi device usage. 
It allocates resource pool in the hierarchy + * for each parent it come across for first resource. Later on resource + * pool will be available. Therefore it will be much faster thereon + * to charge/uncharge. + */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index) +{ + struct rdma_cgroup *cg, *p; + struct rdmacg_resource_pool *rpool; + s64 new; + int ret = 0; + + if (index >= RDMACG_RESOURCE_MAX) + return -EINVAL; + + /* + * hold on to css, as cgroup can be removed but resource + * accounting happens on css. + */ + cg = get_current_rdmacg(); + + mutex_lock(&rdmacg_mutex); + for (p = cg; p; p = parent_rdmacg(p)) { + rpool = get_cg_rpool_locked(p, device); + if (IS_ERR(rpool)) { + ret = PTR_ERR(rpool); + goto err; + } else { + new = rpool->resources[index].usage + 1; + if (new > rpool->resources[index].max) { + ret = -EAGAIN; + goto err; + } else { + rpool->resources[index].usage = new; + rpool->usage_sum++; + } + } + } + mutex_unlock(&rdmacg_mutex); + + *rdmacg = cg; + return 0; + +err: + mutex_unlock(&rdmacg_mutex); + rdmacg_uncharge_hierarchy(cg, device, p, index); + return ret; +} +EXPORT_SYMBOL(rdmacg_try_charge); + +/** + * rdmacg_register_device - register rdmacg device to rdma controller. + * @device: pointer to rdmacg device whose resources need to be accounted. + * + * If IB stack wish a device to participate in rdma cgroup resource + * tracking, it must invoke this API to register with rdma cgroup before + * any user space application can start using the RDMA resources. + * Returns 0 on success or EINVAL when table length given is beyond + * supported size. + */ +int rdmacg_register_device(struct rdmacg_device *device) +{ + INIT_LIST_HEAD(&device->dev_node); + INIT_LIST_HEAD(&device->rpools); + + mutex_lock(&rdmacg_mutex); + list_add_tail(&device->dev_node, &rdmacg_devices); + mutex_unlock(&rdmacg_mutex); + return 0; +} +EXPORT_SYMBOL(rdmacg_register_device); + +/** + * rdmacg_unregister_device - unregister rdmacg device from rdma controller. + * @device: pointer to rdmacg device which was previously registered with rdma + * controller using rdmacg_register_device(). + * + * IB stack must invoke this after all the resources of the IB device + * are destroyed and after ensuring that no more resources will be created + * when this API is invoked. + */ +void rdmacg_unregister_device(struct rdmacg_device *device) +{ + struct rdmacg_resource_pool *rpool, *tmp; + + /* + * Synchronize with any active resource settings, + * usage query happening via configfs. + */ + mutex_lock(&rdmacg_mutex); + list_del_init(&device->dev_node); + + /* + * Now that this device is off the cgroup list, its safe to free + * all the rpool resources. 
+ */ + list_for_each_entry_safe(rpool, tmp, &device->rpools, dev_node) + free_cg_rpool_locked(rpool); + + mutex_unlock(&rdmacg_mutex); +} +EXPORT_SYMBOL(rdmacg_unregister_device); + +static int parse_resource(char *c, int *intval) +{ + substring_t argstr; + const char **table = &rdmacg_resource_names[0]; + char *name, *value = c; + size_t len; + int ret, i = 0; + + name = strsep(&value, "="); + if (!name || !value) + return -EINVAL; + + len = strlen(value); + + for (i = 0; i < RDMACG_RESOURCE_MAX; i++) { + if (strcmp(table[i], name)) + continue; + + argstr.from = value; + argstr.to = value + len; + + ret = match_int(&argstr, intval); + if (ret >= 0) { + if (*intval < 0) + break; + return i; + } + if (strncmp(value, RDMACG_MAX_STR, len) == 0) { + *intval = S32_MAX; + return i; + } + break; + } + return -EINVAL; +} + +static int rdmacg_parse_limits(char *options, + int *new_limits, unsigned long *enables) +{ + char *c; + int err = -EINVAL; + + /* parse resource options */ + while ((c = strsep(&options, " ")) != NULL) { + int index, intval; + + index = parse_resource(c, &intval); + if (index < 0) + goto err; + + new_limits[index] = intval; + *enables |= BIT(index); + } + return 0; + +err: + return err; +} + +static struct rdmacg_device *rdmacg_get_device_locked(const char *name) +{ + struct rdmacg_device *device; + + lockdep_assert_held(&rdmacg_mutex); + + list_for_each_entry(device, &rdmacg_devices, dev_node) + if (!strcmp(name, device->name)) + return device; + + return NULL; +} + +static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdma_cgroup *cg = css_rdmacg(of_css(of)); + const char *dev_name; + struct rdmacg_resource_pool *rpool; + struct rdmacg_device *device; + char *options = strstrip(buf); + int *new_limits; + unsigned long enables = 0; + int i = 0, ret = 0; + + /* extract the device name first */ + dev_name = strsep(&options, " "); + if (!dev_name) { + ret = -EINVAL; + goto err; + } + + new_limits = kcalloc(RDMACG_RESOURCE_MAX, sizeof(int), GFP_KERNEL); + if (!new_limits) { + ret = -ENOMEM; + goto err; + } + + ret = rdmacg_parse_limits(options, new_limits, &enables); + if (ret) + goto parse_err; + + /* acquire lock to synchronize with hot plug devices */ + mutex_lock(&rdmacg_mutex); + + device = rdmacg_get_device_locked(dev_name); + if (!device) { + ret = -ENODEV; + goto dev_err; + } + + rpool = get_cg_rpool_locked(cg, device); + if (IS_ERR(rpool)) { + ret = PTR_ERR(rpool); + goto dev_err; + } + + /* now set the new limits of the rpool */ + for_each_set_bit(i, &enables, RDMACG_RESOURCE_MAX) + set_resource_limit(rpool, i, new_limits[i]); + + if (rpool->usage_sum == 0 && + rpool->num_max_cnt == RDMACG_RESOURCE_MAX) { + /* + * No user of the rpool and all entries are set to max, so + * safe to delete this rpool. 
+ */ + free_cg_rpool_locked(rpool); + } + +dev_err: + mutex_unlock(&rdmacg_mutex); + +parse_err: + kfree(new_limits); + +err: + return ret ?: nbytes; +} + +static void print_rpool_values(struct seq_file *sf, + struct rdmacg_resource_pool *rpool) +{ + enum rdmacg_file_type sf_type; + int i; + u32 value; + + sf_type = seq_cft(sf)->private; + + for (i = 0; i < RDMACG_RESOURCE_MAX; i++) { + seq_puts(sf, rdmacg_resource_names[i]); + seq_putc(sf, '='); + if (sf_type == RDMACG_RESOURCE_TYPE_MAX) { + if (rpool) + value = rpool->resources[i].max; + else + value = S32_MAX; + } else { + if (rpool) + value = rpool->resources[i].usage; + else + value = 0; + } + + if (value == S32_MAX) + seq_puts(sf, RDMACG_MAX_STR); + else + seq_printf(sf, "%d", value); + seq_putc(sf, ' '); + } +} + +static int rdmacg_resource_read(struct seq_file *sf, void *v) +{ + struct rdmacg_device *device; + struct rdmacg_resource_pool *rpool; + struct rdma_cgroup *cg = css_rdmacg(seq_css(sf)); + + mutex_lock(&rdmacg_mutex); + + list_for_each_entry(device, &rdmacg_devices, dev_node) { + seq_printf(sf, "%s ", device->name); + + rpool = find_cg_rpool_locked(cg, device); + print_rpool_values(sf, rpool); + + seq_putc(sf, '\n'); + } + + mutex_unlock(&rdmacg_mutex); + return 0; +} + +static struct cftype rdmacg_files[] = { + { + .name = "max", + .write = rdmacg_resource_set_max, + .seq_show = rdmacg_resource_read, + .private = RDMACG_RESOURCE_TYPE_MAX, + .flags = CFTYPE_NOT_ON_ROOT, + }, + { + .name = "current", + .seq_show = rdmacg_resource_read, + .private = RDMACG_RESOURCE_TYPE_STAT, + .flags = CFTYPE_NOT_ON_ROOT, + }, + { } /* terminate */ +}; + +static struct cgroup_subsys_state * +rdmacg_css_alloc(struct cgroup_subsys_state *parent) +{ + struct rdma_cgroup *cg; + + cg = kzalloc(sizeof(*cg), GFP_KERNEL); + if (!cg) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cg->rpools); + return &cg->css; +} + +static void rdmacg_css_free(struct cgroup_subsys_state *css) +{ + struct rdma_cgroup *cg = css_rdmacg(css); + + kfree(cg); +} + +/** + * rdmacg_css_offline - cgroup css_offline callback + * @css: css of interest + * + * This function is called when @css is about to go away and responsible + * for shooting down all rdmacg associated with @css. As part of that it + * marks all the resource pool entries to max value, so that when resources are + * uncharged, associated resource pool can be freed as well. 
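rdmacg_resource_set_max() and rdmacg_resource_read() back one "max" and one "current" file per cgroup, each holding a line per registered device of the form "<dev_name> <resource>=<value> ...", with the literal string "max" standing for S32_MAX. A minimal configuration sketch from userspace, assuming an illustrative cgroup2 group at /sys/fs/cgroup/test, a registered device named "mlx4_0", and the controller's usual "rdma." file prefix:

#include <stdio.h>

/*
 * Sketch only: cap HCA handles/objects for one device in a cgroup, then
 * read back current usage.  Paths and device name are assumptions; the
 * resource names match rdmacg_resource_names[] above.
 */
int main(void)
{
	const char *grp = "/sys/fs/cgroup/test";
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/rdma.max", grp);
	f = fopen(path, "w");
	if (!f)
		return 1;
	/* resources not mentioned on the line stay at "max" (S32_MAX) */
	fprintf(f, "mlx4_0 hca_handle=2 hca_object=2000\n");
	fclose(f);

	snprintf(path, sizeof(path), "%s/rdma.current", grp);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "mlx4_0 hca_handle=1 hca_object=23" */
	fclose(f);
	return 0;
}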
+ */ +static void rdmacg_css_offline(struct cgroup_subsys_state *css) +{ + struct rdma_cgroup *cg = css_rdmacg(css); + struct rdmacg_resource_pool *rpool; + + mutex_lock(&rdmacg_mutex); + + list_for_each_entry(rpool, &cg->rpools, cg_node) + set_all_resource_max_limit(rpool); + + mutex_unlock(&rdmacg_mutex); +} + +struct cgroup_subsys rdma_cgrp_subsys = { + .css_alloc = rdmacg_css_alloc, + .css_free = rdmacg_css_free, + .css_offline = rdmacg_css_offline, + .legacy_cftypes = rdmacg_files, + .dfl_cftypes = rdmacg_files, +}; diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config index 1a8f34f63601..26a06e09a5bd 100644 --- a/kernel/configs/android-base.config +++ b/kernel/configs/android-base.config @@ -21,6 +21,7 @@ CONFIG_CP15_BARRIER_EMULATION=y CONFIG_DEFAULT_SECURITY_SELINUX=y CONFIG_EMBEDDED=y CONFIG_FB=y +CONFIG_HARDENED_USERCOPY=y CONFIG_HIGH_RES_TIMERS=y CONFIG_INET6_AH=y CONFIG_INET6_ESP=y @@ -129,6 +130,7 @@ CONFIG_PPP_DEFLATE=y CONFIG_PPP_MPPE=y CONFIG_PREEMPT=y CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y CONFIG_RTC_CLASS=y CONFIG_RT_GROUP_SCHED=y CONFIG_SECCOMP=y diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config index 99127edc5204..28ee064b6744 100644 --- a/kernel/configs/android-recommended.config +++ b/kernel/configs/android-recommended.config @@ -1,4 +1,5 @@ # KEEP ALPHABETICALLY SORTED +# CONFIG_AIO is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_LEGACY_PTYS is not set diff --git a/kernel/cpu.c b/kernel/cpu.c index 0a5f630f5c54..f7c063239fa5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -7,7 +7,9 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/notifier.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task.h> #include <linux/unistd.h> #include <linux/cpu.h> #include <linux/oom.h> diff --git a/kernel/cred.c b/kernel/cred.c index 5f264fb5737d..2bc66075740f 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -12,6 +12,7 @@ #include <linux/cred.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/init_task.h> diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 79517e5549f1..65c0f1363788 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -49,6 +49,7 @@ #include <linux/init.h> #include <linux/kgdb.h> #include <linux/kdb.h> +#include <linux/nmi.h> #include <linux/pid.h> #include <linux/smp.h> #include <linux/mm.h> @@ -232,9 +233,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) int i; for (i = 0; i < VMACACHE_SIZE; i++) { - if (!current->vmacache[i]) + if (!current->vmacache.vmas[i]) continue; - flush_cache_range(current->vmacache[i], + flush_cache_range(current->vmacache.vmas[i], addr, addr + BREAK_INSTR_SIZE); } } diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 19d9a578c753..7510dc687c0d 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -29,6 +29,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/serial_core.h> diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index fe15fff5df53..6ad4a9fcbd6f 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -12,7 +12,8 @@ #include <linux/ctype.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/sched.h> 
+#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/kdb.h> #include <linux/nmi.h> #include "kdb_private.h" diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index ca183919d302..c8146d53ca67 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -18,6 +18,9 @@ #include <linux/kmsg_dump.h> #include <linux/reboot.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> +#include <linux/sched/debug.h> #include <linux/sysrq.h> #include <linux/smp.h> #include <linux/utsname.h> diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 660549656991..4a1c33416b6a 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -14,6 +14,8 @@ */ #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/cputime.h> #include <linux/slab.h> #include <linux/taskstats.h> #include <linux/time.h> diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index e9fdb5203de5..c04917cad1bf 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -11,6 +11,8 @@ #include <linux/perf_event.h> #include <linux/slab.h> +#include <linux/sched/task_stack.h> + #include "internal.h" struct callchain_cpus_entries { diff --git a/kernel/events/core.c b/kernel/events/core.c index b2eb3542e829..6f41548f2e32 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -46,6 +46,8 @@ #include <linux/filter.h> #include <linux/namei.h> #include <linux/parser.h> +#include <linux/sched/clock.h> +#include <linux/sched/mm.h> #include "internal.h" @@ -455,7 +457,7 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = proc_dointvec(table, write, buffer, lenp, ppos); + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write) return ret; @@ -3522,6 +3524,8 @@ static void perf_event_enable_on_exec(int ctxn) if (enabled) { clone_ctx = unclone_ctx(ctx); ctx_resched(cpuctx, ctx, event_type); + } else { + ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); } perf_ctx_unlock(cpuctx, ctx); @@ -9955,6 +9959,7 @@ SYSCALL_DEFINE5(perf_event_open, * of swizzling perf_event::ctx. */ perf_remove_from_context(group_leader, 0); + put_ctx(gctx); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { @@ -9993,13 +9998,6 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); - - /* - * Now that all events are installed in @ctx, nothing - * references @gctx anymore, so drop the last reference we have - * on it. - */ - put_ctx(gctx); } /* @@ -10959,5 +10957,11 @@ struct cgroup_subsys perf_event_cgrp_subsys = { .css_alloc = perf_cgroup_css_alloc, .css_free = perf_cgroup_css_free, .attach = perf_cgroup_attach, + /* + * Implicitly enable on dfl hierarchy so that perf events can + * always be filtered by cgroup2 path as long as perf_event + * controller is not mounted on a legacy hierarchy. 
+ */ + .implicit_on_dfl = true, }; #endif /* CONFIG_CGROUP_PERF */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 18c6b23edd3c..0e137f98a50c 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -27,6 +27,8 @@ #include <linux/pagemap.h> /* read_mapping_page */ #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> #include <linux/export.h> #include <linux/rmap.h> /* anon_vma_prepare */ #include <linux/mmu_notifier.h> /* set_pte_at_notify */ @@ -747,7 +749,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) continue; } - if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) + if (!mmget_not_zero(vma->vm_mm)) continue; info = prev; diff --git a/kernel/exit.c b/kernel/exit.c index 90b09ca35c84..e126ebf2400c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -6,6 +6,12 @@ #include <linux/mm.h> #include <linux/slab.h> +#include <linux/sched/autogroup.h> +#include <linux/sched/mm.h> +#include <linux/sched/stat.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/capability.h> @@ -539,7 +545,7 @@ static void exit_mm(void) __set_current_state(TASK_RUNNING); down_read(&mm->mmap_sem); } - atomic_inc(&mm->mm_count); + mmgrab(mm); BUG_ON(mm != current->active_mm); /* more a memory barrier than a real lock */ task_lock(current); diff --git a/kernel/fork.c b/kernel/fork.c index 348fe73155bc..6c463c80e93d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -12,6 +12,16 @@ */ #include <linux/slab.h> +#include <linux/sched/autogroup.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/user.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/stat.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> +#include <linux/rtmutex.h> #include <linux/init.h> #include <linux/unistd.h> #include <linux/module.h> @@ -1000,7 +1010,7 @@ struct mm_struct *get_task_mm(struct task_struct *task) if (task->flags & PF_KTHREAD) mm = NULL; else - atomic_inc(&mm->mm_users); + mmget(mm); } task_unlock(task); return mm; @@ -1188,7 +1198,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) vmacache_flush(tsk); if (clone_flags & CLONE_VM) { - atomic_inc(&oldmm->mm_users); + mmget(oldmm); mm = oldmm; goto good_mm; } @@ -1455,6 +1465,21 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) task->pids[type].pid = pid; } +static inline void rcu_copy_process(struct task_struct *p) +{ +#ifdef CONFIG_PREEMPT_RCU + p->rcu_read_lock_nesting = 0; + p->rcu_read_unlock_special.s = 0; + p->rcu_blocked_node = NULL; + INIT_LIST_HEAD(&p->rcu_node_entry); +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + p->rcu_tasks_holdout = false; + INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); + p->rcu_tasks_idle_cpu = -1; +#endif /* #ifdef CONFIG_TASKS_RCU */ +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -1746,7 +1771,7 @@ static __latent_entropy struct task_struct *copy_process( INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; - threadgroup_change_begin(current); + cgroup_threadgroup_change_begin(current); /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. 
It should be noted that the new process's css_set can be changed @@ -1843,7 +1868,7 @@ static __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); cgroup_post_fork(p); - threadgroup_change_end(current); + cgroup_threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); @@ -1854,7 +1879,7 @@ static __latent_entropy struct task_struct *copy_process( bad_fork_cancel_cgroup: cgroup_cancel_fork(p); bad_fork_free_pid: - threadgroup_change_end(current); + cgroup_threadgroup_change_end(current); if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_thread: diff --git a/kernel/futex.c b/kernel/futex.c index cdf365036141..229a744b1781 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -61,6 +61,8 @@ #include <linux/nsproxy.h> #include <linux/ptrace.h> #include <linux/sched/rt.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/freezer.h> #include <linux/bootmem.h> @@ -338,7 +340,7 @@ static inline bool should_fail_futex(bool fshared) static inline void futex_get_mm(union futex_key *key) { - atomic_inc(&key->private.mm->mm_count); + mmgrab(key->private.mm); /* * Ensure futex_get_mm() implies a full barrier such that * get_futex_key() implies a full barrier. This is relied upon diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 40c07e4fa116..f0f8e2a8496f 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -16,6 +16,9 @@ #include <linux/export.h> #include <linux/sysctl.h> #include <linux/utsname.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> + #include <trace/events/sched.h> /* diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6b669593e7eb..a4afe5cc5af1 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -17,6 +17,8 @@ #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/rt.h> +#include <linux/sched/task.h> +#include <uapi/linux/sched/types.h> #include <linux/task_work.h> #include "internals.h" @@ -353,7 +355,7 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) return 0; /* - * Preserve the managed affinity setting and an userspace affinity + * Preserve the managed affinity setting and a userspace affinity * setup, but make sure that one of the targets is online.
*/ if (irqd_affinity_is_managed(&desc->irq_data) || diff --git a/kernel/jump_label.c b/kernel/jump_label.c index a9b8cf500591..6c9cb208ac48 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -236,12 +236,28 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry static inline struct jump_entry *static_key_entries(struct static_key *key) { - return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK); + WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); + return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); } static inline bool static_key_type(struct static_key *key) { - return (unsigned long)key->entries & JUMP_TYPE_MASK; + return key->type & JUMP_TYPE_TRUE; +} + +static inline bool static_key_linked(struct static_key *key) +{ + return key->type & JUMP_TYPE_LINKED; +} + +static inline void static_key_clear_linked(struct static_key *key) +{ + key->type &= ~JUMP_TYPE_LINKED; +} + +static inline void static_key_set_linked(struct static_key *key) +{ + key->type |= JUMP_TYPE_LINKED; } static inline struct static_key *jump_entry_key(struct jump_entry *entry) @@ -254,6 +270,26 @@ static bool jump_entry_branch(struct jump_entry *entry) return (unsigned long)entry->key & 1UL; } +/*** + * A 'struct static_key' uses a union such that it either points directly + * to a table of 'struct jump_entry' or to a linked list of modules which in + * turn point to 'struct jump_entry' tables. + * + * The two lower bits of the pointer are used to keep track of which pointer + * type is in use and to store the initial branch direction, we use an access + * function which preserves these bits. + */ +static void static_key_set_entries(struct static_key *key, + struct jump_entry *entries) +{ + unsigned long type; + + WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK); + type = key->type & JUMP_TYPE_MASK; + key->entries = entries; + key->type |= type; +} + static enum jump_label_type jump_label_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); @@ -313,13 +349,7 @@ void __init jump_label_init(void) continue; key = iterk; - /* - * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. - */ - *((unsigned long *)&key->entries) += (unsigned long)iter; -#ifdef CONFIG_MODULES - key->next = NULL; -#endif + static_key_set_entries(key, iter); } static_key_initialized = true; jump_label_unlock(); @@ -343,6 +373,29 @@ struct static_key_mod { struct module *mod; }; +static inline struct static_key_mod *static_key_mod(struct static_key *key) +{ + WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED)); + return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); +} + +/*** + * key->type and key->next are the same via union. + * This sets key->next and preserves the type bits. + * + * See additional comments above static_key_set_entries(). 
+ */ +static void static_key_set_mod(struct static_key *key, + struct static_key_mod *mod) +{ + unsigned long type; + + WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK); + type = key->type & JUMP_TYPE_MASK; + key->next = mod; + key->type |= type; +} + static int __jump_label_mod_text_reserved(void *start, void *end) { struct module *mod; @@ -365,11 +418,23 @@ static void __jump_label_mod_update(struct static_key *key) { struct static_key_mod *mod; - for (mod = key->next; mod; mod = mod->next) { - struct module *m = mod->mod; + for (mod = static_key_mod(key); mod; mod = mod->next) { + struct jump_entry *stop; + struct module *m; + + /* + * NULL if the static_key is defined in a module + * that does not use it + */ + if (!mod->entries) + continue; - __jump_label_update(key, mod->entries, - m->jump_entries + m->num_jump_entries); + m = mod->mod; + if (!m) + stop = __stop___jump_table; + else + stop = m->jump_entries + m->num_jump_entries; + __jump_label_update(key, mod->entries, stop); } } @@ -404,7 +469,7 @@ static int jump_label_add_module(struct module *mod) struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; struct static_key *key = NULL; - struct static_key_mod *jlm; + struct static_key_mod *jlm, *jlm2; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) @@ -421,20 +486,32 @@ static int jump_label_add_module(struct module *mod) key = iterk; if (within_module(iter->key, mod)) { - /* - * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. - */ - *((unsigned long *)&key->entries) += (unsigned long)iter; - key->next = NULL; + static_key_set_entries(key, iter); continue; } jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; + if (!static_key_linked(key)) { + jlm2 = kzalloc(sizeof(struct static_key_mod), + GFP_KERNEL); + if (!jlm2) { + kfree(jlm); + return -ENOMEM; + } + preempt_disable(); + jlm2->mod = __module_address((unsigned long)key); + preempt_enable(); + jlm2->entries = static_key_entries(key); + jlm2->next = NULL; + static_key_set_mod(key, jlm2); + static_key_set_linked(key); + } jlm->mod = mod; jlm->entries = iter; - jlm->next = key->next; - key->next = jlm; + jlm->next = static_key_mod(key); + static_key_set_mod(key, jlm); + static_key_set_linked(key); /* Only update if we've changed from our initial state */ if (jump_label_type(iter) != jump_label_init_type(iter)) @@ -461,16 +538,34 @@ static void jump_label_del_module(struct module *mod) if (within_module(iter->key, mod)) continue; + /* No memory during module load */ + if (WARN_ON(!static_key_linked(key))) + continue; + prev = &key->next; - jlm = key->next; + jlm = static_key_mod(key); while (jlm && jlm->mod != mod) { prev = &jlm->next; jlm = jlm->next; } - if (jlm) { + /* No memory during module load */ + if (WARN_ON(!jlm)) + continue; + + if (prev == &key->next) + static_key_set_mod(key, jlm->next); + else *prev = jlm->next; + + kfree(jlm); + + jlm = static_key_mod(key); + /* if only one entry is left, fold it back into the static_key */ + if (jlm->next == NULL) { + static_key_set_entries(key, jlm->entries); + static_key_clear_linked(key); kfree(jlm); } } @@ -499,8 +594,10 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, case MODULE_STATE_COMING: jump_label_lock(); ret = jump_label_add_module(mod); - if (ret) + if (ret) { + WARN(1, "Failed to allocate memory: jump_label may not work properly.\n"); jump_label_del_module(mod); + } jump_label_unlock(); break; case 
MODULE_STATE_GOING: @@ -561,11 +658,14 @@ int jump_label_text_reserved(void *start, void *end) static void jump_label_update(struct static_key *key) { struct jump_entry *stop = __stop___jump_table; - struct jump_entry *entry = static_key_entries(key); + struct jump_entry *entry; #ifdef CONFIG_MODULES struct module *mod; - __jump_label_mod_update(key); + if (static_key_linked(key)) { + __jump_label_mod_update(key); + return; + } preempt_disable(); mod = __module_address((unsigned long)key); @@ -573,6 +673,7 @@ static void jump_label_update(struct static_key *key) stop = mod->jump_entries + mod->num_jump_entries; preempt_enable(); #endif + entry = static_key_entries(key); /* if there are no users, entry can be NULL */ if (entry) __jump_label_update(key, entry, stop); diff --git a/kernel/kmod.c b/kernel/kmod.c index 0c407f905ca4..563f97e2be36 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -20,6 +20,8 @@ */ #include <linux/module.h> #include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/binfmts.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/kmod.h> diff --git a/kernel/kthread.c b/kernel/kthread.c index 8461a4372e8a..2f26adea0f84 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -5,7 +5,9 @@ * even if we're invoked from userspace (think modprobe, hotplug cpu, * etc.). */ +#include <uapi/linux/sched/types.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/kthread.h> #include <linux/completion.h> #include <linux/err.h> diff --git a/kernel/latencytop.c b/kernel/latencytop.c index b5c30d9f46c5..96b4179cee6a 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -55,6 +55,8 @@ #include <linux/latencytop.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/stat.h> #include <linux/list.h> #include <linux/stacktrace.h> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 9812e5dd409e..12e38c213b70 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -28,6 +28,8 @@ #define DISABLE_BRANCH_PROFILING #include <linux/mutex.h> #include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/sched/task.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/proc_fs.h> diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 28350dc8ecbb..f24582d4dad3 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -32,6 +32,8 @@ #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> +#include <linux/rtmutex.h> #include <linux/atomic.h> #include <linux/moduleparam.h> #include <linux/delay.h> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index ad2d9e22697b..198527a62149 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -19,8 +19,10 @@ */ #include <linux/mutex.h> #include <linux/ww_mutex.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/rt.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/debug.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/interrupt.h> diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index e852be4851fc..4a30ef63c607 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -63,6 +63,7 @@ enum qlock_stats { */ #include <linux/debugfs.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/fs.h> static const char * const 
qstat_names[qstat_num + 1] = { diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 62b6cee8ea7f..97ee9df32e0f 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -18,6 +18,7 @@ */ #include <linux/sched.h> #include <linux/sched/rt.h> +#include <linux/sched/debug.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/spinlock.h> diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index d340be3a488f..6edc32ecd9c5 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -12,9 +12,11 @@ */ #include <linux/spinlock.h> #include <linux/export.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/rt.h> #include <linux/sched/deadline.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/debug.h> #include <linux/timer.h> #include "rtmutex_common.h" diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 990134617b4c..856dfff5c33a 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -13,6 +13,7 @@ #define __KERNEL_RTMUTEX_COMMON_H #include <linux/rtmutex.h> +#include <linux/sched/wake_q.h> /* * This is the control structure for tasks blocked on a rt_mutex, diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 5eacab880f67..7bc24d477805 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c @@ -6,7 +6,8 @@ * - Derived also from comments by Linus */ #include <linux/rwsem.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/export.h> enum rwsem_waiter_type { diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 2ad8d8dc3bb1..34e727f18e49 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -10,10 +10,12 @@ * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes. */ #include <linux/rwsem.h> -#include <linux/sched.h> #include <linux/init.h> #include <linux/export.h> +#include <linux/sched/signal.h> #include <linux/sched/rt.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/debug.h> #include <linux/osq_lock.h> #include "rwsem.h" diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 45ba475d4be3..90a74ccd85a4 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/export.h> #include <linux/rwsem.h> #include <linux/atomic.h> diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 9512e37637dc..561acdd39960 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/ftrace.h> diff --git a/kernel/panic.c b/kernel/panic.c index 3ec16e603e88..a58932b41700 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -9,6 +9,7 @@ * to indicate a major problem. 
*/ #include <linux/debug_locks.h> +#include <linux/sched/debug.h> #include <linux/interrupt.h> #include <linux/kmsg_dump.h> #include <linux/kallsyms.h> diff --git a/kernel/pid.c b/kernel/pid.c index 0291804151b5..0143ac0ddceb 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -38,6 +38,7 @@ #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/proc_fs.h> +#include <linux/sched/task.h> #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index eef2ce968636..de461aa0bf9a 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -12,12 +12,15 @@ #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <linux/syscalls.h> +#include <linux/cred.h> #include <linux/err.h> #include <linux/acct.h> #include <linux/slab.h> #include <linux/proc_ns.h> #include <linux/reboot.h> #include <linux/export.h> +#include <linux/sched/task.h> +#include <linux/sched/signal.h> struct pid_cache { int nr_ids; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 86385af1080f..a8b978c35a6a 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -10,6 +10,8 @@ * This file is released under the GPLv2. */ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/export.h> #include <linux/suspend.h> #include <linux/syscalls.h> @@ -21,6 +23,7 @@ #include <linux/fs.h> #include <linux/mount.h> #include <linux/pm.h> +#include <linux/nmi.h> #include <linux/console.h> #include <linux/cpu.h> #include <linux/freezer.h> @@ -104,7 +107,7 @@ EXPORT_SYMBOL(system_entering_hibernation); #ifdef CONFIG_PM_DEBUG static void hibernation_debug_sleep(void) { - printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n"); + pr_info("hibernation debug: Waiting for 5 seconds.\n"); mdelay(5000); } @@ -250,10 +253,9 @@ void swsusp_show_speed(ktime_t start, ktime_t stop, centisecs = 1; /* avoid div-by-zero */ k = nr_pages * (PAGE_SIZE / 1024); kps = (k * 100) / centisecs; - printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n", - msg, k, - centisecs / 100, centisecs % 100, - kps / 1000, (kps % 1000) / 10); + pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n", + msg, k, centisecs / 100, centisecs % 100, kps / 1000, + (kps % 1000) / 10); } /** @@ -271,8 +273,7 @@ static int create_image(int platform_mode) error = dpm_suspend_end(PMSG_FREEZE); if (error) { - printk(KERN_ERR "PM: Some devices failed to power down, " - "aborting hibernation\n"); + pr_err("Some devices failed to power down, aborting hibernation\n"); return error; } @@ -288,8 +289,7 @@ static int create_image(int platform_mode) error = syscore_suspend(); if (error) { - printk(KERN_ERR "PM: Some system devices failed to power down, " - "aborting hibernation\n"); + pr_err("Some system devices failed to power down, aborting hibernation\n"); goto Enable_irqs; } @@ -304,8 +304,8 @@ static int create_image(int platform_mode) restore_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); if (error) - printk(KERN_ERR "PM: Error %d creating hibernation image\n", - error); + pr_err("Error %d creating hibernation image\n", error); + if (!in_suspend) { events_check_enabled = false; clear_free_pages(); @@ -432,8 +432,7 @@ static int resume_target_kernel(bool platform_mode) error = dpm_suspend_end(PMSG_QUIESCE); if (error) { - printk(KERN_ERR "PM: Some devices failed to power down, " - "aborting resume\n"); + pr_err("Some devices failed to power down, aborting 
resume\n"); return error; } @@ -608,6 +607,22 @@ static void power_down(void) { #ifdef CONFIG_SUSPEND int error; + + if (hibernation_mode == HIBERNATION_SUSPEND) { + error = suspend_devices_and_enter(PM_SUSPEND_MEM); + if (error) { + hibernation_mode = hibernation_ops ? + HIBERNATION_PLATFORM : + HIBERNATION_SHUTDOWN; + } else { + /* Restore swap signature. */ + error = swsusp_unmark(); + if (error) + pr_err("Swap will be unusable! Try swapon -a.\n"); + + return; + } + } #endif switch (hibernation_mode) { @@ -620,32 +635,13 @@ static void power_down(void) if (pm_power_off) kernel_power_off(); break; -#ifdef CONFIG_SUSPEND - case HIBERNATION_SUSPEND: - error = suspend_devices_and_enter(PM_SUSPEND_MEM); - if (error) { - if (hibernation_ops) - hibernation_mode = HIBERNATION_PLATFORM; - else - hibernation_mode = HIBERNATION_SHUTDOWN; - power_down(); - } - /* - * Restore swap signature. - */ - error = swsusp_unmark(); - if (error) - printk(KERN_ERR "PM: Swap will be unusable! " - "Try swapon -a.\n"); - return; -#endif } kernel_halt(); /* * Valid image is on the disk, if we continue we risk serious data * corruption after resume. */ - printk(KERN_CRIT "PM: Please power down manually\n"); + pr_crit("Power down manually\n"); while (1) cpu_relax(); } @@ -655,7 +651,7 @@ static int load_image_and_restore(void) int error; unsigned int flags; - pr_debug("PM: Loading hibernation image.\n"); + pr_debug("Loading hibernation image.\n"); lock_device_hotplug(); error = create_basic_memory_bitmaps(); @@ -667,7 +663,7 @@ static int load_image_and_restore(void) if (!error) hibernation_restore(flags & SF_PLATFORM_MODE); - printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n"); + pr_err("Failed to load hibernation image, recovering.\n"); swsusp_free(); free_basic_memory_bitmaps(); Unlock: @@ -685,7 +681,7 @@ int hibernate(void) bool snapshot_test = false; if (!hibernation_available()) { - pr_debug("PM: Hibernation not available.\n"); + pr_debug("Hibernation not available.\n"); return -EPERM; } @@ -703,9 +699,9 @@ int hibernate(void) goto Exit; } - printk(KERN_INFO "PM: Syncing filesystems ... "); + pr_info("Syncing filesystems ... 
\n"); sys_sync(); - printk("done.\n"); + pr_info("done.\n"); error = freeze_processes(); if (error) @@ -731,7 +727,7 @@ int hibernate(void) else flags |= SF_CRC32_MODE; - pr_debug("PM: writing image.\n"); + pr_debug("Writing image.\n"); error = swsusp_write(flags); swsusp_free(); if (!error) { @@ -743,7 +739,7 @@ int hibernate(void) in_suspend = 0; pm_restore_gfp_mask(); } else { - pr_debug("PM: Image restored successfully.\n"); + pr_debug("Image restored successfully.\n"); } Free_bitmaps: @@ -751,7 +747,7 @@ int hibernate(void) Thaw: unlock_device_hotplug(); if (snapshot_test) { - pr_debug("PM: Checking hibernation image\n"); + pr_debug("Checking hibernation image\n"); error = swsusp_check(); if (!error) error = load_image_and_restore(); @@ -815,10 +811,10 @@ static int software_resume(void) goto Unlock; } - pr_debug("PM: Checking hibernation image partition %s\n", resume_file); + pr_debug("Checking hibernation image partition %s\n", resume_file); if (resume_delay) { - printk(KERN_INFO "Waiting %dsec before reading resume device...\n", + pr_info("Waiting %dsec before reading resume device ...\n", resume_delay); ssleep(resume_delay); } @@ -857,10 +853,10 @@ static int software_resume(void) } Check_image: - pr_debug("PM: Hibernation image partition %d:%d present\n", + pr_debug("Hibernation image partition %d:%d present\n", MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); - pr_debug("PM: Looking for hibernation image.\n"); + pr_debug("Looking for hibernation image.\n"); error = swsusp_check(); if (error) goto Unlock; @@ -879,7 +875,7 @@ static int software_resume(void) goto Close_Finish; } - pr_debug("PM: Preparing processes for restore.\n"); + pr_debug("Preparing processes for restore.\n"); error = freeze_processes(); if (error) goto Close_Finish; @@ -892,7 +888,7 @@ static int software_resume(void) /* For success case, the suspend path will release the lock */ Unlock: mutex_unlock(&pm_mutex); - pr_debug("PM: Hibernation image not present or could not be loaded.\n"); + pr_debug("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: swsusp_close(FMODE_READ); @@ -1016,7 +1012,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, error = -EINVAL; if (!error) - pr_debug("PM: Hibernation mode set to '%s'\n", + pr_debug("Hibernation mode set to '%s'\n", hibernation_modes[mode]); unlock_system_sleep(); return error ? 
error : n; @@ -1052,7 +1048,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, lock_system_sleep(); swsusp_resume_device = res; unlock_system_sleep(); - printk(KERN_INFO "PM: Starting manual resume from disk\n"); + pr_info("Starting manual resume from disk\n"); noresume = 0; software_resume(); return n; diff --git a/kernel/power/process.c b/kernel/power/process.c index 2fba066e125f..c7209f060eeb 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -12,6 +12,8 @@ #include <linux/oom.h> #include <linux/suspend.h> #include <linux/module.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> #include <linux/syscalls.h> #include <linux/freezer.h> #include <linux/delay.h> diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 905d5bbd595f..d79a38de425a 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -22,6 +22,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/nmi.h> #include <linux/syscalls.h> #include <linux/console.h> #include <linux/highmem.h> diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 34da86e73d00..2984fb0f0257 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -45,6 +45,9 @@ #include <linux/utsname.h> #include <linux/ctype.h> #include <linux/uio.h> +#include <linux/sched/clock.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <asm/sections.h> diff --git a/kernel/profile.c b/kernel/profile.c index f67ce0aa6bc4..9aa2a4445b0d 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -25,6 +25,8 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/sched/stat.h> + #include <asm/sections.h> #include <asm/irq_regs.h> #include <asm/ptrace.h> diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 49ba7c1ade9d..0af928712174 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -10,6 +10,9 @@ #include <linux/capability.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/highmem.h> diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 123ccbd22449..a4a86fb47e4a 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -30,6 +30,7 @@ #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <uapi/linux/sched/types.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/completion.h> diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index d81345be730e..cccc417a8135 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -32,7 +32,8 @@ #include <linux/smp.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <uapi/linux/sched/types.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/completion.h> diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index e773129c8b08..ef3bcfb15b39 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -30,7 +30,7 @@ #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/preempt.h> -#include <linux/rcupdate.h> +#include <linux/rcupdate_wait.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/delay.h> diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index fa6a48d3917b..6ad330dbbae2 100644 --- a/kernel/rcu/tiny.c +++ 
b/kernel/rcu/tiny.c @@ -25,7 +25,7 @@ #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/notifier.h> -#include <linux/rcupdate.h> +#include <linux/rcupdate_wait.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/mutex.h> @@ -47,6 +47,18 @@ static void __call_rcu(struct rcu_head *head, #include "tiny_plugin.h" +void rcu_barrier_bh(void) +{ + wait_rcu_gp(call_rcu_bh); +} +EXPORT_SYMBOL(rcu_barrier_bh); + +void rcu_barrier_sched(void) +{ + wait_rcu_gp(call_rcu_sched); +} +EXPORT_SYMBOL(rcu_barrier_sched); + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index d80e0d2f68c6..50fee7689e71 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -32,9 +32,10 @@ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/smp.h> -#include <linux/rcupdate.h> +#include <linux/rcupdate_wait.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/nmi.h> #include <linux/atomic.h> #include <linux/bitops.h> @@ -49,6 +50,7 @@ #include <linux/kernel_stat.h> #include <linux/wait.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/prefetch.h> #include <linux/delay.h> #include <linux/stop_machine.h> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index b60f2b6caa14..ec62a05bfdb3 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -24,6 +24,7 @@ #include <linux/cache.h> #include <linux/spinlock.h> +#include <linux/rtmutex.h> #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/seqlock.h> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index a240f3308be6..0a62a8f1caac 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -27,7 +27,9 @@ #include <linux/delay.h> #include <linux/gfp.h> #include <linux/oom.h> +#include <linux/sched/debug.h> #include <linux/smpboot.h> +#include <uapi/linux/sched/types.h> #include "../time/tick-internal.h" #ifdef CONFIG_RCU_BOOST diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 9e03db9ea9c0..55c8530316c7 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -36,7 +36,8 @@ #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/percpu.h> @@ -49,6 +50,7 @@ #include <linux/moduleparam.h> #include <linux/kthread.h> #include <linux/tick.h> +#include <linux/rcupdate_wait.h> #define CREATE_TRACE_POINTS diff --git a/kernel/relay.c b/kernel/relay.c index 8f8dc91db680..0e413d9eec8a 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -847,7 +847,7 @@ void relay_close(struct rchan *chan) if (chan->last_toobig) printk(KERN_WARNING "relay: one or more items not logged " - "[item size (%Zd) > sub-buffer size (%Zd)]\n", + "[item size (%zd) > sub-buffer size (%zd)]\n", chan->last_toobig, chan->subbuf_size); list_del(&chan->list); diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h index 890c95f2587a..ce40c810cd5c 100644 --- a/kernel/sched/autogroup.h +++ b/kernel/sched/autogroup.h @@ -2,6 +2,7 @@ #include <linux/kref.h> #include <linux/rwsem.h> +#include <linux/sched/autogroup.h> struct autogroup { /* diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index ad64efe41722..a08795e21628 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -58,6 +58,8 @@ #include <linux/percpu.h> #include <linux/ktime.h> #include 
<linux/sched.h> +#include <linux/nmi.h> +#include <linux/sched/clock.h> #include <linux/static_key.h> #include <linux/workqueue.h> #include <linux/compiler.h> diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index f063a25d4449..53f9558fa925 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -11,7 +11,8 @@ * Waiting for completion is a typically sync point, but not an exclusion point. */ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/completion.h> /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e1ae6ac15eac..956383844116 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6,10 +6,15 @@ * Copyright (C) 1991-2002 Linus Torvalds */ #include <linux/sched.h> +#include <linux/sched/clock.h> +#include <uapi/linux/sched/types.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/hotplug.h> #include <linux/cpuset.h> #include <linux/delayacct.h> #include <linux/init_task.h> #include <linux/context_tracking.h> +#include <linux/rcupdate_wait.h> #include <linux/blkdev.h> #include <linux/kprobes.h> @@ -981,7 +986,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_ return rq; /* Affinity changed (again). */ - if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) return rq; rq = move_queued_task(rq, p, dest_cpu); @@ -1090,6 +1095,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, int ret = 0; rq = task_rq_lock(p, &rf); + update_rq_clock(rq); if (p->flags & PF_KTHREAD) { /* @@ -1258,10 +1264,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; - if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) + if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) goto unlock; - if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) + if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); @@ -1302,10 +1308,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p) if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; - if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) + if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) goto out; - if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) + if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); @@ -1489,14 +1495,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; - if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) return dest_cpu; } } for (;;) { /* Any allowed, online CPU? 
*/ - for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { + for_each_cpu(dest_cpu, &p->cpus_allowed) { if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) continue; if (!cpu_online(dest_cpu)) @@ -1548,10 +1554,10 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else - cpu = cpumask_any(tsk_cpus_allowed(p)); + cpu = cpumask_any(&p->cpus_allowed); /* * In order not to call set_task_cpu() on a blocking task we need @@ -1563,7 +1569,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ - if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || + if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); @@ -2847,7 +2853,7 @@ context_switch(struct rq *rq, struct task_struct *prev, if (!mm) { next->active_mm = oldmm; - atomic_inc(&oldmm->mm_count); + mmgrab(oldmm); enter_lazy_tlb(oldmm, next); } else switch_mm_irqs_off(oldmm, mm, next); @@ -3210,6 +3216,15 @@ static inline void preempt_latency_start(int val) { } static inline void preempt_latency_stop(int val) { } #endif +static inline unsigned long get_preempt_disable_ip(struct task_struct *p) +{ +#ifdef CONFIG_DEBUG_PREEMPT + return p->preempt_disable_ip; +#else + return 0; +#endif +} + /* * Print scheduling while atomic bug: */ @@ -5232,6 +5247,9 @@ void sched_show_task(struct task_struct *p) int ppid; unsigned long state = p->state; + /* Make sure the string lines up properly with the number of task states: */ + BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); + if (!try_get_task_stack(p)) return; if (state) @@ -5460,7 +5478,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (curr_cpu == target_cpu) return 0; - if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) return -EINVAL; /* TODO: This is not properly updating schedstats */ @@ -5560,7 +5578,7 @@ static void migrate_tasks(struct rq *dead_rq) { struct rq *rq = dead_rq; struct task_struct *next, *stop = rq->stop; - struct rq_flags rf, old_rf; + struct rq_flags rf; int dest_cpu; /* @@ -5579,7 +5597,9 @@ static void migrate_tasks(struct rq *dead_rq) * class method both need to have an up-to-date * value of rq->clock[_task] */ + rq_pin_lock(rq, &rf); update_rq_clock(rq); + rq_unpin_lock(rq, &rf); for (;;) { /* @@ -5592,7 +5612,7 @@ static void migrate_tasks(struct rq *dead_rq) /* * pick_next_task() assumes pinned rq->lock: */ - rq_pin_lock(rq, &rf); + rq_repin_lock(rq, &rf); next = pick_next_task(rq, &fake_task, &rf); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); @@ -5621,13 +5641,6 @@ static void migrate_tasks(struct rq *dead_rq) continue; } - /* - * __migrate_task() may return with a different - * rq->lock held and a new cookie in 'rf', but we need - * to preserve rf::clock_update_flags for 'dead_rq'. - */ - old_rf = rf; - /* Find suitable destination for @next, with force if needed. 
*/ dest_cpu = select_fallback_rq(dead_rq->cpu, next); @@ -5636,7 +5649,6 @@ static void migrate_tasks(struct rq *dead_rq) raw_spin_unlock(&rq->lock); rq = dead_rq; raw_spin_lock(&rq->lock); - rf = old_rf; } raw_spin_unlock(&next->pi_lock); } @@ -6098,7 +6110,7 @@ void __init sched_init(void) /* * The boot idle thread does lazy MMU switching as well: */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); enter_lazy_tlb(&init_mm, current); /* @@ -6819,11 +6831,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (IS_ERR(tg)) return ERR_PTR(-ENOMEM); - sched_online_group(tg, parent); - return &tg->css; } +/* Expose task group only after completing cgroup initialization */ +static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) +{ + struct task_group *tg = css_tg(css); + struct task_group *parent = css_tg(css->parent); + + if (parent) + sched_online_group(tg, parent); + return 0; +} + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); @@ -7229,6 +7250,7 @@ static struct cftype cpu_files[] = { struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, + .css_online = cpu_cgroup_css_online, .css_released = cpu_cgroup_css_released, .css_free = cpu_cgroup_css_free, .fork = cpu_cgroup_fork, diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index e73119013c53..fba235c7d026 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, const struct sched_dl_entity *dl_se = &p->dl; if (later_mask && - cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) { + cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { best_cpu = cpumask_any(later_mask); goto out; - } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) && + } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && dl_time_before(dl_se->deadline, cp->elements[0].dl)) { best_cpu = cpudl_maximum(cp); if (later_mask) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index fd4659313640..8f8de3d4d6b7 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -13,6 +13,7 @@ #include <linux/cpufreq.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/slab.h> #include <trace/events/power.h> diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 11e9705bf937..981fcd7dc394 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (skip) continue; - if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids) + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; if (lowest_mask) { - cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask); + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 2ecec3a4f1ee..f3778e2b46c8 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -4,12 +4,8 @@ #include <linux/kernel_stat.h> #include <linux/static_key.h> #include <linux/context_tracking.h> -#include <linux/cputime.h> +#include <linux/sched/cputime.h> #include "sched.h" -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#endif - #ifdef CONFIG_IRQ_TIME_ACCOUNTING diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 27737f34757d..99b2c33a9fbc 100644 --- 
a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory++; update_dl_migration(dl_rq); @@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory--; update_dl_migration(dl_rq); @@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p * If we cannot preempt any rq, fall back to pick any * online cpu. */ - cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p)); + cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); if (cpu >= nr_cpu_ids) { /* * Fail to find any suitable cpu. @@ -958,7 +958,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) enqueue_dl_entity(&p->dl, pi_se, flags); - if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); } @@ -1032,9 +1032,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) * try to make it stay here, it might be important. */ if (unlikely(dl_task(curr)) && - (tsk_nr_cpus_allowed(curr) < 2 || + (curr->nr_cpus_allowed < 2 || !dl_entity_preempt(&p->dl, &curr->dl)) && - (tsk_nr_cpus_allowed(p) > 1)) { + (p->nr_cpus_allowed > 1)) { int target = find_later_rq(p); if (target != -1 && @@ -1055,7 +1055,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * Current can't be migrated, useless to reschedule, * let's hope p can move out. */ - if (tsk_nr_cpus_allowed(rq->curr) == 1 || + if (rq->curr->nr_cpus_allowed == 1 || cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) return; @@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * p is migratable, so let's not schedule it and * see if it is pushed or pulled somewhere else. */ - if (tsk_nr_cpus_allowed(p) != 1 && + if (p->nr_cpus_allowed != 1 && cpudl_find(&rq->rd->cpudl, p, NULL) != -1) return; @@ -1178,7 +1178,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p) { update_curr_dl(rq); - if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) + if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); } @@ -1235,7 +1235,7 @@ static void set_curr_task_dl(struct rq *rq) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + cpumask_test_cpu(cpu, &p->cpus_allowed)) return 1; return 0; } @@ -1279,7 +1279,7 @@ static int find_later_rq(struct task_struct *task) if (unlikely(!later_mask)) return -1; - if (tsk_nr_cpus_allowed(task) == 1) + if (task->nr_cpus_allowed == 1) return -1; /* @@ -1384,8 +1384,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. 
*/ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(later_rq->cpu, - tsk_cpus_allowed(task)) || + !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || !dl_task(task) || !task_on_rq_queued(task))) { @@ -1425,7 +1424,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); - BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!task_on_rq_queued(p)); BUG_ON(!dl_task(p)); @@ -1464,7 +1463,7 @@ retry: */ if (dl_task(rq->curr) && dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && - tsk_nr_cpus_allowed(rq->curr) > 1) { + rq->curr->nr_cpus_allowed > 1) { resched_curr(rq); return 0; } @@ -1611,9 +1610,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && - tsk_nr_cpus_allowed(p) > 1 && + p->nr_cpus_allowed > 1 && dl_task(rq->curr) && - (tsk_nr_cpus_allowed(rq->curr) < 2 || + (rq->curr->nr_cpus_allowed < 2 || !dl_entity_preempt(&p->dl, &rq->curr->dl))) { push_dl_tasks(rq); } @@ -1727,7 +1726,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) if (rq->curr != p) { #ifdef CONFIG_SMP - if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) + if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) queue_push_tasks(rq); #endif if (dl_task(rq->curr)) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 109adc0e9cb9..38f019324f1a 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -11,7 +11,8 @@ */ #include <linux/proc_fs.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/utsname.h> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 274c747a01ce..3e88b35ac157 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -20,7 +20,9 @@ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/topology.h> + #include <linux/latencytop.h> #include <linux/cpumask.h> #include <linux/cpuidle.h> @@ -1551,7 +1553,7 @@ static void task_numa_compare(struct task_numa_env *env, */ if (cur) { /* Skip this swap candidate if cannot move to the source cpu */ - if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) + if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) goto unlock; /* @@ -1661,7 +1663,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p))) + if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) continue; env->dst_cpu = cpu; @@ -5458,7 +5460,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_cpus(group), - tsk_cpus_allowed(p))) + &p->cpus_allowed)) continue; local_group = cpumask_test_cpu(this_cpu, @@ -5578,7 +5580,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return cpumask_first(sched_group_cpus(group)); /* Traverse only the allowed CPUs */ - for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { + for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { if (idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); @@ -5717,7 +5719,7 @@ static 
int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; - cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p)); + cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); for_each_cpu_wrap(core, cpus, target, wrap) { bool idle = true; @@ -5751,7 +5753,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (idle_cpu(cpu)) return cpu; @@ -5803,7 +5805,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = local_clock(); for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) { - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (idle_cpu(cpu)) break; @@ -5958,7 +5960,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) - && cpumask_test_cpu(cpu, tsk_cpus_allowed(p)); + && cpumask_test_cpu(cpu, &p->cpus_allowed); } rcu_read_lock(); @@ -6698,7 +6700,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; - if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { + if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); @@ -6718,7 +6720,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's cpus */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { - if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) { + if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; @@ -7252,7 +7254,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing - * groups is inadequate due to tsk_cpus_allowed() constraints. + * groups is inadequate due to ->cpus_allowed constraints. * * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a * cpumask covering 1 cpu of the first group and 3 cpus of the second group. 
@@ -8211,8 +8213,7 @@ more_balance: * if the curr task on busiest cpu can't be * moved to this_cpu */ - if (!cpumask_test_cpu(this_cpu, - tsk_cpus_allowed(busiest->curr))) { + if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 6a4bae0a649d..ac6d5176463d 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -2,6 +2,7 @@ * Generic entry point for the idle threads */ #include <linux/sched.h> +#include <linux/sched/idle.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/cpuhotplug.h> diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a2d6eb71f06b..7296b7308eca 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -7,6 +7,7 @@ */ #include <linux/export.h> +#include <linux/sched/loadavg.h> #include "sched.h" diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e8836cfc4cdb..9f3e40226dec 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -335,7 +335,7 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total++; - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory++; update_rt_migration(rt_rq); @@ -352,7 +352,7 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total--; - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory--; update_rt_migration(rt_rq); @@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_rt_entity(rt_se, flags); - if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) * will have to sort it out. */ if (curr && unlikely(rt_task(curr)) && - (tsk_nr_cpus_allowed(curr) < 2 || + (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio)) { int target = find_lowest_rq(p); @@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) * Current can't be migrated, useless to reschedule, * let's hope p can move out. */ - if (tsk_nr_cpus_allowed(rq->curr) == 1 || + if (rq->curr->nr_cpus_allowed == 1 || !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) return; @@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) * p is migratable, so let's not schedule it and * see if it is pushed or pulled somewhere else. 
*/ - if (tsk_nr_cpus_allowed(p) != 1 + if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) return; @@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) * The previous task needs to be made eligible for pushing * if it is still active */ - if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) + if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + cpumask_test_cpu(cpu, &p->cpus_allowed)) return 1; return 0; } @@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_struct *task) if (unlikely(!lowest_mask)) return -1; - if (tsk_nr_cpus_allowed(task) == 1) + if (task->nr_cpus_allowed == 1) return -1; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) @@ -1726,8 +1726,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(lowest_rq->cpu, - tsk_cpus_allowed(task)) || + !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || !rt_task(task) || !task_on_rq_queued(task))) { @@ -1762,7 +1761,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); - BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!task_on_rq_queued(p)); BUG_ON(!rt_task(p)); @@ -2122,9 +2121,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && - tsk_nr_cpus_allowed(p) > 1 && + p->nr_cpus_allowed > 1 && (dl_task(rq->curr) || rt_task(rq->curr)) && - (tsk_nr_cpus_allowed(rq->curr) < 2 || + (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio)) push_rt_tasks(rq); } @@ -2197,7 +2196,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) */ if (task_on_rq_queued(p) && rq->curr != p) { #ifdef CONFIG_SMP - if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) + if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) queue_push_tasks(rq); #endif /* CONFIG_SMP */ if (p->prio < rq->curr->prio) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 71b10a9b73cf..5cbf92214ad8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1,9 +1,26 @@ #include <linux/sched.h> +#include <linux/sched/autogroup.h> #include <linux/sched/sysctl.h> +#include <linux/sched/topology.h> #include <linux/sched/rt.h> -#include <linux/u64_stats_sync.h> #include <linux/sched/deadline.h> +#include <linux/sched/clock.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/signal.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/mm.h> +#include <linux/sched/cpufreq.h> +#include <linux/sched/stat.h> +#include <linux/sched/nohz.h> +#include <linux/sched/debug.h> +#include <linux/sched/hotplug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> +#include <linux/sched/init.h> + +#include <linux/u64_stats_sync.h> #include <linux/kernel_stat.h> #include <linux/binfmts.h> #include <linux/mutex.h> @@ -13,6 +30,10 @@ #include <linux/tick.h> #include <linux/slab.h> +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#endif + #include "cpupri.h" #include "cpudeadline.h" 
#include "cpuacct.h" @@ -1817,7 +1838,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu); extern void print_dl_stats(struct seq_file *m, int cpu); extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); - #ifdef CONFIG_NUMA_BALANCING extern void show_numa_stats(struct task_struct *p, struct seq_file *m); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index bf0da0aa0a14..d5710651043b 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -164,114 +164,3 @@ sched_info_switch(struct rq *rq, #define sched_info_arrive(rq, next) do { } while (0) #define sched_info_switch(rq, t, next) do { } while (0) #endif /* CONFIG_SCHED_INFO */ - -/* - * The following are functions that support scheduler-internal time accounting. - * These functions are generally called at the timer tick. None of this depends - * on CONFIG_SCHEDSTATS. - */ - -/** - * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running - * - * @tsk: Pointer to target task. - */ -#ifdef CONFIG_POSIX_TIMERS -static inline -struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) -{ - struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - - /* Check if cputimer isn't running. This is accessed without locking. */ - if (!READ_ONCE(cputimer->running)) - return NULL; - - /* - * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime - * in __exit_signal(), we won't account to the signal struct further - * cputime consumed by that task, even though the task can still be - * ticking after __exit_signal(). - * - * In order to keep a consistent behaviour between thread group cputime - * and thread group cputimer accounting, lets also ignore the cputime - * elapsing after __exit_signal() in any thread group timer running. - * - * This makes sure that POSIX CPU clocks and timers are synchronized, so - * that a POSIX CPU timer won't expire while the corresponding POSIX CPU - * clock delta is behind the expiring timer value. - */ - if (unlikely(!tsk->sighand)) - return NULL; - - return cputimer; -} -#else -static inline -struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) -{ - return NULL; -} -#endif - -/** - * account_group_user_time - Maintain utime for a thread group. - * - * @tsk: Pointer to task structure. - * @cputime: Time value by which to increment the utime field of the - * thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the utime field there. - */ -static inline void account_group_user_time(struct task_struct *tsk, - u64 cputime) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(cputime, &cputimer->cputime_atomic.utime); -} - -/** - * account_group_system_time - Maintain stime for a thread group. - * - * @tsk: Pointer to task structure. - * @cputime: Time value by which to increment the stime field of the - * thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the stime field there. - */ -static inline void account_group_system_time(struct task_struct *tsk, - u64 cputime) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(cputime, &cputimer->cputime_atomic.stime); -} - -/** - * account_group_exec_runtime - Maintain exec runtime for a thread group. - * - * @tsk: Pointer to task structure. 
- * @ns: Time value by which to increment the sum_exec_runtime field - * of the thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the sum_exec_runtime field there. - */ -static inline void account_group_exec_runtime(struct task_struct *tsk, - unsigned long long ns) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); -} diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 82f0dff90030..3d5610dcce11 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -1,4 +1,4 @@ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/swait.h> void __init_swait_queue_head(struct swait_queue_head *q, const char *name, diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 9453efe9b25a..4d2ea6f25568 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -5,7 +5,8 @@ */ #include <linux/init.h> #include <linux/export.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/mm.h> #include <linux/wait.h> #include <linux/hash.h> diff --git a/kernel/seccomp.c b/kernel/seccomp.c index e15185c28de5..65f61077ad50 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -18,6 +18,7 @@ #include <linux/compat.h> #include <linux/coredump.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/seccomp.h> #include <linux/slab.h> #include <linux/syscalls.h> diff --git a/kernel/signal.c b/kernel/signal.c index 13f9def8b24a..7e59ebc2c25e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -13,7 +13,12 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/user.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/cputime.h> #include <linux/fs.h> #include <linux/tty.h> #include <linux/binfmts.h> @@ -2395,11 +2400,11 @@ void exit_signals(struct task_struct *tsk) * @tsk is about to have PF_EXITING set - lock out users which * expect stable threadgroup. 
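The helpers removed from kernel/sched/stats.h above (get_running_cputimer() and the account_group_*() family, relocated elsewhere as part of this header reshuffle) all follow one pattern: bail out cheaply when no thread-group timer is armed, otherwise add the elapsed time to a shared atomic counter. Below is a toy userspace sketch of that pattern only, with invented types that do not match the kernel's layout:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented analogue of a thread-group cputimer; illustration only. */
struct toy_group_cputimer {
    atomic_bool running;
    atomic_ullong utime;
};

/* Same shape as account_group_user_time(): skip entirely unless a
 * group-wide timer is running, then accumulate into a shared atomic. */
static void toy_account_user_time(struct toy_group_cputimer *t,
                                  unsigned long long delta)
{
    if (!atomic_load_explicit(&t->running, memory_order_relaxed))
        return;
    atomic_fetch_add(&t->utime, delta);
}

int main(void)
{
    struct toy_group_cputimer t;

    atomic_init(&t.running, false);
    atomic_init(&t.utime, 0);

    toy_account_user_time(&t, 100);   /* ignored: no timer armed */
    atomic_store(&t.running, true);
    toy_account_user_time(&t, 100);   /* accounted */

    printf("utime = %llu\n", (unsigned long long)atomic_load(&t.utime));
    return 0;
}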
*/ - threadgroup_change_begin(tsk); + cgroup_threadgroup_change_begin(tsk); if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { tsk->flags |= PF_EXITING; - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); return; } @@ -2410,7 +2415,7 @@ void exit_signals(struct task_struct *tsk) */ tsk->flags |= PF_EXITING; - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); if (!signal_pending(tsk)) goto out; @@ -3239,10 +3244,17 @@ int compat_restore_altstack(const compat_stack_t __user *uss) int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) { + int err; struct task_struct *t = current; - return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) | - __put_user(sas_ss_flags(sp), &uss->ss_flags) | + err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), + &uss->ss_sp) | + __put_user(t->sas_ss_flags, &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); + if (err) + return err; + if (t->sas_ss_flags & SS_AUTODISARM) + sas_ss_reset(t); + return 0; } #endif diff --git a/kernel/smp.c b/kernel/smp.c index 77fcdb9f2775..a817769b53c0 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -17,6 +17,7 @@ #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/sched/idle.h> #include <linux/hypervisor.h> #include "smpboot.h" diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 4a5c6e73ecd4..1d71c051a951 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -9,6 +9,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/kthread.h> diff --git a/kernel/sys.c b/kernel/sys.c index b07adca97ea3..7ff6d1b10cec 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -49,6 +49,13 @@ #include <linux/binfmts.h> #include <linux/sched.h> +#include <linux/sched/autogroup.h> +#include <linux/sched/loadavg.h> +#include <linux/sched/stat.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task.h> +#include <linux/sched/cputime.h> #include <linux/rcupdate.h> #include <linux/uidgid.h> #include <linux/cred.h> diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bb260ceb3718..acf0a5a06da7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ #include <linux/capability.h> #include <linux/binfmts.h> #include <linux/sched/sysctl.h> +#include <linux/sched/coredump.h> #include <linux/kexec.h> #include <linux/bpf.h> #include <linux/mount.h> diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index e6dc9a538efa..ce3a31e8eb36 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -19,6 +19,8 @@ #include <linux/hrtimer.h> #include <linux/timerqueue.h> #include <linux/rtc.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> #include <linux/alarmtimer.h> #include <linux/mutex.h> #include <linux/platform_device.h> diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 8e11d8d9f419..ec08f527d7ee 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -43,10 +43,12 @@ #include <linux/seq_file.h> #include <linux/err.h> #include <linux/debugobjects.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/sysctl.h> #include <linux/sched/rt.h> #include <linux/sched/deadline.h> +#include <linux/sched/nohz.h> +#include <linux/sched/debug.h> #include <linux/timer.h> #include <linux/freezer.h> diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 
a95f13c31464..087d6a1279b8 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -10,6 +10,8 @@ #include <linux/interrupt.h> #include <linux/syscalls.h> #include <linux/time.h> +#include <linux/sched/signal.h> +#include <linux/sched/cputime.h> #include <linux/posix-timers.h> #include <linux/hrtimer.h> #include <trace/events/timer.h> diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index b4377a5e4269..4513ad16a253 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -2,7 +2,8 @@ * Implement CPU time clocks for the POSIX clock interface. */ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/cputime.h> #include <linux/posix-timers.h> #include <linux/errno.h> #include <linux/math64.h> diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 1e6623d76750..50a6a47020de 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> +#include <linux/sched/task.h> #include <linux/uaccess.h> #include <linux/list.h> diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a26036d37a38..ea6b610c4c57 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/syscore_ops.h> #include <linux/hrtimer.h> #include <linux/sched_clock.h> diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 2c115fdab397..7fe53be86077 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -17,8 +17,12 @@ #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/percpu.h> +#include <linux/nmi.h> #include <linux/profile.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/clock.h> +#include <linux/sched/stat.h> +#include <linux/sched/nohz.h> #include <linux/module.h> #include <linux/irq_work.h> #include <linux/posix-timers.h> diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 95b258dd75db..5b63a2102c29 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -14,7 +14,9 @@ #include <linux/percpu.h> #include <linux/init.h> #include <linux/mm.h> +#include <linux/nmi.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/syscore_ops.h> #include <linux/clocksource.h> #include <linux/jiffies.h> diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 82a6bfa0c307..1dc0256bfb6e 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -38,8 +38,10 @@ #include <linux/tick.h> #include <linux/kallsyms.h> #include <linux/irq_work.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/sched/sysctl.h> +#include <linux/sched/nohz.h> +#include <linux/sched/debug.h> #include <linux/slab.h> #include <linux/compat.h> diff --git a/kernel/torture.c b/kernel/torture.c index 0d887eb62856..55de96529287 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -30,6 +30,7 @@ #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/completion.h> @@ -311,7 +312,7 @@ EXPORT_SYMBOL_GPL(torture_random); /* * Variables for shuffling. 
The idea is to ensure that each CPU stays * idle for an extended period to test interactions with dyntick idle, - * as well as interactions with any per-CPU varibles. + * as well as interactions with any per-CPU variables. */ struct shuffle_task { struct list_head st_l; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index eb230f06ba41..0d1597c9ee30 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -15,6 +15,7 @@ #include <linux/stop_machine.h> #include <linux/clocksource.h> +#include <linux/sched/task.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/suspend.h> @@ -1110,13 +1111,6 @@ struct ftrace_func_entry { unsigned long ip; }; -struct ftrace_hash { - unsigned long size_bits; - struct hlist_head *buckets; - unsigned long count; - struct rcu_head rcu; -}; - /* * We make these constant because no one should touch them, * but they are used as the default "empty hash", to avoid allocating @@ -1192,26 +1186,24 @@ struct ftrace_page { static struct ftrace_page *ftrace_pages_start; static struct ftrace_page *ftrace_pages; -static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash) +static __always_inline unsigned long +ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip) { - return !hash || !hash->count; + if (hash->size_bits > 0) + return hash_long(ip, hash->size_bits); + + return 0; } -static struct ftrace_func_entry * -ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) +/* Only use this function if ftrace_hash_empty() has already been tested */ +static __always_inline struct ftrace_func_entry * +__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) { unsigned long key; struct ftrace_func_entry *entry; struct hlist_head *hhd; - if (ftrace_hash_empty(hash)) - return NULL; - - if (hash->size_bits > 0) - key = hash_long(ip, hash->size_bits); - else - key = 0; - + key = ftrace_hash_key(hash, ip); hhd = &hash->buckets[key]; hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { @@ -1221,17 +1213,32 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) return NULL; } +/** + * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash + * @hash: The hash to look at + * @ip: The instruction pointer to test + * + * Search a given @hash to see if a given instruction pointer (@ip) + * exists in it. + * + * Returns the entry that holds the @ip if found. NULL otherwise. 
+ */ +struct ftrace_func_entry * +ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) +{ + if (ftrace_hash_empty(hash)) + return NULL; + + return __ftrace_lookup_ip(hash, ip); +} + static void __add_hash_entry(struct ftrace_hash *hash, struct ftrace_func_entry *entry) { struct hlist_head *hhd; unsigned long key; - if (hash->size_bits) - key = hash_long(entry->ip, hash->size_bits); - else - key = 0; - + key = ftrace_hash_key(hash, entry->ip); hhd = &hash->buckets[key]; hlist_add_head(&entry->hlist, hhd); hash->count++; @@ -1383,9 +1390,8 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, struct ftrace_hash *new_hash); -static int -ftrace_hash_move(struct ftrace_ops *ops, int enable, - struct ftrace_hash **dst, struct ftrace_hash *src) +static struct ftrace_hash * +__ftrace_hash_move(struct ftrace_hash *src) { struct ftrace_func_entry *entry; struct hlist_node *tn; @@ -1393,21 +1399,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, struct ftrace_hash *new_hash; int size = src->count; int bits = 0; - int ret; int i; - /* Reject setting notrace hash on IPMODIFY ftrace_ops */ - if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) - return -EINVAL; - /* - * If the new source is empty, just free dst and assign it - * the empty_hash. + * If the new source is empty, just return the empty_hash. */ - if (!src->count) { - new_hash = EMPTY_HASH; - goto update; - } + if (!src->count) + return EMPTY_HASH; /* * Make the hash size about 1/2 the # found @@ -1421,7 +1419,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, new_hash = alloc_ftrace_hash(bits); if (!new_hash) - return -ENOMEM; + return NULL; size = 1 << src->size_bits; for (i = 0; i < size; i++) { @@ -1432,7 +1430,24 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, } } -update: + return new_hash; +} + +static int +ftrace_hash_move(struct ftrace_ops *ops, int enable, + struct ftrace_hash **dst, struct ftrace_hash *src) +{ + struct ftrace_hash *new_hash; + int ret; + + /* Reject setting notrace hash on IPMODIFY ftrace_ops */ + if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) + return -EINVAL; + + new_hash = __ftrace_hash_move(src); + if (!new_hash) + return -ENOMEM; + /* Make sure this can be applied if it is IPMODIFY ftrace_ops */ if (enable) { /* IPMODIFY should be updated only when filter_hash updating */ @@ -1466,9 +1481,9 @@ static bool hash_contains_ip(unsigned long ip, * notrace hash is considered not in the notrace hash. 
*/ return (ftrace_hash_empty(hash->filter_hash) || - ftrace_lookup_ip(hash->filter_hash, ip)) && + __ftrace_lookup_ip(hash->filter_hash, ip)) && (ftrace_hash_empty(hash->notrace_hash) || - !ftrace_lookup_ip(hash->notrace_hash, ip)); + !__ftrace_lookup_ip(hash->notrace_hash, ip)); } /* @@ -2880,7 +2895,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) /* The function must be in the filter */ if (!ftrace_hash_empty(ops->func_hash->filter_hash) && - !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) + !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) return 0; /* If in notrace hash, we ignore it too */ @@ -4382,7 +4397,7 @@ __setup("ftrace_filter=", set_ftrace_filter); #ifdef CONFIG_FUNCTION_GRAPH_TRACER static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; -static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); +static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); static unsigned long save_global_trampoline; static unsigned long save_global_flags; @@ -4405,18 +4420,17 @@ static void __init set_ftrace_early_graph(char *buf, int enable) { int ret; char *func; - unsigned long *table = ftrace_graph_funcs; - int *count = &ftrace_graph_count; + struct ftrace_hash *hash; - if (!enable) { - table = ftrace_graph_notrace_funcs; - count = &ftrace_graph_notrace_count; - } + if (enable) + hash = ftrace_graph_hash; + else + hash = ftrace_graph_notrace_hash; while (buf) { func = strsep(&buf, ","); /* we allow only one expression at a time */ - ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func); + ret = ftrace_graph_set_hash(hash, func); if (ret) printk(KERN_DEBUG "ftrace: function %s not " "traceable\n", func); @@ -4540,26 +4554,55 @@ static const struct file_operations ftrace_notrace_fops = { static DEFINE_MUTEX(graph_lock); -int ftrace_graph_count; -int ftrace_graph_notrace_count; -unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; -unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; +struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; +struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION, +}; + +#define FTRACE_GRAPH_EMPTY ((void *)1) struct ftrace_graph_data { - unsigned long *table; - size_t size; - int *count; - const struct seq_operations *seq_ops; + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; /* for hash table iteration */ + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; }; static void * __g_next(struct seq_file *m, loff_t *pos) { struct ftrace_graph_data *fgd = m->private; + struct ftrace_func_entry *entry = fgd->entry; + struct hlist_head *head; + int i, idx = fgd->idx; - if (*pos >= *fgd->count) + if (*pos >= fgd->hash->count) return NULL; - return &fgd->table[*pos]; + + if (entry) { + hlist_for_each_entry_continue(entry, hlist) { + fgd->entry = entry; + return entry; + } + + idx++; + } + + for (i = idx; i < 1 << fgd->hash->size_bits; i++) { + head = &fgd->hash->buckets[i]; + hlist_for_each_entry(entry, head, hlist) { + fgd->entry = entry; + fgd->idx = i; + return entry; + } + } + return NULL; } static void * @@ -4575,10 +4618,19 @@ static void *g_start(struct seq_file *m, loff_t *pos) mutex_lock(&graph_lock); + if (fgd->type == GRAPH_FILTER_FUNCTION) + fgd->hash = 
rcu_dereference_protected(ftrace_graph_hash, + lockdep_is_held(&graph_lock)); + else + fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, + lockdep_is_held(&graph_lock)); + /* Nothing, tell g_show to print all functions are enabled */ - if (!*fgd->count && !*pos) - return (void *)1; + if (ftrace_hash_empty(fgd->hash) && !*pos) + return FTRACE_GRAPH_EMPTY; + fgd->idx = 0; + fgd->entry = NULL; return __g_next(m, pos); } @@ -4589,22 +4641,22 @@ static void g_stop(struct seq_file *m, void *p) static int g_show(struct seq_file *m, void *v) { - unsigned long *ptr = v; + struct ftrace_func_entry *entry = v; - if (!ptr) + if (!entry) return 0; - if (ptr == (unsigned long *)1) { + if (entry == FTRACE_GRAPH_EMPTY) { struct ftrace_graph_data *fgd = m->private; - if (fgd->table == ftrace_graph_funcs) + if (fgd->type == GRAPH_FILTER_FUNCTION) seq_puts(m, "#### all functions enabled ####\n"); else seq_puts(m, "#### no functions disabled ####\n"); return 0; } - seq_printf(m, "%ps\n", (void *)*ptr); + seq_printf(m, "%ps\n", (void *)entry->ip); return 0; } @@ -4621,24 +4673,51 @@ __ftrace_graph_open(struct inode *inode, struct file *file, struct ftrace_graph_data *fgd) { int ret = 0; + struct ftrace_hash *new_hash = NULL; - mutex_lock(&graph_lock); - if ((file->f_mode & FMODE_WRITE) && - (file->f_flags & O_TRUNC)) { - *fgd->count = 0; - memset(fgd->table, 0, fgd->size * sizeof(*fgd->table)); + if (file->f_mode & FMODE_WRITE) { + const int size_bits = FTRACE_HASH_DEFAULT_BITS; + + if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) + return -ENOMEM; + + if (file->f_flags & O_TRUNC) + new_hash = alloc_ftrace_hash(size_bits); + else + new_hash = alloc_and_copy_ftrace_hash(size_bits, + fgd->hash); + if (!new_hash) { + ret = -ENOMEM; + goto out; + } } - mutex_unlock(&graph_lock); if (file->f_mode & FMODE_READ) { - ret = seq_open(file, fgd->seq_ops); + ret = seq_open(file, &ftrace_graph_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = fgd; + } else { + /* Failed */ + free_ftrace_hash(new_hash); + new_hash = NULL; } } else file->private_data = fgd; +out: + if (ret < 0 && file->f_mode & FMODE_WRITE) + trace_parser_put(&fgd->parser); + + fgd->new_hash = new_hash; + + /* + * All uses of fgd->hash must be taken with the graph_lock + * held. The graph_lock is going to be released, so force + * fgd->hash to be reinitialized when it is taken again. 
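In the __ftrace_graph_open() rework above, a writer no longer edits the live graph filter in place: it builds a private new_hash (empty for O_TRUNC, otherwise a copy of the current table) and, as the release path further below shows, publishes it with rcu_assign_pointer() only once it is complete, freeing the old table after a grace period. The copy-then-publish idea, stripped down to a plain atomic pointer swap in userspace (no real RCU grace period here; every name below is invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Readers always see either the old or the new filter, never a
 * half-edited one. Illustration only, not the kernel's ftrace_hash. */
struct toy_filter {
    size_t count;
    unsigned long ips[8];
};

static _Atomic(struct toy_filter *) live_filter;

static int toy_filter_match(unsigned long ip)
{
    struct toy_filter *f = atomic_load(&live_filter);

    for (size_t i = 0; f && i < f->count; i++)
        if (f->ips[i] == ip)
            return 1;
    return 0;
}

/* Writer side: edit a private copy, publish it atomically, reclaim the old
 * one. The kernel defers the free with synchronize_sched(); this
 * single-threaded sketch can free immediately. */
static void toy_filter_replace(struct toy_filter *next)
{
    struct toy_filter *old = atomic_exchange(&live_filter, next);

    free(old);
}

int main(void)
{
    struct toy_filter *f = calloc(1, sizeof(*f));

    if (!f)
        return 1;
    f->ips[f->count++] = 0x1000;
    toy_filter_replace(f);
    printf("match 0x1000: %d, match 0x2000: %d\n",
           toy_filter_match(0x1000), toy_filter_match(0x2000));
    toy_filter_replace(NULL);
    return 0;
}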
+ */ + fgd->hash = NULL; + return ret; } @@ -4646,6 +4725,7 @@ static int ftrace_graph_open(struct inode *inode, struct file *file) { struct ftrace_graph_data *fgd; + int ret; if (unlikely(ftrace_disabled)) return -ENODEV; @@ -4654,18 +4734,26 @@ ftrace_graph_open(struct inode *inode, struct file *file) if (fgd == NULL) return -ENOMEM; - fgd->table = ftrace_graph_funcs; - fgd->size = FTRACE_GRAPH_MAX_FUNCS; - fgd->count = &ftrace_graph_count; + mutex_lock(&graph_lock); + + fgd->hash = rcu_dereference_protected(ftrace_graph_hash, + lockdep_is_held(&graph_lock)); + fgd->type = GRAPH_FILTER_FUNCTION; fgd->seq_ops = &ftrace_graph_seq_ops; - return __ftrace_graph_open(inode, file, fgd); + ret = __ftrace_graph_open(inode, file, fgd); + if (ret < 0) + kfree(fgd); + + mutex_unlock(&graph_lock); + return ret; } static int ftrace_graph_notrace_open(struct inode *inode, struct file *file) { struct ftrace_graph_data *fgd; + int ret; if (unlikely(ftrace_disabled)) return -ENODEV; @@ -4674,45 +4762,97 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file) if (fgd == NULL) return -ENOMEM; - fgd->table = ftrace_graph_notrace_funcs; - fgd->size = FTRACE_GRAPH_MAX_FUNCS; - fgd->count = &ftrace_graph_notrace_count; + mutex_lock(&graph_lock); + + fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, + lockdep_is_held(&graph_lock)); + fgd->type = GRAPH_FILTER_NOTRACE; fgd->seq_ops = &ftrace_graph_seq_ops; - return __ftrace_graph_open(inode, file, fgd); + ret = __ftrace_graph_open(inode, file, fgd); + if (ret < 0) + kfree(fgd); + + mutex_unlock(&graph_lock); + return ret; } static int ftrace_graph_release(struct inode *inode, struct file *file) { + struct ftrace_graph_data *fgd; + struct ftrace_hash *old_hash, *new_hash; + struct trace_parser *parser; + int ret = 0; + if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; - kfree(m->private); + fgd = m->private; seq_release(inode, file); } else { - kfree(file->private_data); + fgd = file->private_data; } - return 0; + + if (file->f_mode & FMODE_WRITE) { + + parser = &fgd->parser; + + if (trace_parser_loaded((parser))) { + parser->buffer[parser->idx] = 0; + ret = ftrace_graph_set_hash(fgd->new_hash, + parser->buffer); + } + + trace_parser_put(parser); + + new_hash = __ftrace_hash_move(fgd->new_hash); + if (!new_hash) { + ret = -ENOMEM; + goto out; + } + + mutex_lock(&graph_lock); + + if (fgd->type == GRAPH_FILTER_FUNCTION) { + old_hash = rcu_dereference_protected(ftrace_graph_hash, + lockdep_is_held(&graph_lock)); + rcu_assign_pointer(ftrace_graph_hash, new_hash); + } else { + old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, + lockdep_is_held(&graph_lock)); + rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); + } + + mutex_unlock(&graph_lock); + + /* Wait till all users are no longer using the old hash */ + synchronize_sched(); + + free_ftrace_hash(old_hash); + } + + out: + kfree(fgd->new_hash); + kfree(fgd); + + return ret; } static int -ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) +ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) { struct ftrace_glob func_g; struct dyn_ftrace *rec; struct ftrace_page *pg; + struct ftrace_func_entry *entry; int fail = 1; int not; - bool exists; - int i; /* decode regex */ func_g.type = filter_parse_regex(buffer, strlen(buffer), &func_g.search, ¬); - if (!not && *idx >= size) - return -EBUSY; func_g.len = strlen(func_g.search); @@ -4729,26 +4869,18 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) 
continue; if (ftrace_match_record(rec, &func_g, NULL, 0)) { - /* if it is in the array */ - exists = false; - for (i = 0; i < *idx; i++) { - if (array[i] == rec->ip) { - exists = true; - break; - } - } + entry = ftrace_lookup_ip(hash, rec->ip); if (!not) { fail = 0; - if (!exists) { - array[(*idx)++] = rec->ip; - if (*idx >= size) - goto out; - } + + if (entry) + continue; + if (add_hash_entry(hash, rec->ip) < 0) + goto out; } else { - if (exists) { - array[i] = array[--(*idx)]; - array[*idx] = 0; + if (entry) { + free_hash_entry(hash, entry); fail = 0; } } @@ -4767,35 +4899,34 @@ static ssize_t ftrace_graph_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct trace_parser parser; ssize_t read, ret = 0; struct ftrace_graph_data *fgd = file->private_data; + struct trace_parser *parser; if (!cnt) return 0; - if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) - return -ENOMEM; - - read = trace_get_user(&parser, ubuf, cnt, ppos); + /* Read mode uses seq functions */ + if (file->f_mode & FMODE_READ) { + struct seq_file *m = file->private_data; + fgd = m->private; + } - if (read >= 0 && trace_parser_loaded((&parser))) { - parser.buffer[parser.idx] = 0; + parser = &fgd->parser; - mutex_lock(&graph_lock); + read = trace_get_user(parser, ubuf, cnt, ppos); - /* we allow only one expression at a time */ - ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, - parser.buffer); + if (read >= 0 && trace_parser_loaded(parser) && + !trace_parser_cont(parser)) { - mutex_unlock(&graph_lock); + ret = ftrace_graph_set_hash(fgd->new_hash, + parser->buffer); + trace_parser_clear(parser); } if (!ret) ret = read; - trace_parser_put(&parser); - return ret; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a85739efcc30..96fc3c043ad6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -6,6 +6,7 @@ #include <linux/trace_events.h> #include <linux/ring_buffer.h> #include <linux/trace_clock.h> +#include <linux/sched/clock.h> #include <linux/trace_seq.h> #include <linux/spinlock.h> #include <linux/irq_work.h> diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 6df9a83e20d7..c190a4d5013c 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -6,6 +6,7 @@ #include <linux/ring_buffer.h> #include <linux/completion.h> #include <linux/kthread.h> +#include <uapi/linux/sched/types.h> #include <linux/module.h> #include <linux/ktime.h> #include <asm/local.h> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 310f0ea0d1a2..707445ceb7ef 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -260,16 +260,8 @@ unsigned long long ns2usecs(u64 nsec) TRACE_ITER_EVENT_FORK /* - * The global_trace is the descriptor that holds the tracing - * buffers for the live tracing. For each CPU, it contains - * a link list of pages that will store trace entries. The - * page descriptor of the pages in the memory is used to hold - * the link list by linking the lru item in the page descriptor - * to each of the pages in the buffer per CPU. - * - * For each active CPU there is a data field that holds the - * pages for the buffer for that CPU. Each CPU has the same number - * of pages allocated for its buffer. + * The global_trace is the descriptor that holds the top-level tracing + * buffers for the live tracing. 
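The ftrace_graph_set_hash() conversion above, together with the set_graph_function plumbing around it, replaces the fixed 32-entry ftrace_graph_funcs[] array and its linear scan with an ftrace_hash, so membership tests become average O(1) and the filter loses its hard size cap. A self-contained sketch of the underlying structure, simplified and not the kernel's implementation:

#include <stdio.h>
#include <stdlib.h>

#define TOY_BUCKET_BITS 4
#define TOY_NBUCKETS (1u << TOY_BUCKET_BITS)

struct toy_entry {
    unsigned long ip;
    struct toy_entry *next;
};

struct toy_hash {
    struct toy_entry *buckets[TOY_NBUCKETS];
    unsigned long count;
};

/* Knuth-style multiplicative hash folded down to TOY_BUCKET_BITS bits. */
static unsigned int toy_hash_key(unsigned long ip)
{
    return (unsigned int)(ip * 2654435761u) >> (32 - TOY_BUCKET_BITS);
}

static void toy_add_ip(struct toy_hash *h, unsigned long ip)
{
    unsigned int key = toy_hash_key(ip);
    struct toy_entry *e = malloc(sizeof(*e));

    if (!e)
        return;
    e->ip = ip;
    e->next = h->buckets[key];
    h->buckets[key] = e;
    h->count++;
}

/* Average O(1), versus the old O(n) walk of a fixed 32-slot array. */
static int toy_lookup_ip(const struct toy_hash *h, unsigned long ip)
{
    for (struct toy_entry *e = h->buckets[toy_hash_key(ip)]; e; e = e->next)
        if (e->ip == ip)
            return 1;
    return 0;
}

int main(void)
{
    struct toy_hash h = { { 0 }, 0 };

    toy_add_ip(&h, 0x81000000ul);
    printf("hit: %d, miss: %d\n",
           toy_lookup_ip(&h, 0x81000000ul),
           toy_lookup_ip(&h, 0x81000010ul));
    return 0;
}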
*/ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, @@ -1193,6 +1185,7 @@ int trace_parser_get_init(struct trace_parser *parser, int size) void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); + parser->buffer = NULL; } /* diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 1ea51ab53edf..ae1cce91fead 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -753,6 +753,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); extern char trace_find_mark(unsigned long long duration); +struct ftrace_hash { + unsigned long size_bits; + struct hlist_head *buckets; + unsigned long count; + struct rcu_head rcu; +}; + +struct ftrace_func_entry * +ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); + +static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) +{ + return !hash || !hash->count; +} + /* Standard output formatting function used for function return traces */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -787,53 +802,50 @@ extern void __trace_graph_return(struct trace_array *tr, struct ftrace_graph_ret *trace, unsigned long flags, int pc); - #ifdef CONFIG_DYNAMIC_FTRACE -/* TODO: make this variable */ -#define FTRACE_GRAPH_MAX_FUNCS 32 -extern int ftrace_graph_count; -extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; -extern int ftrace_graph_notrace_count; -extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS]; +extern struct ftrace_hash *ftrace_graph_hash; +extern struct ftrace_hash *ftrace_graph_notrace_hash; static inline int ftrace_graph_addr(unsigned long addr) { - int i; - - if (!ftrace_graph_count) - return 1; - - for (i = 0; i < ftrace_graph_count; i++) { - if (addr == ftrace_graph_funcs[i]) { - /* - * If no irqs are to be traced, but a set_graph_function - * is set, and called by an interrupt handler, we still - * want to trace it. - */ - if (in_irq()) - trace_recursion_set(TRACE_IRQ_BIT); - else - trace_recursion_clear(TRACE_IRQ_BIT); - return 1; - } + int ret = 0; + + preempt_disable_notrace(); + + if (ftrace_hash_empty(ftrace_graph_hash)) { + ret = 1; + goto out; } - return 0; + if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { + /* + * If no irqs are to be traced, but a set_graph_function + * is set, and called by an interrupt handler, we still + * want to trace it. 
+ */ + if (in_irq()) + trace_recursion_set(TRACE_IRQ_BIT); + else + trace_recursion_clear(TRACE_IRQ_BIT); + ret = 1; + } + +out: + preempt_enable_notrace(); + return ret; } static inline int ftrace_graph_notrace_addr(unsigned long addr) { - int i; + int ret = 0; - if (!ftrace_graph_notrace_count) - return 0; + preempt_disable_notrace(); - for (i = 0; i < ftrace_graph_notrace_count; i++) { - if (addr == ftrace_graph_notrace_funcs[i]) - return 1; - } + if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr)) + ret = 1; - return 0; + preempt_enable_notrace(); + return ret; } #else static inline int ftrace_graph_addr(unsigned long addr) @@ -1300,7 +1312,8 @@ static inline bool is_string_field(struct ftrace_event_field *field) { return field->filter_type == FILTER_DYN_STRING || field->filter_type == FILTER_STATIC_STRING || - field->filter_type == FILTER_PTR_STRING; + field->filter_type == FILTER_PTR_STRING || + field->filter_type == FILTER_COMM; } static inline bool is_function_field(struct ftrace_event_field *field) diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c index e3b488825ae3..e49fbe901cfc 100644 --- a/kernel/trace/trace_benchmark.c +++ b/kernel/trace/trace_benchmark.c @@ -175,9 +175,9 @@ int trace_benchmark_reg(void) bm_event_thread = kthread_run(benchmark_event_kthread, NULL, "event_benchmark"); - if (!bm_event_thread) { + if (IS_ERR(bm_event_thread)) { pr_warning("trace benchmark failed to create kernel thread\n"); - return -ENOMEM; + return PTR_ERR(bm_event_thread); } return 0; diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 75489de546b6..4d8fdf3184dc 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -27,7 +27,7 @@ static DEFINE_MUTEX(branch_tracing_mutex); static struct trace_array *branch_tracer; static void -probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) +probe_likely_condition(struct ftrace_likely_data *f, int val, int expect) { struct trace_event_call *call = &event_branch; struct trace_array *tr = branch_tracer; @@ -68,16 +68,17 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) entry = ring_buffer_event_data(event); /* Strip off the path, only save the file */ - p = f->file + strlen(f->file); - while (p >= f->file && *p != '/') + p = f->data.file + strlen(f->data.file); + while (p >= f->data.file && *p != '/') p--; p++; - strncpy(entry->func, f->func, TRACE_FUNC_SIZE); + strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE); strncpy(entry->file, p, TRACE_FILE_SIZE); entry->func[TRACE_FUNC_SIZE] = 0; entry->file[TRACE_FILE_SIZE] = 0; - entry->line = f->line; + entry->constant = f->constant; + entry->line = f->data.line; entry->correct = val == expect; if (!call_filter_check_discard(call, entry, buffer, event)) @@ -89,7 +90,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) } static inline -void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) +void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) { if (!branch_tracing_enabled) return; @@ -195,13 +196,19 @@ core_initcall(init_branch_tracer); #else static inline -void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) +void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) { } #endif /* CONFIG_BRANCH_TRACER */ -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int 
is_constant) { + /* A constant is always correct */ + if (is_constant) { + f->constant++; + val = expect; + } /* * I would love to have a trace point here instead, but the * trace point code is so inundated with unlikely and likely @@ -212,9 +219,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) /* FIXME: Make this atomic! */ if (val == expect) - f->correct++; + f->data.correct++; else - f->incorrect++; + f->data.incorrect++; } EXPORT_SYMBOL(ftrace_likely_update); @@ -245,29 +252,60 @@ static inline long get_incorrect_percent(struct ftrace_branch_data *p) return percent; } -static int branch_stat_show(struct seq_file *m, void *v) +static const char *branch_stat_process_file(struct ftrace_branch_data *p) { - struct ftrace_branch_data *p = v; const char *f; - long percent; /* Only print the file, not the path */ f = p->file + strlen(p->file); while (f >= p->file && *f != '/') f--; - f++; + return ++f; +} + +static void branch_stat_show(struct seq_file *m, + struct ftrace_branch_data *p, const char *f) +{ + long percent; /* * The miss is overlayed on correct, and hit on incorrect. */ percent = get_incorrect_percent(p); - seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); if (percent < 0) seq_puts(m, " X "); else seq_printf(m, "%3ld ", percent); + seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); +} + +static int branch_stat_show_normal(struct seq_file *m, + struct ftrace_branch_data *p, const char *f) +{ + seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); + branch_stat_show(m, p, f); + return 0; +} + +static int annotate_branch_stat_show(struct seq_file *m, void *v) +{ + struct ftrace_likely_data *p = v; + const char *f; + int l; + + f = branch_stat_process_file(&p->data); + + if (!p->constant) + return branch_stat_show_normal(m, &p->data, f); + + l = snprintf(NULL, 0, "/%lu", p->constant); + l = l > 8 ? 
0 : 8 - l; + + seq_printf(m, "%8lu/%lu %*lu ", + p->data.correct, p->constant, l, p->data.incorrect); + branch_stat_show(m, &p->data, f); return 0; } @@ -279,7 +317,7 @@ static void *annotated_branch_stat_start(struct tracer_stat *trace) static void * annotated_branch_stat_next(void *v, int idx) { - struct ftrace_branch_data *p = v; + struct ftrace_likely_data *p = v; ++p; @@ -328,7 +366,7 @@ static struct tracer_stat annotated_branch_stats = { .stat_next = annotated_branch_stat_next, .stat_cmp = annotated_branch_stat_cmp, .stat_headers = annotated_branch_stat_headers, - .stat_show = branch_stat_show + .stat_show = annotate_branch_stat_show }; __init static int init_annotated_branch_stats(void) @@ -379,12 +417,21 @@ all_branch_stat_next(void *v, int idx) return p; } +static int all_branch_stat_show(struct seq_file *m, void *v) +{ + struct ftrace_branch_data *p = v; + const char *f; + + f = branch_stat_process_file(p); + return branch_stat_show_normal(m, p, f); +} + static struct tracer_stat all_branch_stats = { .name = "branch_all", .stat_start = all_branch_stat_start, .stat_next = all_branch_stat_next, .stat_headers = all_branch_stat_headers, - .stat_show = branch_stat_show + .stat_show = all_branch_stat_show }; __init static int all_annotated_branch_stats(void) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 0f06532a755b..5fdc779f411d 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/percpu.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/ktime.h> #include <linux/trace_clock.h> diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index eb7396b7e7c3..c203ac4df791 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -328,11 +328,13 @@ FTRACE_ENTRY(branch, trace_branch, __array( char, func, TRACE_FUNC_SIZE+1 ) __array( char, file, TRACE_FILE_SIZE+1 ) __field( char, correct ) + __field( char, constant ) ), - F_printk("%u:%s:%s (%u)", + F_printk("%u:%s:%s (%u)%s", __entry->line, - __entry->func, __entry->file, __entry->correct), + __entry->func, __entry->file, __entry->correct, + __entry->constant ? 
" CONSTANT" : ""), FILTER_OTHER ); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index f3a960ed75a1..1c21d0e2a145 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/stacktrace.h> +#include <linux/rculist.h> #include "tracing_map.h" #include "trace.h" diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 6721a1e89f39..f2ac9d44f6c4 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -22,6 +22,7 @@ #include <linux/ctype.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/rculist.h> #include "trace.h" diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index af344a1bf0d0..21ea6ae77d93 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -44,6 +44,7 @@ #include <linux/uaccess.h> #include <linux/cpumask.h> #include <linux/delay.h> +#include <linux/sched/clock.h> #include "trace.h" static struct trace_array *hwlat_trace; @@ -266,24 +267,13 @@ out: static struct cpumask save_cpumask; static bool disable_migrate; -static void move_to_next_cpu(bool initmask) +static void move_to_next_cpu(void) { - static struct cpumask *current_mask; + struct cpumask *current_mask = &save_cpumask; int next_cpu; if (disable_migrate) return; - - /* Just pick the first CPU on first iteration */ - if (initmask) { - current_mask = &save_cpumask; - get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); - put_online_cpus(); - next_cpu = cpumask_first(current_mask); - goto set_affinity; - } - /* * If for some reason the user modifies the CPU affinity * of this thread, than stop migrating for the duration @@ -300,7 +290,6 @@ static void move_to_next_cpu(bool initmask) if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(current_mask); - set_affinity: if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */ goto disable; @@ -322,20 +311,15 @@ static void move_to_next_cpu(bool initmask) * need to ensure nothing else might be running (and thus preempting). * Obviously this should never be used in production environments. * - * Currently this runs on which ever CPU it was scheduled on, but most - * real-world hardware latency situations occur across several CPUs, - * but we might later generalize this if we find there are any actualy - * systems with alternate SMI delivery or other hardware latencies. + * Executes one loop interaction on each CPU in tracing_cpumask sysfs file. 
*/ static int kthread_fn(void *data) { u64 interval; - bool initmask = true; while (!kthread_should_stop()) { - move_to_next_cpu(initmask); - initmask = false; + move_to_next_cpu(); local_irq_disable(); get_sample(); @@ -366,13 +350,27 @@ static int kthread_fn(void *data) */ static int start_kthread(struct trace_array *tr) { + struct cpumask *current_mask = &save_cpumask; struct task_struct *kthread; + int next_cpu; + + /* Just pick the first CPU on first iteration */ + current_mask = &save_cpumask; + get_online_cpus(); + cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + put_online_cpus(); + next_cpu = cpumask_first(current_mask); kthread = kthread_create(kthread_fn, NULL, "hwlatd"); if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); return -ENOMEM; } + + cpumask_clear(current_mask); + cpumask_set_cpu(next_cpu, current_mask); + sched_setaffinity(kthread->pid, current_mask); + hwlat_kthread = kthread; wake_up_process(kthread); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7ad9e53ad174..5f688cc724f0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -16,9 +16,11 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define pr_fmt(fmt) "trace_kprobe: " fmt #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/rculist.h> #include "trace_probe.h" diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 070866c32eb9..02a4aeb22c47 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -8,6 +8,8 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <linux/sched/clock.h> +#include <linux/sched/mm.h> #include "trace_output.h" diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 8c0553d9afd3..52478f033f88 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -21,6 +21,7 @@ * Copyright (C) IBM Corporation, 2010-2011 * Author: Srikar Dronamraju */ +#define pr_fmt(fmt) "trace_probe: " fmt #include "trace_probe.h" @@ -647,7 +648,7 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char **)) { - char *kbuf, *tmp; + char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; @@ -667,27 +668,38 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer, goto out; } kbuf[size] = '\0'; - tmp = strchr(kbuf, '\n'); + buf = kbuf; + do { + tmp = strchr(buf, '\n'); + if (tmp) { + *tmp = '\0'; + size = tmp - buf + 1; + } else { + size = strlen(buf); + if (done + size < count) { + if (buf != kbuf) + break; + /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ + pr_warn("Line length is too long: Should be less than %d\n", + WRITE_BUFSIZE - 2); + ret = -EINVAL; + goto out; + } + } + done += size; - if (tmp) { - *tmp = '\0'; - size = tmp - kbuf + 1; - } else if (done + size < count) { - pr_warn("Line length is too long: Should be less than %d\n", - WRITE_BUFSIZE); - ret = -EINVAL; - goto out; - } - done += size; - /* Remove comments */ - tmp = strchr(kbuf, '#'); + /* Remove comments */ + tmp = strchr(buf, '#'); - if (tmp) - *tmp = '\0'; + if (tmp) + *tmp = '\0'; - ret = traceprobe_command(kbuf, createfn); - if (ret) - goto out; + ret = traceprobe_command(buf, createfn); + if (ret) + goto out; + buf += size; + + } while (done < count); } ret = done; diff --git a/kernel/trace/trace_selftest.c 
b/kernel/trace/trace_selftest.c index b0f86ea77881..cb917cebae29 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -1,5 +1,6 @@ /* Include in trace.c */ +#include <uapi/linux/sched/types.h> #include <linux/stringify.h> #include <linux/kthread.h> #include <linux/delay.h> diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 2a1abbaca10e..1d68b5b7ad41 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -2,6 +2,7 @@ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> * */ +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 0913693caf6e..a7581fec9681 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -17,12 +17,14 @@ * Copyright (C) IBM Corporation, 2010-2012 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> */ +#define pr_fmt(fmt) "trace_kprobe: " fmt #include <linux/module.h> #include <linux/uaccess.h> #include <linux/uprobes.h> #include <linux/namei.h> #include <linux/string.h> +#include <linux/rculist.h> #include "trace_probe.h" @@ -431,7 +433,8 @@ static int create_trace_uprobe(int argc, char **argv) pr_info("Probe point is not specified.\n"); return -EINVAL; } - arg = strchr(argv[1], ':'); + /* Find the last occurrence, in case the path contains ':' too. */ + arg = strrchr(argv[1], ':'); if (!arg) { ret = -EINVAL; goto fail_address_parse; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 1f9a31f934a4..685c50ae6300 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -24,7 +24,8 @@ #include <linux/tracepoint.h> #include <linux/err.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/static_key.h> extern struct tracepoint * const __start___tracepoints_ptrs[]; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 5c21f0535056..370724b45391 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -17,7 +17,9 @@ */ #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sched/cputime.h> #include <linux/tsacct_kern.h> #include <linux/acct.h> #include <linux/jiffies.h> diff --git a/kernel/ucount.c b/kernel/ucount.c index 8a11fc0cb459..62630a40ab3a 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -8,6 +8,7 @@ #include <linux/stat.h> #include <linux/sysctl.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/hash.h> #include <linux/user_namespace.h> diff --git a/kernel/uid16.c b/kernel/uid16.c index 71645ae9303a..5c2dc5b2bf4f 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/highuid.h> #include <linux/security.h> +#include <linux/cred.h> #include <linux/syscalls.h> #include <linux/uaccess.h> diff --git a/kernel/user.c b/kernel/user.c index b069ccbfb0b0..00281add65b2 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/bitops.h> #include <linux/key.h> +#include <linux/sched/user.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/user_namespace.h> diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 86b7854fec8e..2f735cbe05e8 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -8,6 +8,7 @@ #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/slab.h> +#include 
<linux/sched/signal.h> #include <linux/user_namespace.h> #include <linux/proc_ns.h> #include <linux/highuid.h> diff --git a/kernel/utsname.c b/kernel/utsname.c index 6976cd47dcf6..913fe4336d2b 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c @@ -14,8 +14,10 @@ #include <linux/utsname.h> #include <linux/err.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/user_namespace.h> #include <linux/proc_ns.h> +#include <linux/sched/task.h> static struct ucounts *inc_uts_namespaces(struct user_namespace *ns) { diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index c8eac43267e9..233cd8fc6910 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -14,6 +14,7 @@ #include <linux/utsname.h> #include <linux/sysctl.h> #include <linux/wait.h> +#include <linux/rwsem.h> #ifdef CONFIG_PROC_SYSCTL diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 63177be0159e..03e0b69bb5bf 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -19,8 +19,11 @@ #include <linux/sysctl.h> #include <linux/smpboot.h> #include <linux/sched/rt.h> +#include <uapi/linux/sched/types.h> #include <linux/tick.h> #include <linux/workqueue.h> +#include <linux/sched/clock.h> +#include <linux/sched/debug.h> #include <asm/irq_regs.h> #include <linux/kvm_para.h> diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index b5de262a9eb9..54a427d1f344 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -13,6 +13,8 @@ #include <linux/nmi.h> #include <linux/module.h> +#include <linux/sched/debug.h> + #include <asm/irq_regs.h> #include <linux/perf_event.h> diff --git a/lib/Kconfig b/lib/Kconfig index fe7e8e175db8..0c8b78a9ae2e 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -394,6 +394,16 @@ config HAS_DMA depends on !NO_DMA default y +config DMA_NOOP_OPS + bool + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) + default n + +config DMA_VIRT_OPS + bool + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) + default n + config CHECK_SIGNATURE bool @@ -549,7 +559,7 @@ config SBITMAP bool config PARMAN - tristate + tristate "parman" if COMPILE_TEST config PRIME_NUMBERS tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 55735c9bdb75..97d62c2da6c2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -729,19 +729,6 @@ source "lib/Kconfig.kmemcheck" source "lib/Kconfig.kasan" -config DEBUG_REFCOUNT - bool "Verbose refcount checks" - help - Say Y here if you want reference counters (refcount_t and kref) to - generate WARNs on dubious usage. Without this refcount_t will still - be a saturating counter and avoid Use-After-Free by turning it into - a resource leak Denial-Of-Service. - - Use of this option will increase kernel text size but will alert the - admin of potential abuse. - - If in doubt, say "N". 
- endmenu # "Memory Debugging" config ARCH_HAS_KCOV diff --git a/lib/Makefile b/lib/Makefile index 445a39c21f46..320ac46a8725 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -25,9 +25,13 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ earlycpio.o seq_buf.o siphash.o \ nmi_backtrace.o nodemask.o win_minmax.o +CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER +CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER + lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -lib-$(CONFIG_HAS_DMA) += dma-noop.o +lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o +lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o obj-y += lockref.o @@ -37,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ - once.o + once.o refcount.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += hexdump.o diff --git a/lib/bug.c b/lib/bug.c index bc3656e944d2..06edbbef0623 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -45,6 +45,7 @@ #include <linux/kernel.h> #include <linux/bug.h> #include <linux/sched.h> +#include <linux/rculist.h> extern const struct bug_entry __start___bug_table[], __stop___bug_table[]; diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 8c28cbd7e104..17afb0430161 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -13,6 +13,7 @@ #include <linux/debugobjects.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> diff --git a/lib/digsig.c b/lib/digsig.c index 55b8b2f41a9e..03d7c63837ae 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -85,7 +85,7 @@ static int digsig_verify_rsa(struct key *key, struct pubkey_hdr *pkh; down_read(&key->sem); - ukp = user_key_payload(key); + ukp = user_key_payload_locked(key); if (ukp->datalen < sizeof(*pkh)) goto err1; diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 60c57ec936db..b157b46cc9a6 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -17,8 +17,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/sched/task_stack.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> +#include <linux/sched/task.h> #include <linux/stacktrace.h> #include <linux/dma-debug.h> #include <linux/spinlock.h> diff --git a/lib/dma-noop.c b/lib/dma-noop.c index 3d766e78fbe2..de26c8b68f34 100644 --- a/lib/dma-noop.c +++ b/lib/dma-noop.c @@ -1,7 +1,7 @@ /* * lib/dma-noop.c * - * Simple DMA noop-ops that map 1:1 with memory + * DMA operations that map to physical addresses without flushing memory. */ #include <linux/export.h> #include <linux/mm.h> @@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops dma_noop_ops = { +const struct dma_map_ops dma_noop_ops = { .alloc = dma_noop_alloc, .free = dma_noop_free, .map_page = dma_noop_map_page, diff --git a/lib/dma-virt.c b/lib/dma-virt.c new file mode 100644 index 000000000000..dcd4df1f7174 --- /dev/null +++ b/lib/dma-virt.c @@ -0,0 +1,72 @@ +/* + * lib/dma-virt.c + * + * DMA operations that map to virtual addresses without flushing memory. 
+ */ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +static void *dma_virt_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) + *dma_handle = (uintptr_t)ret; + return ret; +} + +static void dma_virt_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return (uintptr_t)(page_address(page) + offset); +} + +static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + sg_dma_address(sg) = (uintptr_t)sg_virt(sg); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return false; +} + +static int dma_virt_supported(struct device *dev, u64 mask) +{ + return true; +} + +const struct dma_map_ops dma_virt_ops = { + .alloc = dma_virt_alloc, + .free = dma_virt_free, + .map_page = dma_virt_map_page, + .map_sg = dma_virt_map_sg, + .mapping_error = dma_virt_mapping_error, + .dma_supported = dma_virt_supported, +}; +EXPORT_SYMBOL(dma_virt_ops); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index c30d07e99dba..625375e7f11f 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/sched.h> +#include <linux/sched/debug.h> #include <linux/smp.h> #include <linux/atomic.h> diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index e77dfe00de36..8fa0791e8a1e 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig @@ -87,6 +87,14 @@ config FONT_6x10 embedded devices with a 320x240 screen, to get a reasonable number of characters (53x24) that are still at a readable size. +config FONT_10x18 + bool "console 10x18 font (not supported by all drivers)" if FONTS + depends on FRAMEBUFFER_CONSOLE + help + This is a high resolution console font for machines with very + big letters. It fits between the sun 12x22 and the normal 8x16 font. + If other fonts are too big or too small for you, say Y, otherwise say N. + config FONT_SUN8x16 bool "Sparc console 8x16 font" depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC) @@ -101,14 +109,6 @@ config FONT_SUN12x22 big letters (like the letters used in the SPARC PROM). If the standard font is unreadable for you, say Y, otherwise say N. -config FONT_10x18 - bool "console 10x18 font (not supported by all drivers)" if FONTS - depends on FRAMEBUFFER_CONSOLE - help - This is a high resolution console font for machines with very - big letters. It fits between the sun 12x22 and the normal 8x16 font. - If other fonts are too big or too small for you, say Y, otherwise say N. - config FONT_AUTOSELECT def_bool y depends on !FONT_8x8 diff --git a/lib/idr.c b/lib/idr.c index 52d2979a05e8..b13682bb0a1c 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -1,1068 +1,409 @@ -/* - * 2002-10-18 written by Jim Houston jim.houston@ccur.com - * Copyright (C) 2002 by Concurrent Computer Corporation - * Distributed under the GNU GPL license version 2. 
- * - * Modified by George Anzinger to reuse immediately and to use - * find bit instructions. Also removed _irq on spinlocks. - * - * Modified by Nadia Derbey to make it RCU safe. - * - * Small id to pointer translation service. - * - * It uses a radix tree like structure as a sparse array indexed - * by the id to obtain the pointer. The bitmap makes allocating - * a new id quick. - * - * You call it to allocate an id (an int) an associate with that id a - * pointer or what ever, we treat it as a (void *). You can pass this - * id to a user for him to pass back at a later time. You then pass - * that id to this code and it returns your pointer. - */ - -#ifndef TEST // to test in user space... -#include <linux/slab.h> -#include <linux/init.h> +#include <linux/bitmap.h> #include <linux/export.h> -#endif -#include <linux/err.h> -#include <linux/string.h> #include <linux/idr.h> +#include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/percpu.h> - -#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) -#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) - -/* Leave the possibility of an incomplete final layer */ -#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) -/* Number of id_layer structs to leave in free list */ -#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) - -static struct kmem_cache *idr_layer_cache; -static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); -static DEFINE_PER_CPU(int, idr_preload_cnt); +DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); static DEFINE_SPINLOCK(simple_ida_lock); -/* the maximum ID which can be allocated given idr->layers */ -static int idr_max(int layers) -{ - int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); - - return (1 << bits) - 1; -} - -/* - * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is - * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and - * so on. - */ -static int idr_layer_prefix_mask(int layer) -{ - return ~idr_max(layer + 1); -} - -static struct idr_layer *get_from_free_list(struct idr *idp) -{ - struct idr_layer *p; - unsigned long flags; - - spin_lock_irqsave(&idp->lock, flags); - if ((p = idp->id_free)) { - idp->id_free = p->ary[0]; - idp->id_free_cnt--; - p->ary[0] = NULL; - } - spin_unlock_irqrestore(&idp->lock, flags); - return(p); -} - /** - * idr_layer_alloc - allocate a new idr_layer - * @gfp_mask: allocation mask - * @layer_idr: optional idr to allocate from - * - * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch - * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch - * an idr_layer from @idr->id_free. - * - * @layer_idr is to maintain backward compatibility with the old alloc - * interface - idr_pre_get() and idr_get_new*() - and will be removed - * together with per-pool preload buffer. - */ -static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) -{ - struct idr_layer *new; - - /* this is the old path, bypass to get_from_free_list() */ - if (layer_idr) - return get_from_free_list(layer_idr); - - /* - * Try to allocate directly from kmem_cache. We want to try this - * before preload buffer; otherwise, non-preloading idr_alloc() - * users will end up taking advantage of preloading ones. As the - * following is allowed to fail for preloaded cases, suppress - * warning this time. - */ - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); - if (new) - return new; - - /* - * Try to fetch one from the per-cpu preload buffer if in process - * context. See idr_preload() for details. 
- */ - if (!in_interrupt()) { - preempt_disable(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; - } - preempt_enable(); - if (new) - return new; - } - - /* - * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so - * that memory allocation failure warning is printed as intended. - */ - return kmem_cache_zalloc(idr_layer_cache, gfp_mask); -} - -static void idr_layer_rcu_free(struct rcu_head *head) -{ - struct idr_layer *layer; - - layer = container_of(head, struct idr_layer, rcu_head); - kmem_cache_free(idr_layer_cache, layer); -} - -static inline void free_layer(struct idr *idr, struct idr_layer *p) -{ - if (idr->hint == p) - RCU_INIT_POINTER(idr->hint, NULL); - call_rcu(&p->rcu_head, idr_layer_rcu_free); -} - -/* only called when idp->lock is held */ -static void __move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - p->ary[0] = idp->id_free; - idp->id_free = p; - idp->id_free_cnt++; -} - -static void move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - unsigned long flags; - - /* - * Depends on the return element being zeroed. - */ - spin_lock_irqsave(&idp->lock, flags); - __move_to_free_list(idp, p); - spin_unlock_irqrestore(&idp->lock, flags); -} - -static void idr_mark_full(struct idr_layer **pa, int id) -{ - struct idr_layer *p = pa[0]; - int l = 0; - - __set_bit(id & IDR_MASK, p->bitmap); - /* - * If this layer is full mark the bit in the layer above to - * show that this part of the radix tree is full. This may - * complete the layer above and require walking up the radix - * tree. - */ - while (bitmap_full(p->bitmap, IDR_SIZE)) { - if (!(p = pa[++l])) - break; - id = id >> IDR_BITS; - __set_bit((id & IDR_MASK), p->bitmap); - } -} - -static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) -{ - while (idp->id_free_cnt < MAX_IDR_FREE) { - struct idr_layer *new; - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - if (new == NULL) - return (0); - move_to_free_list(idp, new); - } - return 1; -} - -/** - * sub_alloc - try to allocate an id without growing the tree depth - * @idp: idr handle - * @starting_id: id to start search at - * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer - * @gfp_mask: allocation mask for idr_layer_alloc() - * @layer_idr: optional idr passed to idr_layer_alloc() - * - * Allocate an id in range [@starting_id, INT_MAX] from @idp without - * growing its depth. Returns - * - * the allocated id >= 0 if successful, - * -EAGAIN if the tree needs to grow for allocation to succeed, - * -ENOSPC if the id space is exhausted, - * -ENOMEM if more idr_layers need to be allocated. - */ -static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, - gfp_t gfp_mask, struct idr *layer_idr) -{ - int n, m, sh; - struct idr_layer *p, *new; - int l, id, oid; - - id = *starting_id; - restart: - p = idp->top; - l = idp->layers; - pa[l--] = NULL; - while (1) { - /* - * We run around this while until we reach the leaf node... - */ - n = (id >> (IDR_BITS*l)) & IDR_MASK; - m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); - if (m == IDR_SIZE) { - /* no space available go back to previous layer. */ - l++; - oid = id; - id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; - - /* if already at the top layer, we need to grow */ - if (id > idr_max(idp->layers)) { - *starting_id = id; - return -EAGAIN; - } - p = pa[l]; - BUG_ON(!p); - - /* If we need to go up one layer, continue the - * loop; otherwise, restart from the top. 
- */ - sh = IDR_BITS * (l + 1); - if (oid >> sh == id >> sh) - continue; - else - goto restart; - } - if (m != n) { - sh = IDR_BITS*l; - id = ((id >> sh) ^ n ^ m) << sh; - } - if ((id >= MAX_IDR_BIT) || (id < 0)) - return -ENOSPC; - if (l == 0) - break; - /* - * Create the layer below if it is missing. - */ - if (!p->ary[m]) { - new = idr_layer_alloc(gfp_mask, layer_idr); - if (!new) - return -ENOMEM; - new->layer = l-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - rcu_assign_pointer(p->ary[m], new); - p->count++; - } - pa[l--] = p; - p = p->ary[m]; - } - - pa[l] = p; - return id; -} - -static int idr_get_empty_slot(struct idr *idp, int starting_id, - struct idr_layer **pa, gfp_t gfp_mask, - struct idr *layer_idr) -{ - struct idr_layer *p, *new; - int layers, v, id; - unsigned long flags; - - id = starting_id; -build_up: - p = idp->top; - layers = idp->layers; - if (unlikely(!p)) { - if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) - return -ENOMEM; - p->layer = 0; - layers = 1; - } - /* - * Add a new layer to the top of the tree if the requested - * id is larger than the currently allocated space. - */ - while (id > idr_max(layers)) { - layers++; - if (!p->count) { - /* special case: if the tree is currently empty, - * then we grow the tree by moving the top node - * upwards. - */ - p->layer++; - WARN_ON_ONCE(p->prefix); - continue; - } - if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { - /* - * The allocation failed. If we built part of - * the structure tear it down. - */ - spin_lock_irqsave(&idp->lock, flags); - for (new = p; p && p != idp->top; new = p) { - p = p->ary[0]; - new->ary[0] = NULL; - new->count = 0; - bitmap_clear(new->bitmap, 0, IDR_SIZE); - __move_to_free_list(idp, new); - } - spin_unlock_irqrestore(&idp->lock, flags); - return -ENOMEM; - } - new->ary[0] = p; - new->count = 1; - new->layer = layers-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - if (bitmap_full(p->bitmap, IDR_SIZE)) - __set_bit(0, new->bitmap); - p = new; - } - rcu_assign_pointer(idp->top, p); - idp->layers = layers; - v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); - if (v == -EAGAIN) - goto build_up; - return(v); -} - -/* - * @id and @pa are from a successful allocation from idr_get_empty_slot(). - * Install the user pointer @ptr and mark the slot full. - */ -static void idr_fill_slot(struct idr *idr, void *ptr, int id, - struct idr_layer **pa) -{ - /* update hint used for lookup, cleared from free_layer() */ - rcu_assign_pointer(idr->hint, pa[0]); - - rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); - pa[0]->count++; - idr_mark_full(pa, id); -} - - -/** - * idr_preload - preload for idr_alloc() - * @gfp_mask: allocation mask to use for preloading - * - * Preload per-cpu layer buffer for idr_alloc(). Can only be used from - * process context and each idr_preload() invocation should be matched with - * idr_preload_end(). Note that preemption is disabled while preloaded. - * - * The first idr_alloc() in the preloaded section can be treated as if it - * were invoked with @gfp_mask used for preloading. This allows using more - * permissive allocation masks for idrs protected by spinlocks. - * - * For example, if idr_alloc() below fails, the failure can be treated as - * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. 
- * - * idr_preload(GFP_KERNEL); - * spin_lock(lock); - * - * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); - * - * spin_unlock(lock); - * idr_preload_end(); - * if (id < 0) - * error; - */ -void idr_preload(gfp_t gfp_mask) -{ - /* - * Consuming preload buffer from non-process context breaks preload - * allocation guarantee. Disallow usage from those contexts. - */ - WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - - preempt_disable(); - - /* - * idr_alloc() is likely to succeed w/o full idr_layer buffer and - * return value from idr_alloc() needs to be checked for failure - * anyway. Silently give up if allocation fails. The caller can - * treat failures from idr_alloc() as if idr_alloc() were called - * with @gfp_mask which should be enough. - */ - while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { - struct idr_layer *new; - - preempt_enable(); - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - preempt_disable(); - if (!new) - break; - - /* link the new one to per-cpu preload list */ - new->ary[0] = __this_cpu_read(idr_preload_head); - __this_cpu_write(idr_preload_head, new); - __this_cpu_inc(idr_preload_cnt); - } -} -EXPORT_SYMBOL(idr_preload); - -/** - * idr_alloc - allocate new idr entry - * @idr: the (initialized) idr + * idr_alloc - allocate an id + * @idr: idr handle * @ptr: pointer to be associated with the new id * @start: the minimum id (inclusive) - * @end: the maximum id (exclusive, <= 0 for max) - * @gfp_mask: memory allocation flags + * @end: the maximum id (exclusive) + * @gfp: memory allocation flags * - * Allocate an id in [start, end) and associate it with @ptr. If no ID is - * available in the specified range, returns -ENOSPC. On memory allocation - * failure, returns -ENOMEM. + * Allocates an unused ID in the range [start, end). Returns -ENOSPC + * if there are no unused IDs in that range. * * Note that @end is treated as max when <= 0. This is to always allow * using @start + N as @end as long as N is inside integer range. * - * The user is responsible for exclusively synchronizing all operations - * which may modify @idr. However, read-only accesses such as idr_find() - * or iteration can be performed under RCU read lock provided the user - * destroys @ptr in RCU-safe way after removal from idr. + * Simultaneous modifications to the @idr are not allowed and should be + * prevented by the user, usually with a lock. idr_alloc() may be called + * concurrently with read-only accesses to the @idr, such as idr_find() and + * idr_for_each_entry(). */ -int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) +int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { - int max = end > 0 ? 
end - 1 : INT_MAX; /* inclusive upper limit */ - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - int id; + void __rcu **slot; + struct radix_tree_iter iter; - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - - /* sanity checks */ if (WARN_ON_ONCE(start < 0)) return -EINVAL; - if (unlikely(max < start)) - return -ENOSPC; + if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) + return -EINVAL; - /* allocate id */ - id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); - if (unlikely(id < 0)) - return id; - if (unlikely(id > max)) - return -ENOSPC; + radix_tree_iter_init(&iter, start); + slot = idr_get_free(&idr->idr_rt, &iter, gfp, end); + if (IS_ERR(slot)) + return PTR_ERR(slot); - idr_fill_slot(idr, ptr, id, pa); - return id; + radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); + radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); + return iter.index; } EXPORT_SYMBOL_GPL(idr_alloc); /** * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion - * @idr: the (initialized) idr + * @idr: idr handle * @ptr: pointer to be associated with the new id * @start: the minimum id (inclusive) - * @end: the maximum id (exclusive, <= 0 for max) - * @gfp_mask: memory allocation flags - * - * Essentially the same as idr_alloc, but prefers to allocate progressively - * higher ids if it can. If the "cur" counter wraps, then it will start again - * at the "start" end of the range and allocate one that has already been used. - */ -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, - gfp_t gfp_mask) -{ - int id; - - id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask); - if (id == -ENOSPC) - id = idr_alloc(idr, ptr, start, end, gfp_mask); - - if (likely(id >= 0)) - idr->cur = id + 1; - return id; -} -EXPORT_SYMBOL(idr_alloc_cyclic); - -static void idr_remove_warning(int id) -{ - WARN(1, "idr_remove called for id=%d which is not allocated.\n", id); -} - -static void sub_remove(struct idr *idp, int shift, int id) -{ - struct idr_layer *p = idp->top; - struct idr_layer **pa[MAX_IDR_LEVEL + 1]; - struct idr_layer ***paa = &pa[0]; - struct idr_layer *to_free; - int n; - - *paa = NULL; - *++paa = &idp->top; - - while ((shift > 0) && p) { - n = (id >> shift) & IDR_MASK; - __clear_bit(n, p->bitmap); - *++paa = &p->ary[n]; - p = p->ary[n]; - shift -= IDR_BITS; - } - n = id & IDR_MASK; - if (likely(p != NULL && test_bit(n, p->bitmap))) { - __clear_bit(n, p->bitmap); - RCU_INIT_POINTER(p->ary[n], NULL); - to_free = NULL; - while(*paa && ! --((**paa)->count)){ - if (to_free) - free_layer(idp, to_free); - to_free = **paa; - **paa-- = NULL; - } - if (!*paa) - idp->layers = 0; - if (to_free) - free_layer(idp, to_free); - } else - idr_remove_warning(id); -} - -/** - * idr_remove - remove the given id and free its slot - * @idp: idr handle - * @id: unique key - */ -void idr_remove(struct idr *idp, int id) -{ - struct idr_layer *p; - struct idr_layer *to_free; - - if (id < 0) - return; - - if (id > idr_max(idp->layers)) { - idr_remove_warning(id); - return; - } - - sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); - if (idp->top && idp->top->count == 1 && (idp->layers > 1) && - idp->top->ary[0]) { - /* - * Single child at leftmost slot: we can shrink the tree. - * This level is not needed anymore since when layers are - * inserted, they are inserted at the top of the existing - * tree. 
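The new idr_alloc() documentation above requires callers to serialise modifications, usually with a lock, while read-only lookups may run under RCU. A minimal sketch of the usual preload pattern, assuming the invented names my_idr, my_lock and my_assign_id (not part of this patch):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

/*
 * Allocate an ID in [1, INT_MAX] for @ptr.  Memory is preallocated with
 * GFP_KERNEL outside the lock, so the locked allocation can use GFP_NOWAIT.
 */
static int my_assign_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);	/* end <= 0 means "no upper bound" */
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, -ENOMEM or -ENOSPC on failure */
}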
- */ - to_free = idp->top; - p = idp->top->ary[0]; - rcu_assign_pointer(idp->top, p); - --idp->layers; - to_free->count = 0; - bitmap_clear(to_free->bitmap, 0, IDR_SIZE); - free_layer(idp, to_free); - } -} -EXPORT_SYMBOL(idr_remove); - -static void __idr_remove_all(struct idr *idp) -{ - int n, id, max; - int bt_mask; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - *paa = idp->top; - RCU_INIT_POINTER(idp->top, NULL); - max = idr_max(idp->layers); - - id = 0; - while (id >= 0 && id <= max) { - p = *paa; - while (n > IDR_BITS && p) { - n -= IDR_BITS; - p = p->ary[(id >> n) & IDR_MASK]; - *++paa = p; - } - - bt_mask = id; - id += 1 << n; - /* Get the highest bit that the above add changed from 0->1. */ - while (n < fls(id ^ bt_mask)) { - if (*paa) - free_layer(idp, *paa); - n += IDR_BITS; - --paa; - } - } - idp->layers = 0; -} - -/** - * idr_destroy - release all cached layers within an idr tree - * @idp: idr handle - * - * Free all id mappings and all idp_layers. After this function, @idp is - * completely unused and can be freed / recycled. The caller is - * responsible for ensuring that no one else accesses @idp during or after - * idr_destroy(). + * @end: the maximum id (exclusive) + * @gfp: memory allocation flags * - * A typical clean-up sequence for objects stored in an idr tree will use - * idr_for_each() to free all objects, if necessary, then idr_destroy() to - * free up the id mappings and cached idr_layers. + * Allocates an ID larger than the last ID allocated if one is available. + * If not, it will attempt to allocate the smallest ID that is larger or + * equal to @start. */ -void idr_destroy(struct idr *idp) +int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { - __idr_remove_all(idp); + int id, curr = idr->idr_next; - while (idp->id_free_cnt) { - struct idr_layer *p = get_from_free_list(idp); - kmem_cache_free(idr_layer_cache, p); - } -} -EXPORT_SYMBOL(idr_destroy); + if (curr < start) + curr = start; -void *idr_find_slowpath(struct idr *idp, int id) -{ - int n; - struct idr_layer *p; - - if (id < 0) - return NULL; - - p = rcu_dereference_raw(idp->top); - if (!p) - return NULL; - n = (p->layer+1) * IDR_BITS; + id = idr_alloc(idr, ptr, curr, end, gfp); + if ((id == -ENOSPC) && (curr > start)) + id = idr_alloc(idr, ptr, start, curr, gfp); - if (id > idr_max(p->layer + 1)) - return NULL; - BUG_ON(n == 0); + if (id >= 0) + idr->idr_next = id + 1U; - while (n > 0 && p) { - n -= IDR_BITS; - BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - } - return((void *)p); + return id; } -EXPORT_SYMBOL(idr_find_slowpath); +EXPORT_SYMBOL(idr_alloc_cyclic); /** * idr_for_each - iterate through all stored pointers - * @idp: idr handle + * @idr: idr handle * @fn: function to be called for each pointer - * @data: data passed back to callback function + * @data: data passed to callback function * - * Iterate over the pointers registered with the given idr. The - * callback function will be called for each pointer currently - * registered, passing the id, the pointer and the data pointer passed - * to this function. It is not safe to modify the idr tree while in - * the callback, so functions such as idr_get_new and idr_remove are - * not allowed. + * The callback function will be called for each entry in @idr, passing + * the id, the pointer and the data pointer passed to this function. * - * We check the return of @fn each time. 
If it returns anything other - * than %0, we break out and return that value. + * If @fn returns anything other than %0, the iteration stops and that + * value is returned from this function. * - * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). + * idr_for_each() can be called concurrently with idr_alloc() and + * idr_remove() if protected by RCU. Newly added entries may not be + * seen and deleted entries may be seen, but adding and removing entries + * will not cause other entries to be skipped, nor spurious ones to be seen. */ -int idr_for_each(struct idr *idp, - int (*fn)(int id, void *p, void *data), void *data) +int idr_for_each(const struct idr *idr, + int (*fn)(int id, void *p, void *data), void *data) { - int n, id, max, error = 0; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - *paa = rcu_dereference_raw(idp->top); - max = idr_max(idp->layers); + struct radix_tree_iter iter; + void __rcu **slot; - id = 0; - while (id >= 0 && id <= max) { - p = *paa; - while (n > 0 && p) { - n -= IDR_BITS; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - *++paa = p; - } - - if (p) { - error = fn(id, (void *)p, data); - if (error) - break; - } - - id += 1 << n; - while (n < fls(id)) { - n += IDR_BITS; - --paa; - } + radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { + int ret = fn(iter.index, rcu_dereference_raw(*slot), data); + if (ret) + return ret; } - return error; + return 0; } EXPORT_SYMBOL(idr_for_each); /** - * idr_get_next - lookup next object of id to given id. - * @idp: idr handle - * @nextidp: pointer to lookup key - * - * Returns pointer to registered object with id, which is next number to - * given id. After being looked up, *@nextidp will be updated for the next - * iteration. - * - * This function can be called under rcu_read_lock(), given that the leaf - * pointers lifetimes are correctly managed. + * idr_get_next - Find next populated entry + * @idr: idr handle + * @nextid: Pointer to lowest possible ID to return + * + * Returns the next populated entry in the tree with an ID greater than + * or equal to the value pointed to by @nextid. On exit, @nextid is updated + * to the ID of the found value. To use in a loop, the value pointed to by + * nextid must be incremented by the user. */ -void *idr_get_next(struct idr *idp, int *nextidp) +void *idr_get_next(struct idr *idr, int *nextid) { - struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - int id = *nextidp; - int n, max; + struct radix_tree_iter iter; + void __rcu **slot; - /* find first ent */ - p = *paa = rcu_dereference_raw(idp->top); - if (!p) + slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid); + if (!slot) return NULL; - n = (p->layer + 1) * IDR_BITS; - max = idr_max(p->layer + 1); - - while (id >= 0 && id <= max) { - p = *paa; - while (n > 0 && p) { - n -= IDR_BITS; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - *++paa = p; - } - - if (p) { - *nextidp = id; - return p; - } - /* - * Proceed to the next layer at the current level. Unlike - * idr_for_each(), @id isn't guaranteed to be aligned to - * layer boundary at this point and adding 1 << n may - * incorrectly skip IDs. Make sure we jump to the - * beginning of the next layer using round_up(). 
- */ - id = round_up(id + 1, 1 << n); - while (n < fls(id)) { - n += IDR_BITS; - --paa; - } - } - return NULL; + *nextid = iter.index; + return rcu_dereference_raw(*slot); } EXPORT_SYMBOL(idr_get_next); - /** * idr_replace - replace pointer for given id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: lookup key + * @idr: idr handle + * @ptr: New pointer to associate with the ID + * @id: Lookup key * - * Replace the pointer registered with an id and return the old value. - * A %-ENOENT return indicates that @id was not found. - * A %-EINVAL return indicates that @id was not within valid constraints. + * Replace the pointer registered with an ID and return the old value. + * This function can be called under the RCU read lock concurrently with + * idr_alloc() and idr_remove() (as long as the ID being removed is not + * the one being replaced!). * - * The caller must serialize with writers. + * Returns: 0 on success. %-ENOENT indicates that @id was not found. + * %-EINVAL indicates that @id or @ptr were not valid. */ -void *idr_replace(struct idr *idp, void *ptr, int id) +void *idr_replace(struct idr *idr, void *ptr, int id) { - int n; - struct idr_layer *p, *old_p; + struct radix_tree_node *node; + void __rcu **slot = NULL; + void *entry; - if (id < 0) + if (WARN_ON_ONCE(id < 0)) + return ERR_PTR(-EINVAL); + if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) return ERR_PTR(-EINVAL); - p = idp->top; - if (!p) - return ERR_PTR(-ENOENT); - - if (id > idr_max(p->layer + 1)) - return ERR_PTR(-ENOENT); - - n = p->layer * IDR_BITS; - while ((n > 0) && p) { - p = p->ary[(id >> n) & IDR_MASK]; - n -= IDR_BITS; - } - - n = id & IDR_MASK; - if (unlikely(p == NULL || !test_bit(n, p->bitmap))) + entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); + if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) return ERR_PTR(-ENOENT); - old_p = p->ary[n]; - rcu_assign_pointer(p->ary[n], ptr); + __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL); - return old_p; + return entry; } EXPORT_SYMBOL(idr_replace); -void __init idr_init_cache(void) -{ - idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); -} - -/** - * idr_init - initialize idr handle - * @idp: idr handle - * - * This function is use to set up the handle (@idp) that you will pass - * to the rest of the functions. - */ -void idr_init(struct idr *idp) -{ - memset(idp, 0, sizeof(struct idr)); - spin_lock_init(&idp->lock); -} -EXPORT_SYMBOL(idr_init); - -static int idr_has_entry(int id, void *p, void *data) -{ - return 1; -} - -bool idr_is_empty(struct idr *idp) -{ - return !idr_for_each(idp, idr_has_entry, NULL); -} -EXPORT_SYMBOL(idr_is_empty); - /** * DOC: IDA description - * IDA - IDR based ID allocator * - * This is id allocator without id -> pointer translation. Memory - * usage is much lower than full blown idr because each id only - * occupies a bit. ida uses a custom leaf node which contains - * IDA_BITMAP_BITS slots. - * - * 2007-04-25 written by Tejun Heo <htejun@gmail.com> + * The IDA is an ID allocator which does not provide the ability to + * associate an ID with a pointer. As such, it only needs to store one + * bit per ID, and so is more space efficient than an IDR. To use an IDA, + * define it using DEFINE_IDA() (or embed a &struct ida in a data structure, + * then initialise it using ida_init()). To allocate a new ID, call + * ida_simple_get(). To free an ID, call ida_simple_remove(). 
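A minimal sketch of the simple IDA interface described above, using invented names (my_instance_ida and the two helpers are illustrative, not part of this patch):

#include <linux/idr.h>

static DEFINE_IDA(my_instance_ida);

/* Allocate an instance number in [0, 63]; returns the number or -errno. */
static int my_instance_alloc(void)
{
	return ida_simple_get(&my_instance_ida, 0, 64, GFP_KERNEL);
}

static void my_instance_free(int nr)
{
	ida_simple_remove(&my_instance_ida, nr);
}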
+ * + * If you have more complex locking requirements, use a loop around + * ida_pre_get() and ida_get_new() to allocate a new ID. Then use + * ida_remove() to free an ID. You must make sure that ida_get_new() and + * ida_remove() cannot be called at the same time as each other for the + * same IDA. + * + * You can also use ida_get_new_above() if you need an ID to be allocated + * above a particular number. ida_destroy() can be used to dispose of an + * IDA without needing to free the individual IDs in it. You can use + * ida_is_empty() to find out whether the IDA has any IDs currently allocated. + * + * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward + * limitation, it should be quite straightforward to raise the maximum. */ -static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) -{ - unsigned long flags; - - if (!ida->free_bitmap) { - spin_lock_irqsave(&ida->idr.lock, flags); - if (!ida->free_bitmap) { - ida->free_bitmap = bitmap; - bitmap = NULL; - } - spin_unlock_irqrestore(&ida->idr.lock, flags); - } - - kfree(bitmap); -} - -/** - * ida_pre_get - reserve resources for ida allocation - * @ida: ida handle - * @gfp_mask: memory allocation flag - * - * This function should be called prior to locking and calling the - * following function. It preallocates enough memory to satisfy the - * worst possible allocation. - * - * If the system is REALLY out of memory this function returns %0, - * otherwise %1. +/* + * Developer's notes: + * + * The IDA uses the functionality provided by the IDR & radix tree to store + * bitmaps in each entry. The IDR_FREE tag means there is at least one bit + * free, unlike the IDR where it means at least one entry is free. + * + * I considered telling the radix tree that each slot is an order-10 node + * and storing the bit numbers in the radix tree, but the radix tree can't + * allow a single multiorder entry at index 0, which would significantly + * increase memory consumption for the IDA. So instead we divide the index + * by the number of bits in the leaf bitmap before doing a radix tree lookup. + * + * As an optimisation, if there are only a few low bits set in any given + * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional + * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits + * directly in the entry. By being really tricksy, we could store + * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising + * for 0-3 allocated IDs. + * + * We allow the radix tree 'exceptional' count to get out of date. Nothing + * in the IDA nor the radix tree code checks it. If it becomes important + * to maintain an accurate exceptional count, switch the rcu_assign_pointer() + * calls to radix_tree_iter_replace() which will correct the exceptional + * count. + * + * The IDA always requires a lock to alloc/free. If we add a 'test_bit' + * equivalent, it will still need locking. Going to RCU lookup would require + * using RCU to free bitmaps, and that's not trivial without embedding an + * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte + * bitmap, which is excessive. 
*/ -int ida_pre_get(struct ida *ida, gfp_t gfp_mask) -{ - /* allocate idr_layers */ - if (!__idr_pre_get(&ida->idr, gfp_mask)) - return 0; - /* allocate free_bitmap */ - if (!ida->free_bitmap) { - struct ida_bitmap *bitmap; - - bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); - if (!bitmap) - return 0; - - free_bitmap(ida, bitmap); - } - - return 1; -} -EXPORT_SYMBOL(ida_pre_get); +#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS) /** * ida_get_new_above - allocate new ID above or equal to a start id - * @ida: ida handle - * @starting_id: id to start search at - * @p_id: pointer to the allocated handle + * @ida: ida handle + * @start: id to start search at + * @id: pointer to the allocated handle * - * Allocate new ID above or equal to @starting_id. It should be called - * with any required locks. + * Allocate new ID above or equal to @start. It should be called + * with any required locks to ensure that concurrent calls to + * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed. + * Consider using ida_simple_get() if you do not have complex locking + * requirements. * * If memory is required, it will return %-EAGAIN, you should unlock * and go back to the ida_pre_get() call. If the ida is full, it will - * return %-ENOSPC. - * - * Note that callers must ensure that concurrent access to @ida is not possible. - * See ida_simple_get() for a varaint which takes care of locking. + * return %-ENOSPC. On success, it will return 0. * - * @p_id returns a value in the range @starting_id ... %0x7fffffff. + * @id returns a value in the range @start ... %0x7fffffff. */ -int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) +int ida_get_new_above(struct ida *ida, int start, int *id) { - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; + struct radix_tree_root *root = &ida->ida_rt; + void __rcu **slot; + struct radix_tree_iter iter; struct ida_bitmap *bitmap; - unsigned long flags; - int idr_id = starting_id / IDA_BITMAP_BITS; - int offset = starting_id % IDA_BITMAP_BITS; - int t, id; - - restart: - /* get vacant slot */ - t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); - if (t < 0) - return t == -ENOMEM ? 
-EAGAIN : t; - - if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) - return -ENOSPC; - - if (t != idr_id) - offset = 0; - idr_id = t; - - /* if bitmap isn't there, create a new one */ - bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; - if (!bitmap) { - spin_lock_irqsave(&ida->idr.lock, flags); - bitmap = ida->free_bitmap; - ida->free_bitmap = NULL; - spin_unlock_irqrestore(&ida->idr.lock, flags); - - if (!bitmap) - return -EAGAIN; - - memset(bitmap, 0, sizeof(struct ida_bitmap)); - rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], - (void *)bitmap); - pa[0]->count++; - } - - /* lookup for empty slot */ - t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); - if (t == IDA_BITMAP_BITS) { - /* no empty slot after offset, continue to the next chunk */ - idr_id++; - offset = 0; - goto restart; - } - - id = idr_id * IDA_BITMAP_BITS + t; - if (id >= MAX_IDR_BIT) - return -ENOSPC; + unsigned long index; + unsigned bit, ebit; + int new; + + index = start / IDA_BITMAP_BITS; + bit = start % IDA_BITMAP_BITS; + ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; + + slot = radix_tree_iter_init(&iter, index); + for (;;) { + if (slot) + slot = radix_tree_next_slot(slot, &iter, + RADIX_TREE_ITER_TAGGED); + if (!slot) { + slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX); + if (IS_ERR(slot)) { + if (slot == ERR_PTR(-ENOMEM)) + return -EAGAIN; + return PTR_ERR(slot); + } + } + if (iter.index > index) { + bit = 0; + ebit = RADIX_TREE_EXCEPTIONAL_SHIFT; + } + new = iter.index * IDA_BITMAP_BITS; + bitmap = rcu_dereference_raw(*slot); + if (radix_tree_exception(bitmap)) { + unsigned long tmp = (unsigned long)bitmap; + ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit); + if (ebit < BITS_PER_LONG) { + tmp |= 1UL << ebit; + rcu_assign_pointer(*slot, (void *)tmp); + *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT; + return 0; + } + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAIN; + memset(bitmap, 0, sizeof(*bitmap)); + bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; + rcu_assign_pointer(*slot, bitmap); + } - __set_bit(t, bitmap->bitmap); - if (++bitmap->nr_busy == IDA_BITMAP_BITS) - idr_mark_full(pa, idr_id); + if (bitmap) { + bit = find_next_zero_bit(bitmap->bitmap, + IDA_BITMAP_BITS, bit); + new += bit; + if (new < 0) + return -ENOSPC; + if (bit == IDA_BITMAP_BITS) + continue; - *p_id = id; + __set_bit(bit, bitmap->bitmap); + if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) + radix_tree_iter_tag_clear(root, &iter, + IDR_FREE); + } else { + new += bit; + if (new < 0) + return -ENOSPC; + if (ebit < BITS_PER_LONG) { + bitmap = (void *)((1UL << ebit) | + RADIX_TREE_EXCEPTIONAL_ENTRY); + radix_tree_iter_replace(root, &iter, slot, + bitmap); + *id = new; + return 0; + } + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAIN; + memset(bitmap, 0, sizeof(*bitmap)); + __set_bit(bit, bitmap->bitmap); + radix_tree_iter_replace(root, &iter, slot, bitmap); + } - /* Each leaf node can handle nearly a thousand slots and the - * whole idea of ida is to have small memory foot print. - * Throw away extra resources one by one after each successful - * allocation. 
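For callers that provide their own locking, the ida_get_new_above() comment above describes the -EAGAIN retry protocol (drop the lock, call ida_pre_get(), retry). Spelled out as a sketch with invented names (my_ida, my_lock, my_id_alloc):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDA(my_ida);
static DEFINE_SPINLOCK(my_lock);

/* Allocate an ID >= 16 under a caller-provided lock, retrying on -EAGAIN. */
static int my_id_alloc(void)
{
	int id, ret;

	do {
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		ret = ida_get_new_above(&my_ida, 16, &id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;	/* -ENOSPC if the ID space is exhausted */
}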
- */ - if (ida->idr.id_free_cnt || ida->free_bitmap) { - struct idr_layer *p = get_from_free_list(&ida->idr); - if (p) - kmem_cache_free(idr_layer_cache, p); + *id = new; + return 0; } - - return 0; } EXPORT_SYMBOL(ida_get_new_above); /** - * ida_remove - remove the given ID - * @ida: ida handle - * @id: ID to free + * ida_remove - Free the given ID + * @ida: ida handle + * @id: ID to free + * + * This function should not be called at the same time as ida_get_new_above(). */ void ida_remove(struct ida *ida, int id) { - struct idr_layer *p = ida->idr.top; - int shift = (ida->idr.layers - 1) * IDR_BITS; - int idr_id = id / IDA_BITMAP_BITS; - int offset = id % IDA_BITMAP_BITS; - int n; + unsigned long index = id / IDA_BITMAP_BITS; + unsigned offset = id % IDA_BITMAP_BITS; struct ida_bitmap *bitmap; + unsigned long *btmp; + struct radix_tree_iter iter; + void __rcu **slot; - if (idr_id > idr_max(ida->idr.layers)) + slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index); + if (!slot) goto err; - /* clear full bits while looking up the leaf idr_layer */ - while ((shift > 0) && p) { - n = (idr_id >> shift) & IDR_MASK; - __clear_bit(n, p->bitmap); - p = p->ary[n]; - shift -= IDR_BITS; + bitmap = rcu_dereference_raw(*slot); + if (radix_tree_exception(bitmap)) { + btmp = (unsigned long *)slot; + offset += RADIX_TREE_EXCEPTIONAL_SHIFT; + if (offset >= BITS_PER_LONG) + goto err; + } else { + btmp = bitmap->bitmap; } - - if (p == NULL) - goto err; - - n = idr_id & IDR_MASK; - __clear_bit(n, p->bitmap); - - bitmap = (void *)p->ary[n]; - if (!bitmap || !test_bit(offset, bitmap->bitmap)) + if (!test_bit(offset, btmp)) goto err; - /* update bitmap and remove it if empty */ - __clear_bit(offset, bitmap->bitmap); - if (--bitmap->nr_busy == 0) { - __set_bit(n, p->bitmap); /* to please idr_remove() */ - idr_remove(&ida->idr, idr_id); - free_bitmap(ida, bitmap); + __clear_bit(offset, btmp); + radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE); + if (radix_tree_exception(bitmap)) { + if (rcu_dereference_raw(*slot) == + (void *)RADIX_TREE_EXCEPTIONAL_ENTRY) + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) { + kfree(bitmap); + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); } - return; - err: WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); } EXPORT_SYMBOL(ida_remove); /** - * ida_destroy - release all cached layers within an ida tree - * @ida: ida handle + * ida_destroy - Free the contents of an ida + * @ida: ida handle + * + * Calling this function releases all resources associated with an IDA. When + * this call returns, the IDA is empty and can be reused or freed. The caller + * should not allow ida_remove() or ida_get_new_above() to be called at the + * same time. */ void ida_destroy(struct ida *ida) { - idr_destroy(&ida->idr); - kfree(ida->free_bitmap); + struct radix_tree_iter iter; + void __rcu **slot; + + radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { + struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); + if (!radix_tree_exception(bitmap)) + kfree(bitmap); + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + } } EXPORT_SYMBOL(ida_destroy); @@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id) spin_unlock_irqrestore(&simple_ida_lock, flags); } EXPORT_SYMBOL(ida_simple_remove); - -/** - * ida_init - initialize ida handle - * @ida: ida handle - * - * This function is use to set up the handle (@ida) that you will pass - * to the rest of the functions. 
- */ -void ida_init(struct ida *ida) -{ - memset(ida, 0, sizeof(struct ida)); - idr_init(&ida->idr); - -} -EXPORT_SYMBOL(ida_init); diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index 391fd23976a2..9c7d89df40ed 100644 --- a/lib/is_single_threaded.c +++ b/lib/is_single_threaded.c @@ -9,8 +9,9 @@ * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ - -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/sched/mm.h> /* * Returns true if the task does not share ->mm with another thread/process. diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 5f7999eacad5..4e8a30d1c22f 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -17,6 +17,7 @@ #include <linux/kprobes.h> #include <linux/nmi.h> #include <linux/cpu.h> +#include <linux/sched/debug.h> #ifdef arch_trigger_cpumask_backtrace /* For reliability, we're prepared to waste bits here. */ diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c8cebb137076..9c21000df0b5 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu) spin_lock_irq(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; - unsigned long flags; - raw_spin_lock_irqsave(&fbc->lock, flags); + raw_spin_lock(&fbc->lock); pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; - raw_spin_unlock_irqrestore(&fbc->lock, flags); + raw_spin_unlock(&fbc->lock); } spin_unlock_irq(&percpu_counters_lock); #endif diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 6d40944960de..6016f1deb1f5 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -14,6 +14,7 @@ * General Public License for more details. */ +#include <linux/mm.h> #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/bug.h> @@ -22,7 +23,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/percpu.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/percpu_ida.h> diff --git a/lib/plist.c b/lib/plist.c index 3a30c53db061..199408f91057 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -175,6 +175,7 @@ void plist_requeue(struct plist_node *node, struct plist_head *head) #ifdef CONFIG_DEBUG_PI_LIST #include <linux/sched.h> +#include <linux/sched/clock.h> #include <linux/module.h> #include <linux/init.h> diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 72fab4999c00..5ed506d648c4 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -22,20 +22,21 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/bitmap.h> +#include <linux/bitops.h> #include <linux/cpu.h> #include <linux/errno.h> +#include <linux/export.h> +#include <linux/idr.h> #include <linux/init.h> #include <linux/kernel.h> -#include <linux/export.h> -#include <linux/radix-tree.h> +#include <linux/kmemleak.h> #include <linux/percpu.h> +#include <linux/preempt.h> /* in_interrupt() */ +#include <linux/radix-tree.h> +#include <linux/rcupdate.h> #include <linux/slab.h> -#include <linux/kmemleak.h> -#include <linux/cpu.h> #include <linux/string.h> -#include <linux/bitops.h> -#include <linux/rcupdate.h> -#include <linux/preempt.h> /* in_interrupt() */ /* Number of nodes in fully populated tree of given height */ @@ -60,11 +61,28 @@ static struct kmem_cache *radix_tree_node_cachep; #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) /* + * The IDR does not have to be as high as the radix tree since it uses + * signed integers, not unsigned longs. + */ +#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) +#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) + +/* + * The IDA is even shorter since it uses a bitmap at the last level. + */ +#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS)) +#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1) + +/* * Per-cpu pool of preloaded nodes */ struct radix_tree_preload { unsigned nr; - /* nodes->private_data points to next preallocated node */ + /* nodes->parent points to next preallocated node */ struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; @@ -83,35 +101,38 @@ static inline void *node_to_entry(void *ptr) #ifdef CONFIG_RADIX_TREE_MULTIORDER /* Sibling slots point directly to another slot in the same node */ -static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) +static inline +bool is_sibling_entry(const struct radix_tree_node *parent, void *node) { - void **ptr = node; + void __rcu **ptr = node; return (parent->slots <= ptr) && (ptr < parent->slots + RADIX_TREE_MAP_SIZE); } #else -static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) +static inline +bool is_sibling_entry(const struct radix_tree_node *parent, void *node) { return false; } #endif -static inline unsigned long get_slot_offset(struct radix_tree_node *parent, - void **slot) +static inline unsigned long +get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) { return slot - parent->slots; } -static unsigned int radix_tree_descend(struct radix_tree_node *parent, +static unsigned int radix_tree_descend(const struct radix_tree_node *parent, struct radix_tree_node **nodep, unsigned long index) { unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; - void **entry = rcu_dereference_raw(parent->slots[offset]); + void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); #ifdef CONFIG_RADIX_TREE_MULTIORDER if (radix_tree_is_internal_node(entry)) { if (is_sibling_entry(parent, entry)) { - void **sibentry = (void **) entry_to_node(entry); + void __rcu **sibentry; + sibentry = (void __rcu **) entry_to_node(entry); offset = get_slot_offset(parent, sibentry); entry = rcu_dereference_raw(*sibentry); } @@ -122,7 +143,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent, return offset; } -static inline gfp_t root_gfp_mask(struct radix_tree_root *root) +static inline 
gfp_t root_gfp_mask(const struct radix_tree_root *root) { return root->gfp_mask & __GFP_BITS_MASK; } @@ -139,42 +160,48 @@ static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, __clear_bit(offset, node->tags[tag]); } -static inline int tag_get(struct radix_tree_node *node, unsigned int tag, +static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, int offset) { return test_bit(offset, node->tags[tag]); } -static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) +static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); + root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); + root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear_all(struct radix_tree_root *root) { - root->gfp_mask &= __GFP_BITS_MASK; + root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1; +} + +static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) +{ + return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT)); } -static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) +static inline unsigned root_tags_get(const struct radix_tree_root *root) { - return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); + return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT; } -static inline unsigned root_tags_get(struct radix_tree_root *root) +static inline bool is_idr(const struct radix_tree_root *root) { - return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; + return !!(root->gfp_mask & ROOT_IS_IDR); } /* * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. 
*/ -static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) +static inline int any_tag_set(const struct radix_tree_node *node, + unsigned int tag) { unsigned idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { @@ -184,6 +211,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) return 0; } +static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) +{ + bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); +} + /** * radix_tree_find_next_bit - find the next set bit in a memory region * @@ -232,11 +264,18 @@ static inline unsigned long shift_maxindex(unsigned int shift) return (RADIX_TREE_MAP_SIZE << shift) - 1; } -static inline unsigned long node_maxindex(struct radix_tree_node *node) +static inline unsigned long node_maxindex(const struct radix_tree_node *node) { return shift_maxindex(node->shift); } +static unsigned long next_index(unsigned long index, + const struct radix_tree_node *node, + unsigned long offset) +{ + return (index & ~node_maxindex(node)) + (offset << node->shift); +} + #ifndef __KERNEL__ static void dump_node(struct radix_tree_node *node, unsigned long index) { @@ -275,11 +314,59 @@ static void radix_tree_dump(struct radix_tree_root *root) { pr_debug("radix root: %p rnode %p tags %x\n", root, root->rnode, - root->gfp_mask >> __GFP_BITS_SHIFT); + root->gfp_mask >> ROOT_TAG_SHIFT); if (!radix_tree_is_internal_node(root->rnode)) return; dump_node(entry_to_node(root->rnode), 0); } + +static void dump_ida_node(void *entry, unsigned long index) +{ + unsigned long i; + + if (!entry) + return; + + if (radix_tree_is_internal_node(entry)) { + struct radix_tree_node *node = entry_to_node(entry); + + pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n", + node, node->offset, index * IDA_BITMAP_BITS, + ((index | node_maxindex(node)) + 1) * + IDA_BITMAP_BITS - 1, + node->parent, node->tags[0][0], node->shift, + node->count); + for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) + dump_ida_node(node->slots[i], + index | (i << node->shift)); + } else if (radix_tree_exceptional_entry(entry)) { + pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n", + entry, (int)(index & RADIX_TREE_MAP_MASK), + index * IDA_BITMAP_BITS, + index * IDA_BITMAP_BITS + BITS_PER_LONG - + RADIX_TREE_EXCEPTIONAL_SHIFT, + (unsigned long)entry >> + RADIX_TREE_EXCEPTIONAL_SHIFT); + } else { + struct ida_bitmap *bitmap = entry; + + pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap, + (int)(index & RADIX_TREE_MAP_MASK), + index * IDA_BITMAP_BITS, + (index + 1) * IDA_BITMAP_BITS - 1); + for (i = 0; i < IDA_BITMAP_LONGS; i++) + pr_cont(" %lx", bitmap->bitmap[i]); + pr_cont("\n"); + } +} + +static void ida_dump(struct ida *ida) +{ + struct radix_tree_root *root = &ida->ida_rt; + pr_debug("ida: %p node %p free %d\n", ida, root->rnode, + root->gfp_mask >> ROOT_TAG_SHIFT); + dump_ida_node(root->rnode, 0); +} #endif /* @@ -287,13 +374,12 @@ static void radix_tree_dump(struct radix_tree_root *root) * that the caller has pinned this thread of control to the current CPU. 
*/ static struct radix_tree_node * -radix_tree_node_alloc(struct radix_tree_root *root, - struct radix_tree_node *parent, +radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, + struct radix_tree_root *root, unsigned int shift, unsigned int offset, unsigned int count, unsigned int exceptional) { struct radix_tree_node *ret = NULL; - gfp_t gfp_mask = root_gfp_mask(root); /* * Preload code isn't irq safe and it doesn't make sense to use @@ -321,8 +407,7 @@ radix_tree_node_alloc(struct radix_tree_root *root, rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; - rtp->nodes = ret->private_data; - ret->private_data = NULL; + rtp->nodes = ret->parent; rtp->nr--; } /* @@ -336,11 +421,12 @@ radix_tree_node_alloc(struct radix_tree_root *root, out: BUG_ON(radix_tree_is_internal_node(ret)); if (ret) { - ret->parent = parent; ret->shift = shift; ret->offset = offset; ret->count = count; ret->exceptional = exceptional; + ret->parent = parent; + ret->root = root; } return ret; } @@ -399,7 +485,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { - node->private_data = rtp->nodes; + node->parent = rtp->nodes; rtp->nodes = node; rtp->nr++; } else { @@ -510,7 +596,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } -static unsigned radix_tree_load_root(struct radix_tree_root *root, +static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { struct radix_tree_node *node = rcu_dereference_raw(root->rnode); @@ -530,10 +616,10 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root, /* * Extend a radix tree so it can store key @index. 
*/ -static int radix_tree_extend(struct radix_tree_root *root, +static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, unsigned long index, unsigned int shift) { - struct radix_tree_node *slot; + void *entry; unsigned int maxshift; int tag; @@ -542,32 +628,44 @@ static int radix_tree_extend(struct radix_tree_root *root, while (index > shift_maxindex(maxshift)) maxshift += RADIX_TREE_MAP_SHIFT; - slot = root->rnode; - if (!slot) + entry = rcu_dereference_raw(root->rnode); + if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) goto out; do { - struct radix_tree_node *node = radix_tree_node_alloc(root, - NULL, shift, 0, 1, 0); + struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, + root, shift, 0, 1, 0); if (!node) return -ENOMEM; - /* Propagate the aggregated tag info into the new root */ - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { - if (root_tag_get(root, tag)) - tag_set(node, tag, 0); + if (is_idr(root)) { + all_tag_set(node, IDR_FREE); + if (!root_tag_get(root, IDR_FREE)) { + tag_clear(node, IDR_FREE, 0); + root_tag_set(root, IDR_FREE); + } + } else { + /* Propagate the aggregated tag info to the new child */ + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { + if (root_tag_get(root, tag)) + tag_set(node, tag, 0); + } } BUG_ON(shift > BITS_PER_LONG); - if (radix_tree_is_internal_node(slot)) { - entry_to_node(slot)->parent = node; - } else if (radix_tree_exceptional_entry(slot)) { + if (radix_tree_is_internal_node(entry)) { + entry_to_node(entry)->parent = node; + } else if (radix_tree_exceptional_entry(entry)) { /* Moving an exceptional root->rnode to a node */ node->exceptional = 1; } - node->slots[0] = slot; - slot = node_to_entry(node); - rcu_assign_pointer(root->rnode, slot); + /* + * entry was already in the radix tree, so we do not need + * rcu_assign_pointer here + */ + node->slots[0] = (void __rcu *)entry; + entry = node_to_entry(node); + rcu_assign_pointer(root->rnode, entry); shift += RADIX_TREE_MAP_SHIFT; } while (shift <= maxshift); out: @@ -578,12 +676,14 @@ out: * radix_tree_shrink - shrink radix tree to minimum height * @root radix tree root */ -static inline void radix_tree_shrink(struct radix_tree_root *root, +static inline bool radix_tree_shrink(struct radix_tree_root *root, radix_tree_update_node_t update_node, void *private) { + bool shrunk = false; + for (;;) { - struct radix_tree_node *node = root->rnode; + struct radix_tree_node *node = rcu_dereference_raw(root->rnode); struct radix_tree_node *child; if (!radix_tree_is_internal_node(node)) @@ -597,7 +697,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root, */ if (node->count != 1) break; - child = node->slots[0]; + child = rcu_dereference_raw(node->slots[0]); if (!child) break; if (!radix_tree_is_internal_node(child) && node->shift) @@ -613,7 +713,9 @@ static inline void radix_tree_shrink(struct radix_tree_root *root, * (node->slots[0]), it will be safe to dereference the new * one (root->rnode) as far as dependent read barriers go. */ - root->rnode = child; + root->rnode = (void __rcu *)child; + if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) + root_tag_clear(root, IDR_FREE); /* * We have a dilemma here. 
The node's slot[0] must not be @@ -635,27 +737,34 @@ static inline void radix_tree_shrink(struct radix_tree_root *root, */ node->count = 0; if (!radix_tree_is_internal_node(child)) { - node->slots[0] = RADIX_TREE_RETRY; + node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; if (update_node) update_node(node, private); } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); + shrunk = true; } + + return shrunk; } -static void delete_node(struct radix_tree_root *root, +static bool delete_node(struct radix_tree_root *root, struct radix_tree_node *node, radix_tree_update_node_t update_node, void *private) { + bool deleted = false; + do { struct radix_tree_node *parent; if (node->count) { - if (node == entry_to_node(root->rnode)) - radix_tree_shrink(root, update_node, private); - return; + if (node_to_entry(node) == + rcu_dereference_raw(root->rnode)) + deleted |= radix_tree_shrink(root, update_node, + private); + return deleted; } parent = node->parent; @@ -663,15 +772,23 @@ static void delete_node(struct radix_tree_root *root, parent->slots[node->offset] = NULL; parent->count--; } else { - root_tag_clear_all(root); + /* + * Shouldn't the tags already have all been cleared + * by the caller? + */ + if (!is_idr(root)) + root_tag_clear_all(root); root->rnode = NULL; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); + deleted = true; node = parent; } while (node); + + return deleted; } /** @@ -693,13 +810,14 @@ static void delete_node(struct radix_tree_root *root, */ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, unsigned order, struct radix_tree_node **nodep, - void ***slotp) + void __rcu ***slotp) { struct radix_tree_node *node = NULL, *child; - void **slot = (void **)&root->rnode; + void __rcu **slot = (void __rcu **)&root->rnode; unsigned long maxindex; unsigned int shift, offset = 0; unsigned long max = index | ((1UL << order) - 1); + gfp_t gfp = root_gfp_mask(root); shift = radix_tree_load_root(root, &child, &maxindex); @@ -707,18 +825,18 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, if (order > 0 && max == ((1UL << order) - 1)) max++; if (max > maxindex) { - int error = radix_tree_extend(root, max, shift); + int error = radix_tree_extend(root, gfp, max, shift); if (error < 0) return error; shift = error; - child = root->rnode; + child = rcu_dereference_raw(root->rnode); } while (shift > order) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ - child = radix_tree_node_alloc(root, node, shift, + child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return -ENOMEM; @@ -741,7 +859,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, return 0; } -#ifdef CONFIG_RADIX_TREE_MULTIORDER /* * Free any nodes below this node. 
The tree is presumed to not need * shrinking, and any user data in the tree is presumed to not need a @@ -757,7 +874,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) struct radix_tree_node *child = entry_to_node(node); for (;;) { - void *entry = child->slots[offset]; + void *entry = rcu_dereference_raw(child->slots[offset]); if (radix_tree_is_internal_node(entry) && !is_sibling_entry(child, entry)) { child = entry_to_node(entry); @@ -777,8 +894,9 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) } } -static inline int insert_entries(struct radix_tree_node *node, void **slot, - void *item, unsigned order, bool replace) +#ifdef CONFIG_RADIX_TREE_MULTIORDER +static inline int insert_entries(struct radix_tree_node *node, + void __rcu **slot, void *item, unsigned order, bool replace) { struct radix_tree_node *child; unsigned i, n, tag, offset, tags = 0; @@ -813,7 +931,7 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot, } for (i = 0; i < n; i++) { - struct radix_tree_node *old = slot[i]; + struct radix_tree_node *old = rcu_dereference_raw(slot[i]); if (i) { rcu_assign_pointer(slot[i], child); for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) @@ -840,8 +958,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot, return n; } #else -static inline int insert_entries(struct radix_tree_node *node, void **slot, - void *item, unsigned order, bool replace) +static inline int insert_entries(struct radix_tree_node *node, + void __rcu **slot, void *item, unsigned order, bool replace) { if (*slot) return -EEXIST; @@ -868,7 +986,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, unsigned order, void *item) { struct radix_tree_node *node; - void **slot; + void __rcu **slot; int error; BUG_ON(radix_tree_is_internal_node(item)); @@ -908,16 +1026,17 @@ EXPORT_SYMBOL(__radix_tree_insert); * allocated and @root->rnode is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. */ -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp) +void *__radix_tree_lookup(const struct radix_tree_root *root, + unsigned long index, struct radix_tree_node **nodep, + void __rcu ***slotp) { struct radix_tree_node *node, *parent; unsigned long maxindex; - void **slot; + void __rcu **slot; restart: parent = NULL; - slot = (void **)&root->rnode; + slot = (void __rcu **)&root->rnode; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; @@ -952,9 +1071,10 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, * exclusive from other writers. Any dereference of the slot must be done * using radix_tree_deref_slot. */ -void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, + unsigned long index) { - void **slot; + void __rcu **slot; if (!__radix_tree_lookup(root, index, NULL, &slot)) return NULL; @@ -974,75 +1094,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); * them safely). No RCU barriers are required to access or modify the * returned item, however. 
*/ -void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) { return __radix_tree_lookup(root, index, NULL, NULL); } EXPORT_SYMBOL(radix_tree_lookup); -static inline int slot_count(struct radix_tree_node *node, - void **slot) +static inline void replace_sibling_entries(struct radix_tree_node *node, + void __rcu **slot, int count, int exceptional) { - int n = 1; #ifdef CONFIG_RADIX_TREE_MULTIORDER void *ptr = node_to_entry(slot); - unsigned offset = get_slot_offset(node, slot); - int i; + unsigned offset = get_slot_offset(node, slot) + 1; - for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { - if (node->slots[offset + i] != ptr) + while (offset < RADIX_TREE_MAP_SIZE) { + if (rcu_dereference_raw(node->slots[offset]) != ptr) break; - n++; + if (count < 0) { + node->slots[offset] = NULL; + node->count--; + } + node->exceptional += exceptional; + offset++; } #endif - return n; } -static void replace_slot(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, - bool warn_typeswitch) +static void replace_slot(void __rcu **slot, void *item, + struct radix_tree_node *node, int count, int exceptional) { - void *old = rcu_dereference_raw(*slot); - int count, exceptional; - - WARN_ON_ONCE(radix_tree_is_internal_node(item)); - - count = !!item - !!old; - exceptional = !!radix_tree_exceptional_entry(item) - - !!radix_tree_exceptional_entry(old); - - WARN_ON_ONCE(warn_typeswitch && (count || exceptional)); + if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) + return; - if (node) { + if (node && (count || exceptional)) { node->count += count; - if (exceptional) { - exceptional *= slot_count(node, slot); - node->exceptional += exceptional; - } + node->exceptional += exceptional; + replace_sibling_entries(node, slot, count, exceptional); } rcu_assign_pointer(*slot, item); } -static inline void delete_sibling_entries(struct radix_tree_node *node, - void **slot) +static bool node_tag_get(const struct radix_tree_root *root, + const struct radix_tree_node *node, + unsigned int tag, unsigned int offset) { -#ifdef CONFIG_RADIX_TREE_MULTIORDER - bool exceptional = radix_tree_exceptional_entry(*slot); - void *ptr = node_to_entry(slot); - unsigned offset = get_slot_offset(node, slot); - int i; + if (node) + return tag_get(node, tag, offset); + return root_tag_get(root, tag); +} - for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { - if (node->slots[offset + i] != ptr) - break; - node->slots[offset + i] = NULL; - node->count--; - if (exceptional) - node->exceptional--; +/* + * IDR users want to be able to store NULL in the tree, so if the slot isn't + * free, don't adjust the count, even if it's transitioning between NULL and + * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still + * have empty bits, but it only stores NULL in slots when they're being + * deleted. 
+ */ +static int calculate_count(struct radix_tree_root *root, + struct radix_tree_node *node, void __rcu **slot, + void *item, void *old) +{ + if (is_idr(root)) { + unsigned offset = get_slot_offset(node, slot); + bool free = node_tag_get(root, node, IDR_FREE, offset); + if (!free) + return 0; + if (!old) + return 1; } -#endif + return !!item - !!old; } /** @@ -1059,18 +1180,22 @@ static inline void delete_sibling_entries(struct radix_tree_node *node, */ void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, - void **slot, void *item, + void __rcu **slot, void *item, radix_tree_update_node_t update_node, void *private) { - if (!item) - delete_sibling_entries(node, slot); + void *old = rcu_dereference_raw(*slot); + int exceptional = !!radix_tree_exceptional_entry(item) - + !!radix_tree_exceptional_entry(old); + int count = calculate_count(root, node, slot, item, old); + /* * This function supports replacing exceptional entries and * deleting entries, but that needs accounting against the * node unless the slot is root->rnode. */ - replace_slot(root, node, slot, item, - !node && slot != (void **)&root->rnode); + WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) && + (count || exceptional)); + replace_slot(slot, item, node, count, exceptional); if (!node) return; @@ -1098,9 +1223,9 @@ void __radix_tree_replace(struct radix_tree_root *root, * radix_tree_iter_replace(). */ void radix_tree_replace_slot(struct radix_tree_root *root, - void **slot, void *item) + void __rcu **slot, void *item) { - replace_slot(root, NULL, slot, item, true); + __radix_tree_replace(root, NULL, slot, item, NULL, NULL); } EXPORT_SYMBOL(radix_tree_replace_slot); @@ -1114,7 +1239,8 @@ EXPORT_SYMBOL(radix_tree_replace_slot); * Caller must hold tree write locked across split and replacement. 
*/ void radix_tree_iter_replace(struct radix_tree_root *root, - const struct radix_tree_iter *iter, void **slot, void *item) + const struct radix_tree_iter *iter, + void __rcu **slot, void *item) { __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); } @@ -1138,7 +1264,7 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index, unsigned order, void *item) { struct radix_tree_node *node; - void **slot; + void __rcu **slot; int error; BUG_ON(radix_tree_is_internal_node(item)); @@ -1173,9 +1299,10 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, unsigned order) { struct radix_tree_node *parent, *node, *child; - void **slot; + void __rcu **slot; unsigned int offset, end; unsigned n, tag, tags = 0; + gfp_t gfp = root_gfp_mask(root); if (!__radix_tree_lookup(root, index, &parent, &slot)) return -ENOENT; @@ -1189,7 +1316,8 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, tags |= 1 << tag; for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { - if (!is_sibling_entry(parent, parent->slots[end])) + if (!is_sibling_entry(parent, + rcu_dereference_raw(parent->slots[end]))) break; for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) if (tags & (1 << tag)) @@ -1213,14 +1341,15 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, for (;;) { if (node->shift > order) { - child = radix_tree_node_alloc(root, node, + child = radix_tree_node_alloc(gfp, node, root, node->shift - RADIX_TREE_MAP_SHIFT, offset, 0, 0); if (!child) goto nomem; if (node != parent) { node->count++; - node->slots[offset] = node_to_entry(child); + rcu_assign_pointer(node->slots[offset], + node_to_entry(child)); for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) if (tags & (1 << tag)) tag_set(node, tag, offset); @@ -1262,6 +1391,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, } #endif +static void node_tag_set(struct radix_tree_root *root, + struct radix_tree_node *node, + unsigned int tag, unsigned int offset) +{ + while (node) { + if (tag_get(node, tag, offset)) + return; + tag_set(node, tag, offset); + offset = node->offset; + node = node->parent; + } + + if (!root_tag_get(root, tag)) + root_tag_set(root, tag); +} + /** * radix_tree_tag_set - set a tag on a radix tree node * @root: radix tree root @@ -1303,6 +1448,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_tag_set); +/** + * radix_tree_iter_tag_set - set a tag on the current iterator entry + * @root: radix tree root + * @iter: iterator state + * @tag: tag to set + */ +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag) +{ + node_tag_set(root, iter->node, tag, iter_offset(iter)); +} + static void node_tag_clear(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) @@ -1323,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root, root_tag_clear(root, tag); } -static void node_tag_set(struct radix_tree_root *root, - struct radix_tree_node *node, - unsigned int tag, unsigned int offset) -{ - while (node) { - if (tag_get(node, tag, offset)) - return; - tag_set(node, tag, offset); - offset = node->offset; - node = node->parent; - } - - if (!root_tag_get(root, tag)) - root_tag_set(root, tag); -} - -/** - * radix_tree_iter_tag_set - set a tag on the current iterator entry - * @root: radix tree root - * @iter: iterator state - * @tag: tag to set - */ -void radix_tree_iter_tag_set(struct radix_tree_root 
*root, - const struct radix_tree_iter *iter, unsigned int tag) -{ - node_tag_set(root, iter->node, tag, iter_offset(iter)); -} - /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root @@ -1391,6 +1520,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, EXPORT_SYMBOL(radix_tree_tag_clear); /** + * radix_tree_iter_tag_clear - clear a tag on the current iterator entry + * @root: radix tree root + * @iter: iterator state + * @tag: tag to clear + */ +void radix_tree_iter_tag_clear(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag) +{ + node_tag_clear(root, iter->node, tag, iter_offset(iter)); +} + +/** * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key @@ -1405,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * the RCU lock is held, unless tag modification and node deletion are excluded * from concurrency. */ -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; @@ -1417,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return 0; - if (node == NULL) - return 0; while (radix_tree_is_internal_node(node)) { unsigned offset; @@ -1426,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); - if (!node) - return 0; if (!tag_get(parent, tag, offset)) return 0; if (node == RADIX_TREE_RETRY) @@ -1454,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter, unsigned tag_long = offset / BITS_PER_LONG; unsigned tag_bit = offset % BITS_PER_LONG; + if (!node) { + iter->tags = 1; + return; + } + iter->tags = node->tags[tag][tag_long] >> tag_bit; /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ @@ -1468,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter, } #ifdef CONFIG_RADIX_TREE_MULTIORDER -static void **skip_siblings(struct radix_tree_node **nodep, - void **slot, struct radix_tree_iter *iter) +static void __rcu **skip_siblings(struct radix_tree_node **nodep, + void __rcu **slot, struct radix_tree_iter *iter) { void *sib = node_to_entry(slot - 1); @@ -1486,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep, return NULL; } -void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, - unsigned flags) +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; struct radix_tree_node *node = rcu_dereference_raw(*slot); @@ -1540,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, } EXPORT_SYMBOL(__radix_tree_next_slot); #else -static void **skip_siblings(struct radix_tree_node **nodep, - void **slot, struct radix_tree_iter *iter) +static void __rcu **skip_siblings(struct radix_tree_node **nodep, + void __rcu **slot, struct radix_tree_iter *iter) { return slot; } #endif -void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) +void __rcu **radix_tree_iter_resume(void __rcu **slot, + struct radix_tree_iter *iter) { struct radix_tree_node *node; slot++; iter->index = __radix_tree_iter_add(iter, 1); - node = rcu_dereference_raw(*slot); skip_siblings(&node, slot, iter); iter->next_index = iter->index; iter->tags = 0; @@ -1569,7 +1711,7 @@ 
EXPORT_SYMBOL(radix_tree_iter_resume); * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if iteration is over */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; @@ -1606,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, iter->tags = 1; iter->node = NULL; __set_iter_shift(iter, 0); - return (void **)&root->rnode; + return (void __rcu **)&root->rnode; } do { @@ -1624,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, offset + 1); else while (++offset < RADIX_TREE_MAP_SIZE) { - void *slot = node->slots[offset]; + void *slot = rcu_dereference_raw( + node->slots[offset]); if (is_sibling_entry(node, slot)) continue; if (slot) @@ -1680,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk); * stored in 'results'. */ unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, +radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items) { struct radix_tree_iter iter; - void **slot; + void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) @@ -1725,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); * protection, radix_tree_deref_slot may fail requiring a retry. */ unsigned int -radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, +radix_tree_gang_lookup_slot(const struct radix_tree_root *root, + void __rcu ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items) { struct radix_tree_iter iter; - void **slot; + void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) @@ -1762,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot); * returns the number of items which were placed at *@results. */ unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, +radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; - void **slot; + void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) @@ -1803,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag); * returns the number of slots which were placed at *@results. */ unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag) +radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; - void **slot; + void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) @@ -1843,59 +1986,83 @@ void __radix_tree_delete_node(struct radix_tree_root *root, delete_node(root, node, update_node, private); } +static bool __radix_tree_delete(struct radix_tree_root *root, + struct radix_tree_node *node, void __rcu **slot) +{ + void *old = rcu_dereference_raw(*slot); + int exceptional = radix_tree_exceptional_entry(old) ? 
-1 : 0; + unsigned offset = get_slot_offset(node, slot); + int tag; + + if (is_idr(root)) + node_tag_set(root, node, IDR_FREE, offset); + else + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + node_tag_clear(root, node, tag, offset); + + replace_slot(slot, NULL, node, -1, exceptional); + return node && delete_node(root, node, NULL, NULL); +} + /** - * radix_tree_delete_item - delete an item from a radix tree - * @root: radix tree root - * @index: index key - * @item: expected item + * radix_tree_iter_delete - delete the entry at this iterator position + * @root: radix tree root + * @iter: iterator state + * @slot: pointer to slot * - * Remove @item at @index from the radix tree rooted at @root. + * Delete the entry at the position currently pointed to by the iterator. + * This may result in the current node being freed; if it is, the iterator + * is advanced so that it will not reference the freed memory. This + * function may be called without any locking if there are no other threads + * which can access this tree. + */ +void radix_tree_iter_delete(struct radix_tree_root *root, + struct radix_tree_iter *iter, void __rcu **slot) +{ + if (__radix_tree_delete(root, iter->node, slot)) + iter->index = iter->next_index; +} + +/** + * radix_tree_delete_item - delete an item from a radix tree + * @root: radix tree root + * @index: index key + * @item: expected item * - * Returns the address of the deleted item, or NULL if it was not present - * or the entry at the given @index was not @item. + * Remove @item at @index from the radix tree rooted at @root. + * + * Return: the deleted entry, or %NULL if it was not present + * or the entry at the given @index was not @item. */ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { - struct radix_tree_node *node; - unsigned int offset; - void **slot; + struct radix_tree_node *node = NULL; + void __rcu **slot; void *entry; - int tag; entry = __radix_tree_lookup(root, index, &node, &slot); - if (!entry) + if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, + get_slot_offset(node, slot)))) return NULL; if (item && entry != item) return NULL; - if (!node) { - root_tag_clear_all(root); - root->rnode = NULL; - return entry; - } - - offset = get_slot_offset(node, slot); - - /* Clear all tags associated with the item to be deleted. */ - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - node_tag_clear(root, node, tag, offset); - - __radix_tree_replace(root, node, slot, NULL, NULL, NULL); + __radix_tree_delete(root, node, slot); return entry; } EXPORT_SYMBOL(radix_tree_delete_item); /** - * radix_tree_delete - delete an item from a radix tree - * @root: radix tree root - * @index: index key + * radix_tree_delete - delete an entry from a radix tree + * @root: radix tree root + * @index: index key * - * Remove the item at @index from the radix tree rooted at @root. + * Remove the entry at @index from the radix tree rooted at @root. * - * Returns the address of the deleted item, or NULL if it was not present. + * Return: The deleted entry, or %NULL if it was not present. 
*/ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { @@ -1905,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete); void radix_tree_clear_tags(struct radix_tree_root *root, struct radix_tree_node *node, - void **slot) + void __rcu **slot) { if (node) { unsigned int tag, offset = get_slot_offset(node, slot); for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); } else { - /* Clear root node tags */ - root->gfp_mask &= __GFP_BITS_MASK; + root_tag_clear_all(root); } } @@ -1922,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root, * @root: radix tree root * @tag: tag to test */ -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) +int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) { return root_tag_get(root, tag); } EXPORT_SYMBOL(radix_tree_tagged); +/** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading + * + * Preallocate memory to use for the next call to idr_alloc(). This function + * returns with preemption disabled. It will be enabled by idr_preload_end(). + */ +void idr_preload(gfp_t gfp_mask) +{ + __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE); +} +EXPORT_SYMBOL(idr_preload); + +/** + * ida_pre_get - reserve resources for ida allocation + * @ida: ida handle + * @gfp: memory allocation flags + * + * This function should be called before calling ida_get_new_above(). If it + * is unable to allocate memory, it will return %0. On success, it returns %1. + */ +int ida_pre_get(struct ida *ida, gfp_t gfp) +{ + __radix_tree_preload(gfp, IDA_PRELOAD_SIZE); + /* + * The IDA API has no preload_end() equivalent. Instead, + * ida_get_new() can return -EAGAIN, prompting the caller + * to return to the ida_pre_get() step. + */ + preempt_enable(); + + if (!this_cpu_read(ida_bitmap)) { + struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); + if (!bitmap) + return 0; + bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); + kfree(bitmap); + } + + return 1; +} +EXPORT_SYMBOL(ida_pre_get); + +void __rcu **idr_get_free(struct radix_tree_root *root, + struct radix_tree_iter *iter, gfp_t gfp, int end) +{ + struct radix_tree_node *node = NULL, *child; + void __rcu **slot = (void __rcu **)&root->rnode; + unsigned long maxindex, start = iter->next_index; + unsigned long max = end > 0 ? end - 1 : INT_MAX; + unsigned int shift, offset = 0; + + grow: + shift = radix_tree_load_root(root, &child, &maxindex); + if (!radix_tree_tagged(root, IDR_FREE)) + start = max(start, maxindex + 1); + if (start > max) + return ERR_PTR(-ENOSPC); + + if (start > maxindex) { + int error = radix_tree_extend(root, gfp, start, shift); + if (error < 0) + return ERR_PTR(error); + shift = error; + child = rcu_dereference_raw(root->rnode); + } + + while (shift) { + shift -= RADIX_TREE_MAP_SHIFT; + if (child == NULL) { + /* Have to add a child node. 
*/ + child = radix_tree_node_alloc(gfp, node, root, shift, + offset, 0, 0); + if (!child) + return ERR_PTR(-ENOMEM); + all_tag_set(child, IDR_FREE); + rcu_assign_pointer(*slot, node_to_entry(child)); + if (node) + node->count++; + } else if (!radix_tree_is_internal_node(child)) + break; + + node = entry_to_node(child); + offset = radix_tree_descend(node, &child, start); + if (!tag_get(node, IDR_FREE, offset)) { + offset = radix_tree_find_next_bit(node, IDR_FREE, + offset + 1); + start = next_index(start, node, offset); + if (start > max) + return ERR_PTR(-ENOSPC); + while (offset == RADIX_TREE_MAP_SIZE) { + offset = node->offset + 1; + node = node->parent; + if (!node) + goto grow; + shift = node->shift; + } + child = rcu_dereference_raw(node->slots[offset]); + } + slot = &node->slots[offset]; + } + + iter->index = start; + if (node) + iter->next_index = 1 + min(max, (start | node_maxindex(node))); + else + iter->next_index = 1; + iter->node = node; + __set_iter_shift(iter, shift); + set_iter_tags(iter, node, offset, IDR_FREE); + + return slot; +} + +/** + * idr_destroy - release all internal memory from an IDR + * @idr: idr handle + * + * After this function is called, the IDR is empty, and may be reused or + * the data structure containing it may be freed. + * + * A typical clean-up sequence for objects stored in an idr tree will use + * idr_for_each() to free all objects, if necessary, then idr_destroy() to + * free the memory used to keep track of those objects. + */ +void idr_destroy(struct idr *idr) +{ + struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode); + if (radix_tree_is_internal_node(node)) + radix_tree_free_nodes(node); + idr->idr_rt.rnode = NULL; + root_tag_set(&idr->idr_rt, IDR_FREE); +} +EXPORT_SYMBOL(idr_destroy); + static void radix_tree_node_ctor(void *arg) { @@ -1971,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu) rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { node = rtp->nodes; - rtp->nodes = node->private_data; + rtp->nodes = node->parent; kmem_cache_free(radix_tree_node_cachep, node); rtp->nr--; } + kfree(per_cpu(ida_bitmap, cpu)); + per_cpu(ida_bitmap, cpu) = NULL; return 0; } diff --git a/lib/refcount.c b/lib/refcount.c new file mode 100644 index 000000000000..1d33366189d1 --- /dev/null +++ b/lib/refcount.c @@ -0,0 +1,267 @@ +/* + * Variant of atomic_t specialized for reference counts. + * + * The interface matches the atomic_t interface (to aid in porting) but only + * provides the few functions one should use for reference counting. + * + * It differs in that the counter saturates at UINT_MAX and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free issues. + * + * Memory ordering rules are slightly relaxed wrt regular atomic_t functions + * and provide only what is strictly required for refcounts. + * + * The increments are fully relaxed; these will not provide ordering. The + * rationale is that whatever is used to obtain the object we're increasing the + * reference count on will provide the ordering. For locked data structures, + * its the lock acquire, for RCU/lockless data structures its the dependent + * load. + * + * Do note that inc_not_zero() provides a control dependency which will order + * future stores against the inc, this ensures we'll never modify the object + * if we did not in fact acquire a reference. 
+ * + * The decrements will provide release order, such that all the prior loads and + * stores will be issued before, it also provides a control dependency, which + * will order us against the subsequent free(). + * + * The control dependency is against the load of the cmpxchg (ll/sc) that + * succeeded. This means the stores aren't fully ordered, but this is fine + * because the 1->0 transition indicates no concurrency. + * + * Note that the allocator is responsible for ordering things between free() + * and alloc(). + * + */ + +#include <linux/refcount.h> +#include <linux/bug.h> + +bool refcount_add_not_zero(unsigned int i, refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (!val) + return false; + + if (unlikely(val == UINT_MAX)) + return true; + + new = val + i; + if (new < val) + new = UINT_MAX; + old = atomic_cmpxchg_relaxed(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + + return true; +} +EXPORT_SYMBOL_GPL(refcount_add_not_zero); + +void refcount_add(unsigned int i, refcount_t *r) +{ + WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); +} +EXPORT_SYMBOL_GPL(refcount_add); + +/* + * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See the comment on top. + */ +bool refcount_inc_not_zero(refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + new = val + 1; + + if (!val) + return false; + + if (unlikely(!new)) + return true; + + old = atomic_cmpxchg_relaxed(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + + return true; +} +EXPORT_SYMBOL_GPL(refcount_inc_not_zero); + +/* + * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller already has a + * reference on the object, will WARN when this is not so. + */ +void refcount_inc(refcount_t *r) +{ + WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); +} +EXPORT_SYMBOL_GPL(refcount_inc); + +bool refcount_sub_and_test(unsigned int i, refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (unlikely(val == UINT_MAX)) + return false; + + new = val - i; + if (new > val) { + WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + return false; + } + + old = atomic_cmpxchg_release(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + return !new; +} +EXPORT_SYMBOL_GPL(refcount_sub_and_test); + +/* + * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to + * decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. + */ +bool refcount_dec_and_test(refcount_t *r) +{ + return refcount_sub_and_test(1, r); +} +EXPORT_SYMBOL_GPL(refcount_dec_and_test); + +/* + * Similar to atomic_dec(), it will WARN on underflow and fail to decrement + * when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before. 
+ */ + +void refcount_dec(refcount_t *r) +{ + WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); +} +EXPORT_SYMBOL_GPL(refcount_dec); + +/* + * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the + * success thereof. + * + * Like all decrement operations, it provides release memory order and provides + * a control dependency. + * + * It can be used like a try-delete operator; this explicit case is provided + * and not cmpxchg in generic, because that would allow implementing unsafe + * operations. + */ +bool refcount_dec_if_one(refcount_t *r) +{ + return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; +} +EXPORT_SYMBOL_GPL(refcount_dec_if_one); + +/* + * No atomic_t counterpart, it decrements unless the value is 1, in which case + * it will return false. + * + * Was often done like: atomic_add_unless(&var, -1, 1) + */ +bool refcount_dec_not_one(refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (unlikely(val == UINT_MAX)) + return true; + + if (val == 1) + return false; + + new = val - 1; + if (new > val) { + WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + return true; + } + + old = atomic_cmpxchg_release(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + return true; +} +EXPORT_SYMBOL_GPL(refcount_dec_not_one); + +/* + * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail + * to decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. + */ +bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) +{ + if (refcount_dec_not_one(r)) + return false; + + mutex_lock(lock); + if (!refcount_dec_and_test(r)) { + mutex_unlock(lock); + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock); + +/* + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to + * decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. 
+ */ +bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) +{ + if (refcount_dec_not_one(r)) + return false; + + spin_lock(lock); + if (!refcount_dec_and_test(r)) { + spin_unlock(lock); + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(refcount_dec_and_lock); + diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 172454e6b979..f8635fd57442 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/log2.h> #include <linux/sched.h> +#include <linux/rculist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> @@ -146,9 +147,7 @@ static void bucket_table_free(const struct bucket_table *tbl) if (tbl->nest) nested_bucket_table_free(tbl); - if (tbl) - kvfree(tbl->locks); - + kvfree(tbl->locks); kvfree(tbl); } @@ -1123,12 +1122,13 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, union nested_table *ntbl; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); - ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); subhash >>= tbl->nest; while (ntbl && size > (1 << shift)) { index = subhash & ((1 << shift) - 1); - ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + ntbl = rht_dereference_bucket_rcu(ntbl[index].table, + tbl, hash); size >>= shift; subhash >>= shift; } diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 55e11c4b2f3b..60e800e0b5a0 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -15,6 +15,7 @@ * along with this program. If not, see <https://www.gnu.org/licenses/>. */ +#include <linux/sched.h> #include <linux/random.h> #include <linux/sbitmap.h> #include <linux/seq_file.h> diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 004fc70fc56a..c6cf82242d65 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -651,7 +651,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned long flags; unsigned int sg_flags = SG_MITER_ATOMIC; if (to_buffer) @@ -664,9 +663,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, if (!sg_miter_skip(&miter, skip)) return false; - local_irq_save(flags); - - while (sg_miter_next(&miter) && offset < buflen) { + while ((offset < buflen) && sg_miter_next(&miter)) { unsigned int len; len = min(miter.length, buflen - offset); @@ -681,7 +678,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, sg_miter_stop(&miter); - local_irq_restore(flags); return offset; } EXPORT_SYMBOL(sg_copy_buffer); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 1afec32de6f2..690d75b132fa 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) + if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) goto out; /* diff --git a/lib/syscall.c b/lib/syscall.c index 63239e097b13..17d5ff5fa6a3 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -1,5 +1,6 @@ #include <linux/ptrace.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/export.h> #include <asm/syscall.h> diff --git a/lib/test_parman.c b/lib/test_parman.c index fe9f3a785804..35e32243693c 100644 --- a/lib/test_parman.c +++ b/lib/test_parman.c @@ -334,7 +334,7 @@ static int 
test_parman_check_array(struct test_parman *test_parman, last_priority = item->prio->priority; if (item->parman_item.index != i) { - pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n", + pr_err("Item has different index in compare to where it actually is (%lu != %d)\n", item->parman_item.index, i); return -EINVAL; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0967771d8f7f..e3bf4e0f10b5 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1739,6 +1739,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, * 'h', 'l', or 'L' for integer fields * 'z' support added 23/7/1999 S.H. * 'z' changed to 'Z' --davidm 1/25/99 + * 'Z' changed to 'z' --adobriyan 2017-01-25 * 't' added for ptrdiff_t * * @fmt: the format string @@ -1838,7 +1839,7 @@ qualifier: /* get the conversion qualifier */ qualifier = 0; if (*fmt == 'h' || _tolower(*fmt) == 'l' || - _tolower(*fmt) == 'z' || *fmt == 't') { + *fmt == 'z' || *fmt == 't') { qualifier = *fmt++; if (unlikely(qualifier == *fmt)) { if (qualifier == 'l') { @@ -1907,7 +1908,7 @@ qualifier: else if (qualifier == 'l') { BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG); spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN); - } else if (_tolower(qualifier) == 'z') { + } else if (qualifier == 'z') { spec->type = FORMAT_TYPE_SIZE_T; } else if (qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; @@ -2657,7 +2658,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) /* get conversion qualifier */ qualifier = -1; if (*fmt == 'h' || _tolower(*fmt) == 'l' || - _tolower(*fmt) == 'z') { + *fmt == 'z') { qualifier = *fmt++; if (unlikely(qualifier == *fmt)) { if (qualifier == 'h') { @@ -2851,7 +2852,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args) else *va_arg(args, unsigned long long *) = val.u; break; - case 'Z': case 'z': *va_arg(args, size_t *) = val.u; break; diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index afcc550877ff..79d0fd13b5b3 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -90,3 +90,9 @@ config DEBUG_PAGE_REF careful when enabling this feature because it adds about 30 KB to the kernel code. However the runtime performance overhead is virtually nil until the tracepoints are actually enabled. + +config DEBUG_RODATA_TEST + bool "Testcase for the marking rodata read-only" + depends on STRICT_KERNEL_RWX + ---help--- + This option enables a testcase for the setting rodata read-only. 
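As context for the new refcount_t API added in lib/refcount.c above: its long header comment spells out the ordering rules (fully relaxed increments, release ordering plus a control dependency on the final decrement, saturation at UINT_MAX instead of wrapping), and a short usage sketch may make the intended pattern easier to follow. The sketch below is illustrative only and is not part of this patch; "struct foo" and the foo_*() helpers are hypothetical names, while the refcount_*() calls are the ones introduced here (refcount_set() comes from <linux/refcount.h>).

/*
 * Illustrative sketch, not part of the patch: a typical get/put pattern
 * built on the refcount_t helpers added in lib/refcount.c above.
 * "struct foo" and the foo_*() functions are hypothetical.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *p = kzalloc(sizeof(*p), gfp);

	if (p)
		refcount_set(&p->refs, 1);	/* caller owns the initial reference */
	return p;
}

/* Take a reference on an object reached via a lockless/RCU lookup. */
static bool foo_get(struct foo *p)
{
	/*
	 * Fails once the count has dropped to zero, so an object whose
	 * final put is already in flight cannot be resurrected; the
	 * control dependency orders later stores against the successful
	 * increment.
	 */
	return refcount_inc_not_zero(&p->refs);
}

static void foo_put(struct foo *p)
{
	/*
	 * The final decrement provides release ordering, so all earlier
	 * stores to *p are complete before the object is freed.
	 */
	if (refcount_dec_and_test(&p->refs))
		kfree(p);
}

On saturation at UINT_MAX the count simply stops moving and the object is deliberately leaked rather than risking a use-after-free, which is the behaviour the WARN()s in lib/refcount.c document.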
diff --git a/mm/Makefile b/mm/Makefile index aa0aa17cb413..026f6a828a50 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o +obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o obj-$(CONFIG_PAGE_OWNER) += page_owner.o obj-$(CONFIG_CLEANCACHE) += cleancache.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o diff --git a/mm/compaction.c b/mm/compaction.c index 0fdfde016ee2..81e1eaa2a2cf 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -12,6 +12,7 @@ #include <linux/migrate.h> #include <linux/compaction.h> #include <linux/mm_inline.h> +#include <linux/sched/signal.h> #include <linux/backing-dev.h> #include <linux/sysctl.h> #include <linux/sysfs.h> diff --git a/mm/dmapool.c b/mm/dmapool.c index cef82b8a9291..4d90a64b2fdc 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -93,7 +93,7 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf) spin_unlock_irq(&pool->lock); /* per-pool info, no real statistics yet */ - temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", + temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n", pool->name, blocks, pages * (pool->allocation / pool->size), pool->size, pages); diff --git a/mm/filemap.c b/mm/filemap.c index 1944c631e3e6..1694623a6289 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -13,6 +13,7 @@ #include <linux/compiler.h> #include <linux/dax.h> #include <linux/fs.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/capability.h> #include <linux/kernel_stat.h> @@ -10,7 +10,7 @@ #include <linux/swap.h> #include <linux/swapops.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 71e3dede95b4..d36b2af4d1bf 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -9,6 +9,8 @@ #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> +#include <linux/sched/numa_balancing.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/mmu_notifier.h> diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2e0e8159ce8e..a7aa811b7d14 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -18,6 +18,7 @@ #include <linux/bootmem.h> #include <linux/sysfs.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 25f0e6521f36..98b27195e38b 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/printk.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/stacktrace.h> #include <linux/string.h> @@ -39,6 +40,16 @@ #include "kasan.h" #include "../slab.h" +void kasan_enable_current(void) +{ + current->kasan_depth++; +} + +void kasan_disable_current(void) +{ + current->kasan_depth--; +} + /* * Poisons the shadow memory for 'size' bytes starting from 'addr'. * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. 
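A brief note on the kasan_enable_current()/kasan_disable_current() pair that mm/kasan/kasan.c now provides out of line above: per the comments in <linux/kasan.h>, they adjust a per-task depth counter so that KASAN bug reports can be suppressed for the current task across a region of code and re-enabled afterwards. The sketch below only illustrates that documented bracketing pattern; the function and the memory it touches are hypothetical and not taken from the patch.

/*
 * Illustrative sketch, not part of the patch: suppressing KASAN reports
 * for the current task around an access that is known to touch memory
 * the caller is allowed to read even though KASAN may consider it
 * poisoned. The function below is hypothetical.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>

static void dump_possibly_poisoned(const u8 *p, size_t len)
{
	size_t i;

	kasan_disable_current();	/* reports off for this task */
	for (i = 0; i < len; i++)
		pr_cont(" %02x", p[i]);
	kasan_enable_current();		/* reports back on */
	pr_cont("\n");
}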
diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 77ae3239c3de..ba40b7f673f4 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2,6 +2,8 @@ #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> #include <linux/mmu_notifier.h> #include <linux/rmap.h> #include <linux/swap.h> @@ -420,7 +422,7 @@ int __khugepaged_enter(struct mm_struct *mm) list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); spin_unlock(&khugepaged_mm_lock); - atomic_inc(&mm->mm_count); + mmgrab(mm); if (wakeup) wake_up_interruptible(&khugepaged_wait); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index da3436953022..26c874e90b12 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -73,7 +73,9 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/export.h> @@ -19,6 +19,8 @@ #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> #include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/rmap.h> @@ -1854,7 +1856,7 @@ int __ksm_enter(struct mm_struct *mm) spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); - atomic_inc(&mm->mm_count); + mmgrab(mm); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 45867e439d31..c52ec893e241 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -35,6 +35,7 @@ #include <linux/memcontrol.h> #include <linux/cgroup.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/shmem_fs.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 3d0f2fd4bf73..27f7210e7fab 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -40,7 +40,8 @@ #include <linux/mm.h> #include <linux/page-flags.h> #include <linux/kernel-page-flags.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/export.h> diff --git a/mm/memory.c b/mm/memory.c index 14fc0b40f0bb..a97a4cec2e1f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -40,6 +40,10 @@ #include <linux/kernel_stat.h> #include <linux/mm.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/task.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/swap.h> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 1d3ed58f92ab..295479b792ec 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -6,6 +6,7 @@ #include <linux/stddef.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/swap.h> #include <linux/interrupt.h> #include <linux/pagemap.h> diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1e7873e40c9a..75b2745bac41 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -73,6 +73,9 @@ #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/numa_balancing.h> +#include <linux/sched/task.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> diff --git a/mm/migrate.c b/mm/migrate.c index 2c63ac06791b..9a0897a14d37 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -40,6 +40,7 @@ #include <linux/mmu_notifier.h> #include <linux/page_idle.h> 
#include <linux/page_owner.h> +#include <linux/sched/mm.h> #include <asm/tlbflush.h> diff --git a/mm/mlock.c b/mm/mlock.c index cdbed8aaa426..1050511f8b2b 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -8,6 +8,7 @@ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> +#include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> diff --git a/mm/mmap.c b/mm/mmap.c index 499b988b1639..bfbe8856d134 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1672,7 +1672,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); - error = file->f_op->mmap(file, vma); + error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 6f4d27c5bb32..3e612ae748e9 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -5,6 +5,8 @@ #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/mmu_context.h> #include <linux/export.h> @@ -25,7 +27,7 @@ void use_mm(struct mm_struct *mm) task_lock(tsk); active_mm = tsk->active_mm; if (active_mm != mm) { - atomic_inc(&mm->mm_count); + mmgrab(mm); tsk->active_mm = mm; } tsk->mm = mm; diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index f4259e496f83..a7652acd2ab9 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -17,6 +17,7 @@ #include <linux/srcu.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/slab.h> /* global SRCU for all MMs */ @@ -275,7 +276,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn, mm->mmu_notifier_mm = mmu_notifier_mm; mmu_notifier_mm = NULL; } - atomic_inc(&mm->mm_count); + mmgrab(mm); /* * Serialize the update against mmu_notifier_unregister. 
A diff --git a/mm/nommu.c b/mm/nommu.c index fe9f4fa4a7a7..2d131b97a851 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -17,6 +17,7 @@ #include <linux/export.h> #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/vmacache.h> #include <linux/mman.h> #include <linux/swap.h> @@ -757,7 +758,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) mm->map_count--; for (i = 0; i < VMACACHE_SIZE; i++) { /* if the vma is cached, invalidate the entire cache */ - if (curr->vmacache[i] == vma) { + if (curr->vmacache.vmas[i] == vma) { vmacache_invalidate(mm); break; } @@ -1084,7 +1085,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; - ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); + ret = call_mmap(vma->vm_file, vma); if (ret == 0) { vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; @@ -1115,7 +1116,7 @@ static int do_mmap_private(struct vm_area_struct *vma, * - VM_MAYSHARE will be set if it may attempt to share */ if (capabilities & NOMMU_MAP_DIRECT) { - ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); + ret = call_mmap(vma->vm_file, vma); if (ret == 0) { /* shouldn't return success if we're not sharing */ BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 578321f1c070..d083714a2bb9 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -22,6 +22,9 @@ #include <linux/err.h> #include <linux/gfp.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/coredump.h> +#include <linux/sched/task.h> #include <linux/swap.h> #include <linux/timex.h> #include <linux/jiffies.h> @@ -653,7 +656,7 @@ static void mark_oom_victim(struct task_struct *tsk) /* oom_mm is bound to the signal struct life time. */ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) - atomic_inc(&tsk->signal->oom_mm->mm_count); + mmgrab(tsk->signal->oom_mm); /* * Make sure that the task is woken up from uninterruptible sleep @@ -870,7 +873,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) /* Get a reference to safely compare mm after task_unlock(victim) */ mm = victim->mm; - atomic_inc(&mm->mm_count); + mmgrab(mm); /* * We should send SIGKILL before setting TIF_MEMDIE in order to prevent * the OOM victim from depleting the memory reserves from the user diff --git a/mm/page-writeback.c b/mm/page-writeback.c index ae6e601f0a58..d8ac2a7fb9e7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -36,6 +36,7 @@ #include <linux/pagevec.h> #include <linux/timer.h> #include <linux/sched/rt.h> +#include <linux/sched/signal.h> #include <linux/mm_inline.h> #include <trace/events/writeback.h> @@ -1797,7 +1798,7 @@ pause: * pages exceeds dirty_thresh, give the other good wb's a pipe * to go through, so that tasks on them still remain responsive. * - * In theory 1 page is enough to keep the comsumer-producer + * In theory 1 page is enough to keep the consumer-producer * pipe going: the flusher cleans 1 page => the task dirties 1 * more page. However wb_dirty has accounting errors. So use * the larger and more IO friendly wb_stat_error. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f9623d690d6..eaa64d2ffdc5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/sched/rt.h> +#include <linux/sched/mm.h> #include <linux/page_owner.h> #include <linux/kthread.h> #include <linux/memcontrol.h> @@ -5925,7 +5926,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, * the zone and SPARSEMEM is in use. 
If there are holes within the * zone, each populated memory region may cost us one or two extra * memmap pages due to alignment because memmap pages for each - * populated regions may not naturally algined on page boundary. + * populated regions may not be naturally aligned on page boundary. * So the (present_pages >> 4) heuristic is a tradeoff for that. */ if (spanned_pages > present_pages + (present_pages >> 4) && diff --git a/mm/percpu.c b/mm/percpu.c index 0686f566d347..5696039b5c07 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -43,7 +43,7 @@ * Chunks can be determined from the address using the index field * in the page struct. The index field contains a pointer to the chunk. * - * To use this allocator, arch code should do the followings. + * To use this allocator, arch code should do the following: * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 84d0c7eada2b..8973cd231ece 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -12,6 +12,7 @@ #include <linux/mm.h> #include <linux/uio.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/slab.h> diff --git a/mm/rmap.c b/mm/rmap.c index 8774791e2809..2da487d6cea8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -46,6 +46,8 @@ */ #include <linux/mm.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/swapops.h> diff --git a/mm/rodata_test.c b/mm/rodata_test.c new file mode 100644 index 000000000000..0fd21670b513 --- /dev/null +++ b/mm/rodata_test.c @@ -0,0 +1,56 @@ +/* + * rodata_test.c: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ +#include <linux/uaccess.h> +#include <asm/sections.h> + +const int rodata_test_data = 0xC3; +EXPORT_SYMBOL_GPL(rodata_test_data); + +void rodata_test(void) +{ + unsigned long start, end; + int zero = 0; + + /* test 1: read the value */ + /* If this test fails, some previous testrun has clobbered the state */ + if (!rodata_test_data) { + pr_err("rodata_test: test 1 fails (start data)\n"); + return; + } + + /* test 2: write to the variable; this should fault */ + if (!probe_kernel_write((void *)&rodata_test_data, + (void *)&zero, sizeof(zero))) { + pr_err("rodata_test: test data was not read only\n"); + return; + } + + /* test 3: check the value hasn't changed */ + if (rodata_test_data == zero) { + pr_err("rodata_test: test data was changed\n"); + return; + } + + /* test 4: check if the rodata section is PAGE_SIZE aligned */ + start = (unsigned long)__start_rodata; + end = (unsigned long)__end_rodata; + if (start & (PAGE_SIZE - 1)) { + pr_err("rodata_test: start of .rodata is not page size aligned\n"); + return; + } + if (end & (PAGE_SIZE - 1)) { + pr_err("rodata_test: end of .rodata is not page size aligned\n"); + return; + } + + pr_info("rodata_test: all tests were successful\n"); +} diff --git a/mm/shmem.c b/mm/shmem.c index a26649a6633f..e67d6ba4e98e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -29,6 +29,7 @@ #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/export.h> #include <linux/swap.h> #include <linux/uio.h> @@ -958,10 +959,10 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) } EXPORT_SYMBOL_GPL(shmem_truncate_range); -static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int shmem_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = dentry->d_inode; + struct inode *inode = path->dentry->d_inode; struct shmem_inode_info *info = SHMEM_I(inode); if (info->alloced - info->swapped != inode->i_mapping->nrpages) { diff --git a/mm/slab.c b/mm/slab.c index bd63450a9b16..807d86c76908 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -116,6 +116,7 @@ #include <linux/kmemcheck.h> #include <linux/memory.h> #include <linux/prefetch.h> +#include <linux/sched/task_stack.h> #include <net/sock.h> diff --git a/mm/swapfile.c b/mm/swapfile.c index 2cac12cc9abe..521ef9b6064f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -6,6 +6,8 @@ */ #include <linux/mm.h> +#include <linux/sched/mm.h> +#include <linux/sched/task.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/slab.h> @@ -1671,7 +1673,7 @@ int try_to_unuse(unsigned int type, bool frontswap, * that. */ start_mm = &init_mm; - atomic_inc(&init_mm.mm_users); + mmget(&init_mm); /* * Keep on scanning until all entries have gone. 
Usually, @@ -1720,7 +1722,7 @@ int try_to_unuse(unsigned int type, bool frontswap, if (atomic_read(&start_mm->mm_users) == 1) { mmput(start_mm); start_mm = &init_mm; - atomic_inc(&init_mm.mm_users); + mmget(&init_mm); } /* @@ -1757,13 +1759,13 @@ int try_to_unuse(unsigned int type, bool frontswap, struct mm_struct *prev_mm = start_mm; struct mm_struct *mm; - atomic_inc(&new_start_mm->mm_users); - atomic_inc(&prev_mm->mm_users); + mmget(new_start_mm); + mmget(prev_mm); spin_lock(&mmlist_lock); while (swap_count(*swap_map) && !retval && (p = p->next) != &start_mm->mmlist) { mm = list_entry(p, struct mm_struct, mmlist); - if (!atomic_inc_not_zero(&mm->mm_users)) + if (!mmget_not_zero(mm)) continue; spin_unlock(&mmlist_lock); mmput(prev_mm); @@ -1781,7 +1783,7 @@ int try_to_unuse(unsigned int type, bool frontswap, if (set_start_mm && *swap_map < swcount) { mmput(new_start_mm); - atomic_inc(&mm->mm_users); + mmget(mm); new_start_mm = mm; set_start_mm = 0; } diff --git a/mm/truncate.c b/mm/truncate.c index f2db67465495..6263affdef88 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -786,7 +786,7 @@ EXPORT_SYMBOL(truncate_setsize); */ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) { - int bsize = 1 << inode->i_blkbits; + int bsize = i_blocksize(inode); loff_t rounded_from; struct page *page; pgoff_t index; diff --git a/mm/usercopy.c b/mm/usercopy.c index 8345299e3e3b..d155e12563b1 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -16,6 +16,9 @@ #include <linux/mm.h> #include <linux/slab.h> +#include <linux/sched.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> #include <asm/sections.h> enum { diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 9f0ad2a4f102..479e631d43c2 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -8,6 +8,7 @@ */ #include <linux/mm.h> +#include <linux/sched/signal.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> diff --git a/mm/util.c b/mm/util.c index b8f538863b5a..656dc5e37a87 100644 --- a/mm/util.c +++ b/mm/util.c @@ -5,6 +5,8 @@ #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/swapops.h> diff --git a/mm/vmacache.c b/mm/vmacache.c index 035fdeb35b43..7ffa0ee341b5 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -1,7 +1,8 @@ /* * Copyright (C) 2014 Davidlohr Bueso. */ -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/mm.h> #include <linux/vmacache.h> @@ -60,7 +61,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm) void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) { if (vmacache_valid_mm(newvma->vm_mm)) - current->vmacache[VMACACHE_HASH(addr)] = newvma; + current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; } static bool vmacache_valid(struct mm_struct *mm) @@ -71,12 +72,12 @@ static bool vmacache_valid(struct mm_struct *mm) return false; curr = current; - if (mm->vmacache_seqnum != curr->vmacache_seqnum) { + if (mm->vmacache_seqnum != curr->vmacache.seqnum) { /* * First attempt will always be invalid, initialize * the new cache for this task here. 
*/ - curr->vmacache_seqnum = mm->vmacache_seqnum; + curr->vmacache.seqnum = mm->vmacache_seqnum; vmacache_flush(curr); return false; } @@ -93,7 +94,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) return NULL; for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache[i]; + struct vm_area_struct *vma = current->vmacache.vmas[i]; if (!vma) continue; @@ -121,7 +122,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, return NULL; for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache[i]; + struct vm_area_struct *vma = current->vmacache.vmas[i]; if (vma && vma->vm_start == start && vma->vm_end == end) { count_vm_vmacache_event(VMACACHE_FIND_HITS); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index be93949b4885..b4024d688f38 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -12,7 +12,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/highmem.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> diff --git a/mm/vmscan.c b/mm/vmscan.c index 70aa739c6b68..bc8031ef994d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> +#include <linux/sched/mm.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> diff --git a/mm/workingset.c b/mm/workingset.c index 79ed5364375d..ac839fca0e76 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -355,10 +355,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private) * as node->private_list is protected by &mapping->tree_lock. */ if (node->count && node->count == node->exceptional) { - if (list_empty(&node->private_list)) { - node->private_data = mapping; + if (list_empty(&node->private_list)) list_lru_add(&shadow_nodes, &node->private_list); - } } else { if (!list_empty(&node->private_list)) list_lru_del(&shadow_nodes, &node->private_list); @@ -436,7 +434,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, */ node = container_of(item, struct radix_tree_node, private_list); - mapping = node->private_data; + mapping = container_of(node->root, struct address_space, page_tree); /* Coming from the list, invert the lock order */ if (!spin_trylock(&mapping->tree_lock)) { diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7b1fb6c8c21..b7ee9c34dbd6 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/magic.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/highmem.h> diff --git a/mm/zswap.c b/mm/zswap.c index cabf09e0128b..eedc27894b10 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -76,6 +76,8 @@ static u64 zswap_duplicate_entry; * tunables **********************************/ +#define ZSWAP_PARAM_UNSET "" + /* Enable/disable zswap (disabled by default) */ static bool zswap_enabled; static int zswap_enabled_param_set(const char *, @@ -185,6 +187,9 @@ static bool zswap_init_started; /* fatal error during init */ static bool zswap_init_failed; +/* init completed, but couldn't create the initial pool */ +static bool zswap_has_pool; + /********************************* * helpers and fwd declarations **********************************/ @@ -424,7 +429,8 @@ static struct zswap_pool *__zswap_pool_current(void) struct zswap_pool *pool; pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); - 
WARN_ON(!pool); + WARN_ONCE(!pool && zswap_has_pool, + "%s: no page storage pool!\n", __func__); return pool; } @@ -443,7 +449,7 @@ static struct zswap_pool *zswap_pool_current_get(void) rcu_read_lock(); pool = __zswap_pool_current(); - if (!pool || !zswap_pool_get(pool)) + if (!zswap_pool_get(pool)) pool = NULL; rcu_read_unlock(); @@ -459,7 +465,9 @@ static struct zswap_pool *zswap_pool_last_get(void) list_for_each_entry_rcu(pool, &zswap_pools, list) last = pool; - if (!WARN_ON(!last) && !zswap_pool_get(last)) + WARN_ONCE(!last && zswap_has_pool, + "%s: no page storage pool!\n", __func__); + if (!zswap_pool_get(last)) last = NULL; rcu_read_unlock(); @@ -495,6 +503,17 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; int ret; + if (!zswap_has_pool) { + /* if either are unset, pool initialization failed, and we + * need both params to be set correctly before trying to + * create a pool. + */ + if (!strcmp(type, ZSWAP_PARAM_UNSET)) + return NULL; + if (!strcmp(compressor, ZSWAP_PARAM_UNSET)) + return NULL; + } + pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) { pr_err("pool alloc failed\n"); @@ -544,29 +563,41 @@ error: static __init struct zswap_pool *__zswap_pool_create_fallback(void) { - if (!crypto_has_comp(zswap_compressor, 0, 0)) { - if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) { - pr_err("default compressor %s not available\n", - zswap_compressor); - return NULL; - } + bool has_comp, has_zpool; + + has_comp = crypto_has_comp(zswap_compressor, 0, 0); + if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) { pr_err("compressor %s not available, using default %s\n", zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT); param_free_charp(&zswap_compressor); zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; + has_comp = crypto_has_comp(zswap_compressor, 0, 0); } - if (!zpool_has_pool(zswap_zpool_type)) { - if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) { - pr_err("default zpool %s not available\n", - zswap_zpool_type); - return NULL; - } + if (!has_comp) { + pr_err("default compressor %s not available\n", + zswap_compressor); + param_free_charp(&zswap_compressor); + zswap_compressor = ZSWAP_PARAM_UNSET; + } + + has_zpool = zpool_has_pool(zswap_zpool_type); + if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) { pr_err("zpool %s not available, using default %s\n", zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT); param_free_charp(&zswap_zpool_type); zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT; + has_zpool = zpool_has_pool(zswap_zpool_type); + } + if (!has_zpool) { + pr_err("default zpool %s not available\n", + zswap_zpool_type); + param_free_charp(&zswap_zpool_type); + zswap_zpool_type = ZSWAP_PARAM_UNSET; } + if (!has_comp || !has_zpool) + return NULL; + return zswap_pool_create(zswap_zpool_type, zswap_compressor); } @@ -582,6 +613,9 @@ static void zswap_pool_destroy(struct zswap_pool *pool) static int __must_check zswap_pool_get(struct zswap_pool *pool) { + if (!pool) + return 0; + return kref_get_unless_zero(&pool->kref); } @@ -639,7 +673,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, } /* no change required */ - if (!strcmp(s, *(char **)kp->arg)) + if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool) return 0; /* if this is load-time (pre-init) param setting, @@ -670,21 +704,26 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, pool = zswap_pool_find_get(type, compressor); if (pool) { zswap_pool_debug("using 
existing", pool); + WARN_ON(pool == zswap_pool_current()); list_del_rcu(&pool->list); - } else { - spin_unlock(&zswap_pools_lock); - pool = zswap_pool_create(type, compressor); - spin_lock(&zswap_pools_lock); } + spin_unlock(&zswap_pools_lock); + + if (!pool) + pool = zswap_pool_create(type, compressor); + if (pool) ret = param_set_charp(s, kp); else ret = -EINVAL; + spin_lock(&zswap_pools_lock); + if (!ret) { put_pool = zswap_pool_current(); list_add_rcu(&pool->list, &zswap_pools); + zswap_has_pool = true; } else if (pool) { /* add the possibly pre-existing pool to the end of the pools * list; if it's new (and empty) then it'll be removed and @@ -696,6 +735,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, spin_unlock(&zswap_pools_lock); + if (!zswap_has_pool && !pool) { + /* if initial pool creation failed, and this pool creation also + * failed, maybe both compressor and zpool params were bad. + * Allow changing this param, so pool creation will succeed + * when the other param is changed. We already verified this + * param is ok in the zpool_has_pool() or crypto_has_comp() + * checks above. + */ + ret = param_set_charp(s, kp); + } + /* drop the ref from either the old current pool, * or the new pool we failed to add */ @@ -724,6 +774,10 @@ static int zswap_enabled_param_set(const char *val, pr_err("can't enable, initialization failed\n"); return -ENODEV; } + if (!zswap_has_pool && zswap_init_started) { + pr_err("can't enable, no pool configured\n"); + return -ENODEV; + } return param_set_bool(val, kp); } @@ -1205,22 +1259,21 @@ static int __init init_zswap(void) goto hp_fail; pool = __zswap_pool_create_fallback(); - if (!pool) { + if (pool) { + pr_info("loaded using pool %s/%s\n", pool->tfm_name, + zpool_get_type(pool->zpool)); + list_add(&pool->list, &zswap_pools); + zswap_has_pool = true; + } else { pr_err("pool creation failed\n"); - goto pool_fail; + zswap_enabled = false; } - pr_info("loaded using pool %s/%s\n", pool->tfm_name, - zpool_get_type(pool->zpool)); - - list_add(&pool->list, &zswap_pools); frontswap_register_ops(&zswap_frontswap_ops); if (zswap_debugfs_init()) pr_warn("debugfs initialization failed\n"); return 0; -pool_fail: - cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE); hp_fail: cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE); dstmem_fail: diff --git a/net/9p/client.c b/net/9p/client.c index 3fc94a49ccd5..25cfd8a4bc36 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -32,7 +32,7 @@ #include <linux/idr.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/uio.h> #include <net/9p/9p.h> diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 10d2bdce686e..465cc24b41e5 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1656,7 +1656,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) ddp->deh_dport = usat->sat_port; ddp->deh_sport = at->src_port; - SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len); + SOCK_DEBUG(sk, "SK %p: Copy user data (%zd bytes).\n", sk, len); err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) { @@ -1720,7 +1720,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) */ aarp_send_ddp(dev, skb, &usat->sat_addr, NULL); } - SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len); + SOCK_DEBUG(sk, "SK %p: Done write (%zd).\n", sk, len); out: release_sock(sk); diff --git a/net/atm/common.c b/net/atm/common.c index 
a3ca922d307b..9613381f5db0 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -13,7 +13,7 @@ #include <linux/errno.h> /* error codes */ #include <linux/capability.h> #include <linux/mm.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/time.h> /* struct timeval */ #include <linux/skbuff.h> #include <linux/bitops.h> diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 3b3b1a292ec8..a190800572bd 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -451,7 +451,7 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr, return; } if (end_of_tlvs - tlvs != 0) - pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", + pr_info("(%s) ignoring %zd bytes of trailing TLV garbage\n", dev->name, end_of_tlvs - tlvs); } diff --git a/net/atm/svc.c b/net/atm/svc.c index 878563a8354d..db9794ec61d8 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -10,7 +10,7 @@ #include <linux/kernel.h> /* printk */ #include <linux/skbuff.h> #include <linux/wait.h> -#include <linux/sched.h> /* jiffies and HZ */ +#include <linux/sched/signal.h> #include <linux/fcntl.h> /* O_NONBLOCK */ #include <linux/init.h> #include <linux/atm.h> /* ATM stuff */ diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 90fcf5fc2e0a..a8e42cedf1db 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -20,7 +20,7 @@ #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index cfb2faba46de..69e1f7d362a8 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -27,6 +27,8 @@ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/stringify.h> +#include <linux/sched/signal.h> + #include <asm/ioctls.h> #include <net/bluetooth/bluetooth.h> diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 46ac686c8911..bb308224099c 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c @@ -26,7 +26,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1015d9c8d97d..b5faff458d8b 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -21,6 +21,8 @@ SOFTWARE IS DISCLAIMED. */ +#include <linux/sched/signal.h> + #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 48f9471e7c85..f64d6566021f 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -851,7 +851,7 @@ static int hci_sock_release(struct socket *sock) if (hdev) { if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { - /* When releasing an user channel exclusive access, + /* When releasing a user channel exclusive access, * call hci_dev_do_close directly instead of calling * hci_dev_close to ensure the exclusive access will * be released and the controller brought back down. @@ -1172,7 +1172,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, /* In case the transport is already up and * running, clear the error here. 
* - * This can happen when opening an user + * This can happen when opening a user * channel and HCI_AUTO_OFF grace period * is still active. */ @@ -1190,7 +1190,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been assigned, * this socket will transition from a raw socket into - * an user channel socket. For a clean transition, send + * a user channel socket. For a clean transition, send * the close notification first. */ skb = create_monitor_ctrl_close(sk); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a8ba752732c9..f307b145ea54 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/export.h> +#include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7511df72347f..aa1a814ceddc 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -27,6 +27,7 @@ #include <linux/export.h> #include <linux/debugfs.h> +#include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 3125ce670c2f..e4e9a2da1e7e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 0f4034934d56..0b5dd607444c 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -19,6 +19,7 @@ #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/times.h> +#include <linux/sched/signal.h> #include "br_private.h" diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 05e8946ccc03..79aee759aba5 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -17,6 +17,7 @@ #include <linux/if_bridge.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> +#include <linux/sched/signal.h> #include "br_private.h" diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index 9024283d2bca..279527f8b1fe 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c @@ -187,7 +187,7 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par) expected_length += ebt_mac_wormhash_size(wh_src); if (em->match_size != EBT_ALIGN(expected_length)) { - pr_info("wrong size: %d against expected %d, rounded to %Zd\n", + pr_info("wrong size: %d against expected %d, rounded to %zd\n", em->match_size, expected_length, EBT_ALIGN(expected_length)); return -EINVAL; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 92cbbd2afddb..adcad344c843 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -9,7 +9,7 @@ #include <linux/fs.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/list.h> diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c index 50f040fdb2a9..b9233b990399 100644 --- a/net/ceph/cls_lock_client.c +++ b/net/ceph/cls_lock_client.c @@ -69,8 +69,8 @@ int ceph_cls_lock(struct ceph_osd_client *osdc, dout("%s lock_name %s type %d cookie %s tag %s desc %s flags 
0x%x\n", __func__, lock_name, type, cookie, tag, desc, flags); ret = ceph_osdc_call(osdc, oid, oloc, "lock", "lock", - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - lock_op_page, lock_op_buf_size, NULL, NULL); + CEPH_OSD_FLAG_WRITE, lock_op_page, + lock_op_buf_size, NULL, NULL); dout("%s: status %d\n", __func__, ret); __free_page(lock_op_page); @@ -117,8 +117,8 @@ int ceph_cls_unlock(struct ceph_osd_client *osdc, dout("%s lock_name %s cookie %s\n", __func__, lock_name, cookie); ret = ceph_osdc_call(osdc, oid, oloc, "lock", "unlock", - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - unlock_op_page, unlock_op_buf_size, NULL, NULL); + CEPH_OSD_FLAG_WRITE, unlock_op_page, + unlock_op_buf_size, NULL, NULL); dout("%s: status %d\n", __func__, ret); __free_page(unlock_op_page); @@ -170,8 +170,8 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc, dout("%s lock_name %s cookie %s locker %s%llu\n", __func__, lock_name, cookie, ENTITY_NAME(*locker)); ret = ceph_osdc_call(osdc, oid, oloc, "lock", "break_lock", - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - break_op_page, break_op_buf_size, NULL, NULL); + CEPH_OSD_FLAG_WRITE, break_op_page, + break_op_buf_size, NULL, NULL); dout("%s: status %d\n", __func__, ret); __free_page(break_op_page); @@ -278,7 +278,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc, int get_info_op_buf_size; int name_len = strlen(lock_name); struct page *get_info_op_page, *reply_page; - size_t reply_len; + size_t reply_len = PAGE_SIZE; void *p, *end; int ret; diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c index 80d7c3a97cb8..5bf94c04f645 100644 --- a/net/ceph/crush/crush.c +++ b/net/ceph/crush/crush.c @@ -45,7 +45,6 @@ int crush_get_bucket_item_weight(const struct crush_bucket *b, int p) void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b) { - kfree(b->h.perm); kfree(b->h.items); kfree(b); } @@ -54,14 +53,12 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b) { kfree(b->item_weights); kfree(b->sum_weights); - kfree(b->h.perm); kfree(b->h.items); kfree(b); } void crush_destroy_bucket_tree(struct crush_bucket_tree *b) { - kfree(b->h.perm); kfree(b->h.items); kfree(b->node_weights); kfree(b); @@ -71,7 +68,6 @@ void crush_destroy_bucket_straw(struct crush_bucket_straw *b) { kfree(b->straws); kfree(b->item_weights); - kfree(b->h.perm); kfree(b->h.items); kfree(b); } @@ -79,7 +75,6 @@ void crush_destroy_bucket_straw(struct crush_bucket_straw *b) void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b) { kfree(b->item_weights); - kfree(b->h.perm); kfree(b->h.items); kfree(b); } diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 130ab407c5ec..b5cd8c21bfdf 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -54,7 +54,6 @@ int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size return -1; } - /* * bucket choose methods * @@ -72,59 +71,60 @@ int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size * Since this is expensive, we optimize for the r=0 case, which * captures the vast majority of calls. 
*/ -static int bucket_perm_choose(struct crush_bucket *bucket, +static int bucket_perm_choose(const struct crush_bucket *bucket, + struct crush_work_bucket *work, int x, int r) { unsigned int pr = r % bucket->size; unsigned int i, s; /* start a new permutation if @x has changed */ - if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { + if (work->perm_x != (__u32)x || work->perm_n == 0) { dprintk("bucket %d new x=%d\n", bucket->id, x); - bucket->perm_x = x; + work->perm_x = x; /* optimize common r=0 case */ if (pr == 0) { s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % bucket->size; - bucket->perm[0] = s; - bucket->perm_n = 0xffff; /* magic value, see below */ + work->perm[0] = s; + work->perm_n = 0xffff; /* magic value, see below */ goto out; } for (i = 0; i < bucket->size; i++) - bucket->perm[i] = i; - bucket->perm_n = 0; - } else if (bucket->perm_n == 0xffff) { + work->perm[i] = i; + work->perm_n = 0; + } else if (work->perm_n == 0xffff) { /* clean up after the r=0 case above */ for (i = 1; i < bucket->size; i++) - bucket->perm[i] = i; - bucket->perm[bucket->perm[0]] = 0; - bucket->perm_n = 1; + work->perm[i] = i; + work->perm[work->perm[0]] = 0; + work->perm_n = 1; } /* calculate permutation up to pr */ - for (i = 0; i < bucket->perm_n; i++) - dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); - while (bucket->perm_n <= pr) { - unsigned int p = bucket->perm_n; + for (i = 0; i < work->perm_n; i++) + dprintk(" perm_choose have %d: %d\n", i, work->perm[i]); + while (work->perm_n <= pr) { + unsigned int p = work->perm_n; /* no point in swapping the final entry */ if (p < bucket->size - 1) { i = crush_hash32_3(bucket->hash, x, bucket->id, p) % (bucket->size - p); if (i) { - unsigned int t = bucket->perm[p + i]; - bucket->perm[p + i] = bucket->perm[p]; - bucket->perm[p] = t; + unsigned int t = work->perm[p + i]; + work->perm[p + i] = work->perm[p]; + work->perm[p] = t; } dprintk(" perm_choose swap %d with %d\n", p, p+i); } - bucket->perm_n++; + work->perm_n++; } for (i = 0; i < bucket->size; i++) - dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]); + dprintk(" perm_choose %d: %d\n", i, work->perm[i]); - s = bucket->perm[pr]; + s = work->perm[pr]; out: dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id, bucket->size, x, r, pr, s); @@ -132,14 +132,14 @@ out: } /* uniform */ -static int bucket_uniform_choose(struct crush_bucket_uniform *bucket, - int x, int r) +static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket, + struct crush_work_bucket *work, int x, int r) { - return bucket_perm_choose(&bucket->h, x, r); + return bucket_perm_choose(&bucket->h, work, x, r); } /* list */ -static int bucket_list_choose(struct crush_bucket_list *bucket, +static int bucket_list_choose(const struct crush_bucket_list *bucket, int x, int r) { int i; @@ -155,8 +155,9 @@ static int bucket_list_choose(struct crush_bucket_list *bucket, w *= bucket->sum_weights[i]; w = w >> 16; /*dprintk(" scaled %llx\n", w);*/ - if (w < bucket->item_weights[i]) + if (w < bucket->item_weights[i]) { return bucket->h.items[i]; + } } dprintk("bad list sums for bucket %d\n", bucket->h.id); @@ -192,7 +193,7 @@ static int terminal(int x) return x & 1; } -static int bucket_tree_choose(struct crush_bucket_tree *bucket, +static int bucket_tree_choose(const struct crush_bucket_tree *bucket, int x, int r) { int n; @@ -224,7 +225,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket, /* straw */ -static int bucket_straw_choose(struct crush_bucket_straw *bucket, +static int 
bucket_straw_choose(const struct crush_bucket_straw *bucket, int x, int r) { __u32 i; @@ -301,7 +302,7 @@ static __u64 crush_ln(unsigned int xin) * */ -static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket, +static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket, int x, int r) { unsigned int i, high = 0; @@ -344,37 +345,42 @@ static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket, high_draw = draw; } } + return bucket->h.items[high]; } -static int crush_bucket_choose(struct crush_bucket *in, int x, int r) +static int crush_bucket_choose(const struct crush_bucket *in, + struct crush_work_bucket *work, + int x, int r) { dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r); BUG_ON(in->size == 0); switch (in->alg) { case CRUSH_BUCKET_UNIFORM: - return bucket_uniform_choose((struct crush_bucket_uniform *)in, - x, r); + return bucket_uniform_choose( + (const struct crush_bucket_uniform *)in, + work, x, r); case CRUSH_BUCKET_LIST: - return bucket_list_choose((struct crush_bucket_list *)in, + return bucket_list_choose((const struct crush_bucket_list *)in, x, r); case CRUSH_BUCKET_TREE: - return bucket_tree_choose((struct crush_bucket_tree *)in, + return bucket_tree_choose((const struct crush_bucket_tree *)in, x, r); case CRUSH_BUCKET_STRAW: - return bucket_straw_choose((struct crush_bucket_straw *)in, - x, r); + return bucket_straw_choose( + (const struct crush_bucket_straw *)in, + x, r); case CRUSH_BUCKET_STRAW2: - return bucket_straw2_choose((struct crush_bucket_straw2 *)in, - x, r); + return bucket_straw2_choose( + (const struct crush_bucket_straw2 *)in, + x, r); default: dprintk("unknown bucket %d alg %d\n", in->id, in->alg); return in->items[0]; } } - /* * true if device is marked "out" (failed, fully offloaded) * of the cluster @@ -416,7 +422,8 @@ static int is_out(const struct crush_map *map, * @parent_r: r value passed from the parent */ static int crush_choose_firstn(const struct crush_map *map, - struct crush_bucket *bucket, + struct crush_work *work, + const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, @@ -434,7 +441,7 @@ static int crush_choose_firstn(const struct crush_map *map, int rep; unsigned int ftotal, flocal; int retry_descent, retry_bucket, skip_rep; - struct crush_bucket *in = bucket; + const struct crush_bucket *in = bucket; int r; int i; int item = 0; @@ -473,9 +480,13 @@ static int crush_choose_firstn(const struct crush_map *map, if (local_fallback_retries > 0 && flocal >= (in->size>>1) && flocal > local_fallback_retries) - item = bucket_perm_choose(in, x, r); + item = bucket_perm_choose( + in, work->work[-1-in->id], + x, r); else - item = crush_bucket_choose(in, x, r); + item = crush_bucket_choose( + in, work->work[-1-in->id], + x, r); if (item >= map->max_devices) { dprintk(" bad item %d\n", item); skip_rep = 1; @@ -518,19 +529,21 @@ static int crush_choose_firstn(const struct crush_map *map, sub_r = r >> (vary_r-1); else sub_r = 0; - if (crush_choose_firstn(map, - map->buckets[-1-item], - weight, weight_max, - x, stable ? 1 : outpos+1, 0, - out2, outpos, count, - recurse_tries, 0, - local_retries, - local_fallback_retries, - 0, - vary_r, - stable, - NULL, - sub_r) <= outpos) + if (crush_choose_firstn( + map, + work, + map->buckets[-1-item], + weight, weight_max, + x, stable ? 
1 : outpos+1, 0, + out2, outpos, count, + recurse_tries, 0, + local_retries, + local_fallback_retries, + 0, + vary_r, + stable, + NULL, + sub_r) <= outpos) /* didn't get leaf */ reject = 1; } else { @@ -539,14 +552,12 @@ static int crush_choose_firstn(const struct crush_map *map, } } - if (!reject) { + if (!reject && !collide) { /* out? */ if (itemtype == 0) reject = is_out(map, weight, weight_max, item, x); - else - reject = 0; } reject: @@ -600,7 +611,8 @@ reject: * */ static void crush_choose_indep(const struct crush_map *map, - struct crush_bucket *bucket, + struct crush_work *work, + const struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, @@ -610,7 +622,7 @@ static void crush_choose_indep(const struct crush_map *map, int *out2, int parent_r) { - struct crush_bucket *in = bucket; + const struct crush_bucket *in = bucket; int endpos = outpos + left; int rep; unsigned int ftotal; @@ -678,7 +690,9 @@ static void crush_choose_indep(const struct crush_map *map, break; } - item = crush_bucket_choose(in, x, r); + item = crush_bucket_choose( + in, work->work[-1-in->id], + x, r); if (item >= map->max_devices) { dprintk(" bad item %d\n", item); out[rep] = CRUSH_ITEM_NONE; @@ -724,13 +738,15 @@ static void crush_choose_indep(const struct crush_map *map, if (recurse_to_leaf) { if (item < 0) { - crush_choose_indep(map, - map->buckets[-1-item], - weight, weight_max, - x, 1, numrep, 0, - out2, rep, - recurse_tries, 0, - 0, NULL, r); + crush_choose_indep( + map, + work, + map->buckets[-1-item], + weight, weight_max, + x, 1, numrep, 0, + out2, rep, + recurse_tries, 0, + 0, NULL, r); if (out2[rep] == CRUSH_ITEM_NONE) { /* placed nothing; no leaf */ break; @@ -781,6 +797,53 @@ static void crush_choose_indep(const struct crush_map *map, #endif } + +/* + * This takes a chunk of memory and sets it up to be a shiny new + * working area for a CRUSH placement computation. It must be called + * on any newly allocated memory before passing it in to + * crush_do_rule. It may be used repeatedly after that, so long as the + * map has not changed. If the map /has/ changed, you must make sure + * the working size is no smaller than what was allocated and re-run + * crush_init_workspace. + * + * If you do retain the working space between calls to crush, make it + * thread-local. + */ +void crush_init_workspace(const struct crush_map *map, void *v) +{ + struct crush_work *w = v; + __s32 b; + + /* + * We work by moving through the available space and setting + * values and pointers as we go. + * + * It's a bit like Forth's use of the 'allot' word since we + * set the pointer first and then reserve the space for it to + * point to by incrementing the point. 
+ */ + v += sizeof(struct crush_work *); + w->work = v; + v += map->max_buckets * sizeof(struct crush_work_bucket *); + for (b = 0; b < map->max_buckets; ++b) { + if (!map->buckets[b]) + continue; + + w->work[b] = v; + switch (map->buckets[b]->alg) { + default: + v += sizeof(struct crush_work_bucket); + break; + } + w->work[b]->perm_x = 0; + w->work[b]->perm_n = 0; + w->work[b]->perm = v; + v += map->buckets[b]->size * sizeof(__u32); + } + BUG_ON(v - (void *)w != map->working_size); +} + /** * crush_do_rule - calculate a mapping with the given input and rule * @map: the crush_map @@ -790,24 +853,25 @@ static void crush_choose_indep(const struct crush_map *map, * @result_max: maximum result size * @weight: weight vector (for map leaves) * @weight_max: size of weight vector - * @scratch: scratch vector for private use; must be >= 3 * result_max + * @cwin: pointer to at least crush_work_size() bytes of memory */ int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weight, int weight_max, - int *scratch) + void *cwin) { int result_len; - int *a = scratch; - int *b = scratch + result_max; - int *c = scratch + result_max*2; + struct crush_work *cw = cwin; + int *a = cwin + map->working_size; + int *b = a + result_max; + int *c = b + result_max; + int *w = a; + int *o = b; int recurse_to_leaf; - int *w; int wsize = 0; - int *o; int osize; int *tmp; - struct crush_rule *rule; + const struct crush_rule *rule; __u32 step; int i, j; int numrep; @@ -835,12 +899,10 @@ int crush_do_rule(const struct crush_map *map, rule = map->rules[ruleno]; result_len = 0; - w = a; - o = b; for (step = 0; step < rule->len; step++) { int firstn = 0; - struct crush_rule_step *curstep = &rule->steps[step]; + const struct crush_rule_step *curstep = &rule->steps[step]; switch (curstep->op) { case CRUSH_RULE_TAKE: @@ -936,6 +998,7 @@ int crush_do_rule(const struct crush_map *map, recurse_tries = choose_tries; osize += crush_choose_firstn( map, + cw, map->buckets[bno], weight, weight_max, x, numrep, @@ -956,6 +1019,7 @@ int crush_do_rule(const struct crush_map *map, numrep : (result_max-osize)); crush_choose_indep( map, + cw, map->buckets[bno], weight, weight_max, x, out_size, numrep, @@ -997,5 +1061,6 @@ int crush_do_rule(const struct crush_map *map, break; } } + return result_len; } diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 292e33bd916e..46008d5ac504 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -3,10 +3,12 @@ #include <linux/err.h> #include <linux/scatterlist.h> +#include <linux/sched.h> #include <linux/slab.h> #include <crypto/aes.h> #include <crypto/skcipher.h> #include <linux/key-type.h> +#include <linux/sched/mm.h> #include <keys/ceph-type.h> #include <keys/user-type.h> diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index bad3d4ae43f6..38dcf1eb427d 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -520,7 +520,8 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; int r; - r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len); + r = sock_recvmsg(sock, &msg, msg.msg_flags); if (r == -EAGAIN) r = 0; return r; @@ -529,17 +530,20 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) static int ceph_tcp_recvpage(struct socket *sock, struct page *page, int page_offset, size_t length) { - void *kaddr; - int ret; + struct bio_vec bvec = { + 
.bv_page = page, + .bv_offset = page_offset, + .bv_len = length + }; + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; BUG_ON(page_offset + length > PAGE_SIZE); - - kaddr = kmap(page); - BUG_ON(!kaddr); - ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); - kunmap(page); - - return ret; + iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length); + r = sock_recvmsg(sock, &msg, msg.msg_flags); + if (r == -EAGAIN) + r = 0; + return r; } /* @@ -579,18 +583,28 @@ static int __ceph_tcp_sendpage(struct socket *sock, struct page *page, static int ceph_tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, bool more) { + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + struct bio_vec bvec; int ret; - struct kvec iov; /* sendpage cannot properly handle pages with page_count == 0, * we need to fallback to sendmsg if that's the case */ if (page_count(page) >= 1) return __ceph_tcp_sendpage(sock, page, offset, size, more); - iov.iov_base = kmap(page) + offset; - iov.iov_len = size; - ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more); - kunmap(page); + bvec.bv_page = page; + bvec.bv_offset = offset; + bvec.bv_len = size; + + if (more) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ + + iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size); + ret = sock_sendmsg(sock, &msg); + if (ret == -EAGAIN) + ret = 0; return ret; } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f3378ba1a828..b65bbf9f45eb 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -460,7 +460,6 @@ static void request_init(struct ceph_osd_request *req) kref_init(&req->r_kref); init_completion(&req->r_completion); - init_completion(&req->r_done_completion); RB_CLEAR_NODE(&req->r_node); RB_CLEAR_NODE(&req->r_mc_node); INIT_LIST_HEAD(&req->r_unsafe_item); @@ -672,7 +671,8 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req, BUG_ON(length > previous); op->extent.length = length; - op->indata_len -= previous - length; + if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) + op->indata_len -= previous - length; } EXPORT_SYMBOL(osd_req_op_extent_update); @@ -1636,7 +1636,7 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked) bool need_send = false; bool promoted = false; - WARN_ON(req->r_tid || req->r_got_reply); + WARN_ON(req->r_tid); dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); again: @@ -1704,17 +1704,10 @@ promote: static void account_request(struct ceph_osd_request *req) { - unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK; + WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); + WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); - if (req->r_flags & CEPH_OSD_FLAG_READ) { - WARN_ON(req->r_flags & mask); - req->r_flags |= CEPH_OSD_FLAG_ACK; - } else if (req->r_flags & CEPH_OSD_FLAG_WRITE) - WARN_ON(!(req->r_flags & mask)); - else - WARN_ON(1); - - WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask); + req->r_flags |= CEPH_OSD_FLAG_ONDISK; atomic_inc(&req->r_osdc->num_requests); } @@ -1749,15 +1742,15 @@ static void finish_request(struct ceph_osd_request *req) static void __complete_request(struct ceph_osd_request *req) { - if (req->r_callback) + if (req->r_callback) { + dout("%s req %p tid %llu cb %pf result %d\n", __func__, req, + req->r_tid, req->r_callback, req->r_result); req->r_callback(req); - else - complete_all(&req->r_completion); + } } 
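/*
 * Illustrative sketch, not part of the patch above: with ACK-based replies
 * gone, r_completion is the only completion a waiter needs. A synchronous
 * caller under this model looks roughly as follows; the helper name
 * sync_osd_op() is hypothetical, while the two ceph_osdc_*() calls are the
 * existing libceph API.
 */
static int sync_osd_op(struct ceph_osd_client *osdc,
		       struct ceph_osd_request *req)
{
	int ret;

	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret)
		return ret;

	/* Woken via complete_all(&req->r_completion) in handle_reply()
	 * or complete_request(). */
	return ceph_osdc_wait_request(osdc, req);
}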
/* - * Note that this is open-coded in handle_reply(), which has to deal - * with ack vs commit, dup acks, etc. + * This is open-coded in handle_reply(). */ static void complete_request(struct ceph_osd_request *req, int err) { @@ -1766,7 +1759,7 @@ static void complete_request(struct ceph_osd_request *req, int err) req->r_result = err; finish_request(req); __complete_request(req); - complete_all(&req->r_done_completion); + complete_all(&req->r_completion); ceph_osdc_put_request(req); } @@ -1792,7 +1785,7 @@ static void cancel_request(struct ceph_osd_request *req) cancel_map_check(req); finish_request(req); - complete_all(&req->r_done_completion); + complete_all(&req->r_completion); ceph_osdc_put_request(req); } @@ -2169,7 +2162,6 @@ static void linger_commit_cb(struct ceph_osd_request *req) mutex_lock(&lreq->lock); dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, lreq->linger_id, req->r_result); - WARN_ON(!__linger_registered(lreq)); linger_reg_commit_complete(lreq, req->r_result); lreq->committed = true; @@ -2785,31 +2777,8 @@ e_inval: } /* - * We are done with @req if - * - @m is a safe reply, or - * - @m is an unsafe reply and we didn't want a safe one - */ -static bool done_request(const struct ceph_osd_request *req, - const struct MOSDOpReply *m) -{ - return (m->result < 0 || - (m->flags & CEPH_OSD_FLAG_ONDISK) || - !(req->r_flags & CEPH_OSD_FLAG_ONDISK)); -} - -/* - * handle osd op reply. either call the callback if it is specified, - * or do the completion to wake up the waiting thread. - * - * ->r_unsafe_callback is set? yes no - * - * first reply is OK (needed r_cb/r_completion, r_cb/r_completion, - * any or needed/got safe) r_done_completion r_done_completion - * - * first reply is unsafe r_unsafe_cb(true) (nothing) - * - * when we get the safe reply r_unsafe_cb(false), r_cb/r_completion, - * r_done_completion r_done_completion + * Handle MOSDOpReply. Set ->r_result and call the callback if it is + * specified. */ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) { @@ -2818,7 +2787,6 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) struct MOSDOpReply m; u64 tid = le64_to_cpu(msg->hdr.tid); u32 data_len = 0; - bool already_acked; int ret; int i; @@ -2897,50 +2865,22 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) le32_to_cpu(msg->hdr.data_len), req->r_tid); goto fail_request; } - dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__, - req, req->r_tid, req->r_got_reply, m.result, data_len); - - already_acked = req->r_got_reply; - if (!already_acked) { - req->r_result = m.result ?: data_len; - req->r_replay_version = m.replay_version; /* struct */ - req->r_got_reply = true; - } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) { - dout("req %p tid %llu dup ack\n", req, req->r_tid); - goto out_unlock_session; - } - - if (done_request(req, &m)) { - finish_request(req); - if (req->r_linger) { - WARN_ON(req->r_unsafe_callback); - dout("req %p tid %llu cb (locked)\n", req, req->r_tid); - __complete_request(req); - } - } + dout("%s req %p tid %llu result %d data_len %u\n", __func__, + req, req->r_tid, m.result, data_len); + /* + * Since we only ever request ONDISK, we should only ever get + * one (type of) reply back. 
+ */ + WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); + req->r_result = m.result ?: data_len; + finish_request(req); mutex_unlock(&osd->lock); up_read(&osdc->lock); - if (done_request(req, &m)) { - if (already_acked && req->r_unsafe_callback) { - dout("req %p tid %llu safe-cb\n", req, req->r_tid); - req->r_unsafe_callback(req, false); - } else if (!req->r_linger) { - dout("req %p tid %llu cb\n", req, req->r_tid); - __complete_request(req); - } - complete_all(&req->r_done_completion); - ceph_osdc_put_request(req); - } else { - if (req->r_unsafe_callback) { - dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); - req->r_unsafe_callback(req, true); - } else { - WARN_ON(1); - } - } - + __complete_request(req); + complete_all(&req->r_completion); + ceph_osdc_put_request(req); return; fail_request: @@ -3540,7 +3480,7 @@ again: up_read(&osdc->lock); dout("%s waiting on req %p tid %llu last_tid %llu\n", __func__, req, req->r_tid, last_tid); - wait_for_completion(&req->r_done_completion); + wait_for_completion(&req->r_completion); ceph_osdc_put_request(req); goto again; } @@ -3599,7 +3539,7 @@ ceph_osdc_watch(struct ceph_osd_client *osdc, ceph_oid_copy(&lreq->t.base_oid, oid); ceph_oloc_copy(&lreq->t.base_oloc, oloc); - lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; + lreq->t.flags = CEPH_OSD_FLAG_WRITE; lreq->mtime = CURRENT_TIME; lreq->reg_req = alloc_linger_request(lreq); @@ -3657,7 +3597,7 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc, ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); - req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; + req->r_flags = CEPH_OSD_FLAG_WRITE; req->r_mtime = CURRENT_TIME; osd_req_op_watch_init(req, 0, lreq->linger_id, CEPH_OSD_WATCH_OP_UNWATCH); @@ -4022,7 +3962,7 @@ EXPORT_SYMBOL(ceph_osdc_maybe_request_map); * Execute an OSD class method on an object. 
* * @flags: CEPH_OSD_FLAG_* - * @resp_len: out param for reply length + * @resp_len: in/out param for reply length */ int ceph_osdc_call(struct ceph_osd_client *osdc, struct ceph_object_id *oid, @@ -4035,6 +3975,9 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, struct ceph_osd_request *req; int ret; + if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE)) + return -E2BIG; + req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); if (!req) return -ENOMEM; @@ -4053,7 +3996,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, 0, false, false); if (resp_page) osd_req_op_cls_response_data_pages(req, 0, &resp_page, - PAGE_SIZE, 0, false, false); + *resp_len, 0, false, false); ceph_osdc_start_request(osdc, req, false); ret = ceph_osdc_wait_request(osdc, req); @@ -4220,8 +4163,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, int page_align = off & ~PAGE_MASK; req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1, - CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, + CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc, truncate_seq, truncate_size, true); if (IS_ERR(req)) diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index d2436880b305..6824c0ec8373 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -153,6 +153,32 @@ bad: return -EINVAL; } +static void crush_finalize(struct crush_map *c) +{ + __s32 b; + + /* Space for the array of pointers to per-bucket workspace */ + c->working_size = sizeof(struct crush_work) + + c->max_buckets * sizeof(struct crush_work_bucket *); + + for (b = 0; b < c->max_buckets; b++) { + if (!c->buckets[b]) + continue; + + switch (c->buckets[b]->alg) { + default: + /* + * The base case, permutation variables and + * the pointer to the permutation array. + */ + c->working_size += sizeof(struct crush_work_bucket); + break; + } + /* Every bucket has a permutation array. 
*/ + c->working_size += c->buckets[b]->size * sizeof(__u32); + } +} + static struct crush_map *crush_decode(void *pbyval, void *end) { struct crush_map *c; @@ -246,10 +272,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end) b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); if (b->items == NULL) goto badmem; - b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); - if (b->perm == NULL) - goto badmem; - b->perm_n = 0; ceph_decode_need(p, end, b->size*sizeof(u32), bad); for (j = 0; j < b->size; j++) @@ -368,6 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end) dout("crush decode tunable chooseleaf_stable = %d\n", c->chooseleaf_stable); + crush_finalize(c); + done: dout("crush_decode success\n"); return c; @@ -719,7 +743,7 @@ struct ceph_osdmap *ceph_osdmap_alloc(void) map->pool_max = -1; map->pg_temp = RB_ROOT; map->primary_temp = RB_ROOT; - mutex_init(&map->crush_scratch_mutex); + mutex_init(&map->crush_workspace_mutex); return map; } @@ -753,6 +777,7 @@ void ceph_osdmap_destroy(struct ceph_osdmap *map) kfree(map->osd_weight); kfree(map->osd_addr); kfree(map->osd_primary_affinity); + kfree(map->crush_workspace); kfree(map); } @@ -808,6 +833,31 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) return 0; } +static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush) +{ + void *workspace; + size_t work_size; + + if (IS_ERR(crush)) + return PTR_ERR(crush); + + work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE); + dout("%s work_size %zu bytes\n", __func__, work_size); + workspace = kmalloc(work_size, GFP_NOIO); + if (!workspace) { + crush_destroy(crush); + return -ENOMEM; + } + crush_init_workspace(crush, workspace); + + if (map->crush) + crush_destroy(map->crush); + kfree(map->crush_workspace); + map->crush = crush; + map->crush_workspace = workspace; + return 0; +} + #define OSDMAP_WRAPPER_COMPAT_VER 7 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1 @@ -1214,13 +1264,9 @@ static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) /* crush */ ceph_decode_32_safe(p, end, len, e_inval); - map->crush = crush_decode(*p, min(*p + len, end)); - if (IS_ERR(map->crush)) { - err = PTR_ERR(map->crush); - map->crush = NULL; + err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); + if (err) goto bad; - } - *p += len; /* ignore the rest */ *p = end; @@ -1375,7 +1421,6 @@ e_inval: struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map) { - struct crush_map *newcrush = NULL; struct ceph_fsid fsid; u32 epoch = 0; struct ceph_timespec modified; @@ -1414,12 +1459,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, /* new crush? 
*/ ceph_decode_32_safe(p, end, len, e_inval); if (len > 0) { - newcrush = crush_decode(*p, min(*p+len, end)); - if (IS_ERR(newcrush)) { - err = PTR_ERR(newcrush); - newcrush = NULL; + err = osdmap_set_crush(map, + crush_decode(*p, min(*p + len, end))); + if (err) goto bad; - } *p += len; } @@ -1439,12 +1482,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, map->epoch++; map->modified = modified; - if (newcrush) { - if (map->crush) - crush_destroy(map->crush); - map->crush = newcrush; - newcrush = NULL; - } /* new_pools */ err = decode_new_pools(p, end, map); @@ -1505,8 +1542,6 @@ bad: print_hex_dump(KERN_DEBUG, "osdmap: ", DUMP_PREFIX_OFFSET, 16, 1, start, end - start, true); - if (newcrush) - crush_destroy(newcrush); return ERR_PTR(err); } @@ -1942,10 +1977,10 @@ static int do_crush(struct ceph_osdmap *map, int ruleno, int x, BUG_ON(result_max > CEPH_PG_MAX_SIZE); - mutex_lock(&map->crush_scratch_mutex); + mutex_lock(&map->crush_workspace_mutex); r = crush_do_rule(map->crush, ruleno, x, result, result_max, - weight, weight_max, map->crush_scratch_ary); - mutex_unlock(&map->crush_scratch_mutex); + weight, weight_max, map->crush_workspace); + mutex_unlock(&map->crush_workspace_mutex); return r; } @@ -1978,8 +2013,14 @@ static void pg_to_raw_osds(struct ceph_osdmap *osdmap, return; } - len = do_crush(osdmap, ruleno, pps, raw->osds, - min_t(int, pi->size, ARRAY_SIZE(raw->osds)), + if (pi->size > ARRAY_SIZE(raw->osds)) { + pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n", + pi->id, pi->crush_ruleset, pi->type, pi->size, + ARRAY_SIZE(raw->osds)); + return; + } + + len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size, osdmap->osd_weight, osdmap->max_osd); if (len < 0) { pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", diff --git a/net/ceph/snapshot.c b/net/ceph/snapshot.c index 154683f5f14c..705414e78ae0 100644 --- a/net/ceph/snapshot.c +++ b/net/ceph/snapshot.c @@ -18,8 +18,6 @@ * 02110-1301, USA. 
*/ -#include <stddef.h> - #include <linux/types.h> #include <linux/export.h> #include <linux/ceph/libceph.h> diff --git a/net/core/ethtool.c b/net/core/ethtool.c index be7bab1adcde..aecb2c7241b6 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -24,7 +24,7 @@ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/rtnetlink.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/net.h> /* diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b0c04cf4851d..3945821e9c1f 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -15,6 +15,7 @@ #include <net/switchdev.h> #include <linux/if_arp.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/nsproxy.h> #include <net/sock.h> #include <net/net_namespace.h> diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 3c4bbec39713..652468ff65b7 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -16,6 +16,8 @@ #include <linux/export.h> #include <linux/user_namespace.h> #include <linux/net_namespace.h> +#include <linux/sched/task.h> + #include <net/sock.h> #include <net/netlink.h> #include <net/net_namespace.h> diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 11fce17274f6..6ae56037bb13 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -12,6 +12,8 @@ #include <linux/slab.h> #include <linux/cgroup.h> #include <linux/fdtable.h> +#include <linux/sched/task.h> + #include <net/cls_cgroup.h> #include <net/sock.h> diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 756637dc7a57..0f9275ee5595 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -20,6 +20,8 @@ #include <linux/cgroup.h> #include <linux/rcupdate.h> #include <linux/atomic.h> +#include <linux/sched/task.h> + #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> diff --git a/net/core/scm.c b/net/core/scm.c index b6d83686e149..b1ff8a441748 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -14,6 +14,7 @@ #include <linux/capability.h> #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/user.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/stat.h> diff --git a/net/core/stream.c b/net/core/stream.c index f575bcf64af2..20231dbb1da0 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -13,6 +13,7 @@ */ #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/net.h> #include <linux/signal.h> #include <linux/tcp.h> diff --git a/net/dccp/output.c b/net/dccp/output.c index b66c84db0766..91a15b3c4915 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <net/inet_sock.h> #include <net/sock.h> diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index a90ed67027b0..e6e79eda9763 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -106,7 +106,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index ecc28cff08ab..af781010753b 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -37,8 +37,10 @@ #include <linux/module.h> #include <linux/slab.h> 
+#include <linux/cred.h> #include <linux/dns_resolver.h> #include <linux/err.h> + #include <keys/dns_resolver-type.h> #include <keys/user-type.h> @@ -70,7 +72,7 @@ int dns_query(const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry) { struct key *rkey; - const struct user_key_payload *upayload; + struct user_key_payload *upayload; const struct cred *saved_cred; size_t typelen, desclen; char *desc, *cp; @@ -141,7 +143,7 @@ int dns_query(const char *type, const char *name, size_t namelen, if (ret) goto put; - upayload = user_key_payload(rkey); + upayload = user_key_payload_locked(rkey); len = upayload->datalen; ret = -ENOMEM; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index e0bd013a1e5e..eedba7670b51 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -279,7 +279,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) pr_debug("name = %s, mtu = %u\n", dev->name, mtu); if (size > mtu) { - pr_debug("size = %Zu, mtu = %u\n", size, mtu); + pr_debug("size = %zu, mtu = %u\n", size, mtu); err = -EMSGSIZE; goto out_dev; } @@ -645,7 +645,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) pr_debug("name = %s, mtu = %u\n", dev->name, mtu); if (size > mtu) { - pr_debug("size = %Zu, mtu = %u\n", size, mtu); + pr_debug("size = %zu, mtu = %u\n", size, mtu); err = -EMSGSIZE; goto out_dev; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5d367b7ff542..cebedd545e5e 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 7db2ad2e82d3..b39a791f6756 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -319,7 +319,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, int ret, no_addr; struct fib_result res; struct flowi4 fl4; - struct net *net; + struct net *net = dev_net(dev); bool dev_match; fl4.flowi4_oif = 0; @@ -332,6 +332,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_flags = 0; + fl4.flowi4_uid = sock_net_uid(net, NULL); no_addr = idev->ifa_list == NULL; @@ -339,13 +340,12 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, trace_fib_validate_source(dev, &fl4); - net = dev_net(dev); if (fib_lookup(net, &fl4, &res, 0)) goto last_resort; if (res.type != RTN_UNICAST && (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) goto e_inval; - if (!rpf && !fib_num_tclassid_users(dev_net(dev)) && + if (!rpf && !fib_num_tclassid_users(net) && (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) goto last_resort; fib_combine_itag(itag, &res); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d8cea210af0e..2f0d8233950f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2388,7 +2388,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "Basic info: size of leaf:" - " %Zd bytes, size of tnode: %Zd bytes.\n", + " %zd bytes, size of tnode: %zd bytes.\n", LEAF_SIZE, TNODE_SIZE(0)); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index beacd028848c..c0317c940bcd 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2596,7 
+2596,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) const char *name = vif->dev ? vif->dev->name : "none"; seq_printf(seq, - "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", + "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", vif - mrt->vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index fcfd071f4705..bc1486f2c064 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -235,7 +235,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) } if ((unsigned int) *len < sizeof(struct sockaddr_in)) { - pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n", + pr_debug("SO_ORIGINAL_DST: len %d not %zu\n", *len, sizeof(struct sockaddr_in)); return -EINVAL; } diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c index f6f713376e6e..2f3895ddc275 100644 --- a/net/ipv4/netfilter/nf_log_arp.c +++ b/net/ipv4/netfilter/nf_log_arp.c @@ -69,7 +69,7 @@ static void dump_arp_packet(struct nf_log_buf *m, ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp); if (ap == NULL) { - nf_log_buf_add(m, " INCOMPLETE [%Zu bytes]", + nf_log_buf_add(m, " INCOMPLETE [%zu bytes]", skb->len - sizeof(_arph)); return; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cb494a5050f7..8471dd116771 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1876,6 +1876,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_flags = 0; fl4.daddr = daddr; fl4.saddr = saddr; + fl4.flowi4_uid = sock_net_uid(net, NULL); err = fib_lookup(net, &fl4, &res, 0); if (err != 0) { if (!IN_DEV_FORWARD(in_dev)) @@ -2008,6 +2009,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, { int res; + tos &= IPTOS_RT_MASK; rcu_read_lock(); /* Multicast recognition logic is moved from route cache to here. 
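The fib_frontend.c and route.c hunks above share one pattern: every flowi4 key built for an input-path FIB lookup now also carries a UID, taken from sock_net_uid(net, NULL) since these paths have no owning socket, and ip_route_input_noref() masks the tos with IPTOS_RT_MASK before the lookup. A minimal sketch of that pattern follows; it is illustrative only, not part of the patch, and example_input_lookup() is a made-up helper name, not kernel API.

#include <linux/string.h>
#include <net/ip_fib.h>
#include <net/route.h>
#include <net/sock.h>

/*
 * Illustrative sketch: build a flowi4 for an input-path FIB lookup the way
 * the hunks above do.  example_input_lookup() is a hypothetical helper.
 */
static int example_input_lookup(struct net *net, __be32 daddr, __be32 saddr,
				u8 tos, struct fib_result *res)
{
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	/* ip_route_input_noref() now masks the tos before the lookup */
	fl4.flowi4_tos = tos & IPTOS_RT_MASK;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	/* No socket owns a forwarded skb, so fall back to the netns overflow UID */
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	return fib_lookup(net, &fl4, res, 0);
}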
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 35b280361cb2..50a0f3e51d5b 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -27,6 +27,8 @@ #include <linux/kernel.h> #include <linux/random.h> #include <linux/module.h> +#include <linux/sched/clock.h> + #include <net/tcp.h> #define HYSTART_ACK_TRAIN 1 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3a2025f5bf2c..77362b88a661 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -43,6 +43,7 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index c795fee372c4..644ba59fbd9d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -693,6 +693,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p) u->link = p->link; u->i_key = p->i_key; u->o_key = p->o_key; + if (u->i_key) + u->i_flags |= GRE_KEY; + if (u->o_key) + u->o_flags |= GRE_KEY; u->proto = p->proto; memcpy(u->name, p->name, sizeof(u->name)); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index babaf3ec2742..6ba6c900ebcf 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1666,6 +1666,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns struct net *net = sock_net(sk); struct mr6_table *mrt; + if (sk->sk_type != SOCK_RAW || + inet_sk(sk)->inet_num != IPPROTO_ICMPV6) + return -EOPNOTSUPP; + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); if (!mrt) return -ENOENT; @@ -1677,9 +1681,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns switch (optname) { case MRT6_INIT: - if (sk->sk_type != SOCK_RAW || - inet_sk(sk)->inet_num != IPPROTO_ICMPV6) - return -EOPNOTSUPP; if (optlen < sizeof(int)) return -EINVAL; @@ -1815,6 +1816,10 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, struct net *net = sock_net(sk); struct mr6_table *mrt; + if (sk->sk_type != SOCK_RAW || + inet_sk(sk)->inet_num != IPPROTO_ICMPV6) + return -EOPNOTSUPP; + mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? 
: RT6_TABLE_DFLT); if (!mrt) return -ENOENT; diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index 055c51b80f5d..97c724224da7 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -64,7 +64,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr); /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ - nf_log_buf_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", + nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", ntohs(ih->payload_len) + sizeof(struct ipv6hdr), (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20, ih->hop_limit, diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index ab254041dab7..81adc29a448d 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -46,6 +46,7 @@ #include <linux/socket.h> #include <linux/sockios.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/net.h> #include <linux/irda.h> diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 817b1b186aff..f6061c4bb0a8 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -32,7 +32,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/seq_file.h> #include <linux/termios.h> #include <linux/tty.h> diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 1215693fdd22..7025dcb853d0 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -13,8 +13,9 @@ * 2) as a control channel (write commands, read events) */ -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> + #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ @@ -51,7 +52,7 @@ irnet_ctrl_write(irnet_socket * ap, char * next; /* Next command to process */ int length; /* Length of current command */ - DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); + DENTER(CTRL_TRACE, "(ap=0x%p, count=%zd)\n", ap, count); /* Check for overflow... 
*/ DABORT(count >= IRNET_MAX_COMMAND, -ENOMEM, @@ -66,7 +67,7 @@ irnet_ctrl_write(irnet_socket * ap, /* Safe terminate the string */ command[count] = '\0'; - DEBUG(CTRL_INFO, "Command line received is ``%s'' (%Zd).\n", + DEBUG(CTRL_INFO, "Command line received is ``%s'' (%zd).\n", command, count); /* Check every commands in the command line */ @@ -285,7 +286,7 @@ irnet_ctrl_read(irnet_socket * ap, char event[75]; ssize_t ret = 0; - DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); + DENTER(CTRL_TRACE, "(ap=0x%p, count=%zd)\n", ap, count); #ifdef INITIAL_DISCOVERY /* Check if we have read the log */ @@ -328,7 +329,7 @@ irnet_ctrl_read(irnet_socket * ap, if(ret != 0) { /* No, return the error code */ - DEXIT(CTRL_TRACE, " - ret %Zd\n", ret); + DEXIT(CTRL_TRACE, " - ret %zd\n", ret); return ret; } @@ -568,7 +569,7 @@ dev_irnet_write(struct file * file, { irnet_socket * ap = file->private_data; - DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", + DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); @@ -592,7 +593,7 @@ dev_irnet_read(struct file * file, { irnet_socket * ap = file->private_data; - DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", + DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 13190b38f22e..89bbde1081ce 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -17,7 +17,7 @@ #include <linux/list.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/init.h> diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index a646f3481240..309062f3debe 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -24,6 +24,8 @@ #include <linux/uaccess.h> #include <linux/workqueue.h> #include <linux/syscalls.h> +#include <linux/sched/signal.h> + #include <net/kcm.h> #include <net/netns/generic.h> #include <net/sock.h> diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index b58000efee73..8adab6335ced 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1058,10 +1058,10 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Debug */ if (session->send_seq) - l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n", + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n", session->name, data_len, session->ns - 1); else - l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n", + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n", session->name, data_len); if (session->debug & L2TP_MSG_DATA) { diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index c59712057dc8..d25038cfd64e 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) drop: IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); - return -1; + return 0; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 5e9296382420..06186d608a27 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -26,6 +26,8 @@ #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <net/llc.h> #include <net/llc_sap.h> #include <net/llc_pdu.h> diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 
c28b0af9c1f2..6e7b6a07b7d5 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -681,7 +681,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) 2 + /* NULL SSID */ /* Channel Switch Announcement */ 2 + sizeof(struct ieee80211_channel_sw_ie) + - /* Mesh Channel Swith Parameters */ + /* Mesh Channel Switch Parameters */ 2 + sizeof(struct ieee80211_mesh_chansw_params_ie) + 2 + 8 + /* supported rates */ 2 + 3; /* DS params */ diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index fcba70e57073..953d71e784a9 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -9,6 +9,8 @@ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/random.h> +#include <linux/rculist.h> + #include "ieee80211_i.h" #include "rate.h" #include "mesh.h" diff --git a/net/mac80211/status.c b/net/mac80211/status.c index a3af6e1bfd98..0dd7c351002d 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, unsigned long flags; spin_lock_irqsave(&local->ack_status_lock, flags); - skb = idr_find(&local->ack_status_frames, info->ack_frame_id); - if (skb) - idr_remove(&local->ack_status_frames, info->ack_frame_id); + skb = idr_remove(&local->ack_status_frames, info->ack_frame_id); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (!skb) diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 6a3e1c2181d3..1e1c9b20bab7 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -18,6 +18,8 @@ #include <linux/bug.h> #include <linux/completion.h> #include <linux/ieee802154.h> +#include <linux/rculist.h> + #include <crypto/aead.h> #include <crypto/skcipher.h> diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 096a45103f14..e6a2753dff9e 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1429,7 +1429,7 @@ int __init ip_vs_conn_init(void) "(size=%d, memory=%ldKbytes)\n", ip_vs_conn_tab_size, (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024); - IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", + IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n", sizeof(struct ip_vs_conn)); for (idx = 0; idx < ip_vs_conn_tab_size; idx++) diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 6be5c538b71e..75f798f8e83b 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -163,7 +163,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc) return -ENOMEM; svc->sched_data = s; - IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for " + IP_VS_DBG(6, "DH hash table (memory=%zdbytes) allocated for " "current service\n", sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); @@ -183,7 +183,7 @@ static void ip_vs_dh_done_svc(struct ip_vs_service *svc) /* release the table itself */ kfree_rcu(s, rcu_head); - IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n", + IP_VS_DBG(6, "DH hash table (memory=%zdbytes) released\n", sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); } diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index cccf4d637412..5824927cf8e0 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -356,7 +356,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) return -ENOMEM; svc->sched_data = tbl; - IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " + IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) allocated for " 
"current service\n", sizeof(*tbl)); /* @@ -393,7 +393,7 @@ static void ip_vs_lblc_done_svc(struct ip_vs_service *svc) /* release the table itself */ kfree_rcu(tbl, rcu_head); - IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", + IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) released\n", sizeof(*tbl)); } diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 796d70e47ddd..703f11877bee 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -519,7 +519,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) return -ENOMEM; svc->sched_data = tbl; - IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " + IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) allocated for " "current service\n", sizeof(*tbl)); /* @@ -556,7 +556,7 @@ static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc) /* release the table itself */ kfree_rcu(tbl, rcu_head); - IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", + IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) released\n", sizeof(*tbl)); } diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 1e373a5e44e3..16aaac6eedc9 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -239,7 +239,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc) return -ENOMEM; svc->sched_data = s; - IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for " + IP_VS_DBG(6, "SH hash table (memory=%zdbytes) allocated for " "current service\n", sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); @@ -259,7 +259,7 @@ static void ip_vs_sh_done_svc(struct ip_vs_service *svc) /* release the table itself */ kfree_rcu(s, rcu_head); - IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n", + IP_VS_DBG(6, "SH hash table (memory=%zdbytes) released\n", sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 9350530c16c1..b03c28084f81 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1791,7 +1791,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, u16 mtu, min_mtu; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); - IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", + IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); if (!ipvs->sync_state) { diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index e19a69787d99..4b2e1fb28bb4 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -410,7 +410,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct net *net = nf_ct_exp_net(expect); struct hlist_node *next; unsigned int h; - int ret = 1; + int ret = 0; if (!master_help) { ret = -ESHUTDOWN; @@ -460,14 +460,14 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, spin_lock_bh(&nf_conntrack_expect_lock); ret = __nf_ct_expect_check(expect); - if (ret <= 0) + if (ret < 0) goto out; nf_ct_expect_insert(expect); spin_unlock_bh(&nf_conntrack_expect_lock); nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report); - return ret; + return 0; out: spin_unlock_bh(&nf_conntrack_expect_lock); return ret; diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index e3ed20060878..4aecef4a89fb 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -300,7 
+300,7 @@ static int find_pattern(const char *data, size_t dlen, { size_t i = plen; - pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen); + pr_debug("find_pattern `%s': dlen = %zu\n", pattern, dlen); if (dlen <= plen) { /* Short packet: try for partial? */ diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 3b79f34b5095..de8782345c86 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -48,7 +48,7 @@ nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, if (helper == NULL) return NF_DROP; - /* This is an user-space helper not yet configured, skip. */ + /* This is a user-space helper not yet configured, skip. */ if ((helper->flags & (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) == NF_CT_HELPER_F_USERSPACE) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index c6b8022c0e47..bf548a7a71ec 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -528,6 +528,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, if (!nft_ct_tmpl_alloc_pcpu()) return -ENOMEM; nft_ct_pcpu_template_refcnt++; + len = sizeof(u16); break; #endif default: diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index 97f9649bcc7e..152d226552c1 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c @@ -258,7 +258,7 @@ static int nft_bitmap_init(const struct nft_set *set, { struct nft_bitmap *priv = nft_set_priv(set); - priv->bitmap_size = nft_bitmap_total_size(set->klen); + priv->bitmap_size = nft_bitmap_size(set->klen); return 0; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 016db6be94b9..14857afc9937 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -667,7 +667,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems, COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) return -EINVAL; - /* compat_xt_entry match has less strict aligment requirements, + /* compat_xt_entry match has less strict alignment requirements, * otherwise they are identical. In case of padding differences * we need to add compat version of xt_check_entry_match. 
*/ diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 16477df45b3b..3d705c688a27 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -13,6 +13,8 @@ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/file.h> +#include <linux/cred.h> + #include <net/sock.h> #include <net/inet_sock.h> #include <linux/netfilter/x_tables.h> diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index ed212ffc1d9d..4bbf4526b885 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -17,7 +17,7 @@ #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index b9edf5fae6ae..879885b31cce 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> +#include <linux/sched/signal.h> #include "nfc.h" #include "llcp.h" diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 8bad5624a27a..222bedcd9575 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -23,6 +23,7 @@ */ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/socket.h> #include <net/sock.h> diff --git a/net/phonet/socket.c b/net/phonet/socket.c index ffd5f2297584..a6c8da3ee893 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -27,6 +27,8 @@ #include <linux/kernel.h> #include <linux/net.h> #include <linux/poll.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/tcp_states.h> diff --git a/net/rds/ib.c b/net/rds/ib.c index 8d70884d7bb6..91fe46f1e4cc 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -111,8 +111,7 @@ static void rds_ib_dev_free(struct work_struct *work) kfree(i_ipaddr); } - if (rds_ibdev->vector_load) - kfree(rds_ibdev->vector_load); + kfree(rds_ibdev->vector_load); kfree(rds_ibdev); } diff --git a/net/rds/ib.h b/net/rds/ib.h index 540458928f3c..ec550626e221 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -136,7 +136,7 @@ struct rds_ib_connection { struct rds_ib_work_ring i_send_ring; struct rm_data_op *i_data_op; struct rds_header *i_send_hdrs; - u64 i_send_hdrs_dma; + dma_addr_t i_send_hdrs_dma; struct rds_ib_send_work *i_sends; atomic_t i_signaled_sends; @@ -146,7 +146,7 @@ struct rds_ib_connection { struct rds_ib_incoming *i_ibinc; u32 i_recv_data_rem; struct rds_header *i_recv_hdrs; - u64 i_recv_hdrs_dma; + dma_addr_t i_recv_hdrs_dma; struct rds_ib_recv_work *i_recvs; u64 i_ack_recv; /* last ACK received */ struct rds_ib_refill_cache i_cache_incs; @@ -164,7 +164,7 @@ struct rds_ib_connection { struct rds_header *i_ack; struct ib_send_wr i_ack_wr; struct ib_sge i_ack_sge; - u64 i_ack_dma; + dma_addr_t i_ack_dma; unsigned long i_ack_queued; /* Flow control related information @@ -235,7 +235,7 @@ struct rds_ib_device { int *vector_load; }; -#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device) +#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent) #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) /* bits for i_ack_flags */ diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 1c754f4acbe5..24c086db4511 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -45,7 +45,6 @@ struct rds_ib_fmr { struct ib_fmr *fmr; - u64 *dma; }; enum rds_ib_fr_state { diff --git a/net/rds/page.c b/net/rds/page.c index e2b5a5832d3d..7cc57e098ddb 100644 --- a/net/rds/page.c +++ b/net/rds/page.c 
@@ -45,35 +45,6 @@ struct rds_page_remainder { static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders); -/* - * returns 0 on success or -errno on failure. - * - * We don't have to worry about flush_dcache_page() as this only works - * with private pages. If, say, we were to do directed receive to pinned - * user pages we'd have to worry more about cache coherence. (Though - * the flush_dcache_page() in get_user_pages() would probably be enough). - */ -int rds_page_copy_user(struct page *page, unsigned long offset, - void __user *ptr, unsigned long bytes, - int to_user) -{ - unsigned long ret; - void *addr; - - addr = kmap(page); - if (to_user) { - rds_stats_add(s_copy_to_user, bytes); - ret = copy_to_user(ptr, addr + offset, bytes); - } else { - rds_stats_add(s_copy_from_user, bytes); - ret = copy_from_user(addr + offset, ptr, bytes); - } - kunmap(page); - - return ret ? -EFAULT : 0; -} -EXPORT_SYMBOL_GPL(rds_page_copy_user); - /** * rds_page_remainder_alloc - build up regions of a message. * diff --git a/net/rds/rds.h b/net/rds/rds.h index 07fff73dd4f3..966d2ee1f107 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -798,13 +798,6 @@ static inline int rds_message_verify_checksum(const struct rds_header *hdr) /* page.c */ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, gfp_t gfp); -int rds_page_copy_user(struct page *page, unsigned long offset, - void __user *ptr, unsigned long bytes, - int to_user); -#define rds_page_copy_to_user(page, offset, ptr, bytes) \ - rds_page_copy_user(page, offset, ptr, bytes, 1) -#define rds_page_copy_from_user(page, offset, ptr, bytes) \ - rds_page_copy_user(page, offset, ptr, bytes, 0) void rds_page_exit(void); /* recv.c */ diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 57bb52361e0f..5438f6725092 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -641,12 +641,12 @@ static int rds_tcp_init(void) ret = register_netdevice_notifier(&rds_tcp_dev_notifier); if (ret) { pr_warn("could not register rds_tcp_dev_notifier\n"); - goto out; + goto out_slab; } ret = register_pernet_subsys(&rds_tcp_net_ops); if (ret) - goto out_slab; + goto out_notifier; ret = rds_tcp_recv_init(); if (ret) @@ -664,9 +664,10 @@ out_recv: rds_tcp_recv_exit(); out_pernet: unregister_pernet_subsys(&rds_tcp_net_ops); -out_slab: +out_notifier: if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) pr_warn("could not unregister rds_tcp_dev_notifier\n"); +out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 9ad301c46b88..b8a1df2c9785 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -20,7 +20,7 @@ #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 40a1ef2adeb4..c3be03e8d098 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -76,6 +76,8 @@ #include <linux/slab.h> #include <linux/idr.h> #include <linux/timer.h> +#include <linux/sched/signal.h> + #include "ar-internal.h" __read_mostly unsigned int rxrpc_max_client_connections = 1000; diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 18c737a61d80..0a4e28477ad9 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -1065,7 +1065,7 @@ static long rxrpc_read(const struct key *key, switch (token->security_index) { case RXRPC_SECURITY_RXKAD: - toksize += 8 * 4; /* viceid, 
kvno, key*2, begin, + toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, * end, primary, tktlen */ toksize += RND(token->kad->ticket_len); break; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index c29362d50a92..28274a3c9831 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -14,6 +14,8 @@ #include <linux/net.h> #include <linux/skbuff.h> #include <linux/export.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" @@ -320,8 +322,10 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, /* Barriers against rxrpc_input_data(). */ hard_ack = call->rx_hard_ack; - top = smp_load_acquire(&call->rx_top); - for (seq = hard_ack + 1; before_eq(seq, top); seq++) { + seq = hard_ack + 1; + while (top = smp_load_acquire(&call->rx_top), + before_eq(seq, top) + ) { ix = seq & RXRPC_RXTX_BUFF_MASK; skb = call->rxtx_buffer[ix]; if (!skb) { @@ -394,6 +398,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, ret = 1; goto out; } + + seq++; } out: diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 0a6ef217aa8a..19b36c60fb4c 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -15,6 +15,8 @@ #include <linux/gfp.h> #include <linux/skbuff.h> #include <linux/export.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f219ff325ed4..b70aa57319ea 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -613,8 +613,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, goto err_mod; } - err = nla_memdup_cookie(a, tb); - if (err < 0) { + if (nla_memdup_cookie(a, tb) < 0) { + err = -ENOMEM; tcf_hash_release(a, bind); goto err_mod; } @@ -859,10 +859,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, goto out_module_put; err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); - if (err < 0) + if (err <= 0) goto out_module_put; - if (err == 0) - goto noflush_out; nla_nest_end(skb, nest); @@ -879,7 +877,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, out_module_put: module_put(ops->owner); err_out: -noflush_out: kfree_skb(skb); return err; } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 41c80b6c3906..ae7e4f5b348b 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -63,6 +63,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> +#include <linux/sched/loadavg.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/random.h> diff --git a/net/sctp/output.c b/net/sctp/output.c index 85406d5f8f41..71ce6b945dcb 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -177,7 +177,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, { sctp_xmit_t retval; - pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__, + pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__, packet, packet->size, chunk, chunk->skb ? 
chunk->skb->len : -1); switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8227bbbd077a..1b6d4574d2b0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -199,6 +199,7 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, sctp_scope_t scope, gfp_t gfp, int copy_flags) { struct sctp_sockaddr_entry *addr; + union sctp_addr laddr; int error = 0; rcu_read_lock(); @@ -220,7 +221,10 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, !(copy_flags & SCTP_ADDR6_PEERSUPP))) continue; - if (sctp_bind_addr_state(bp, &addr->a) != -1) + laddr = addr->a; + /* also works for setting ipv6 address port */ + laddr.v4.sin_port = htons(bp->port); + if (sctp_bind_addr_state(bp, &laddr) != -1) continue; error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a), diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b5321486fbed..6f0a9be50f50 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -57,6 +57,7 @@ #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> +#include <linux/sched/signal.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> @@ -4862,6 +4863,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) if (!asoc) return -EINVAL; + /* If there is a thread waiting on more sndbuf space for + * sending on this asoc, it cannot be peeled. + */ + if (waitqueue_active(&asoc->wait)) + return -EBUSY; + /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ @@ -7599,8 +7606,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); - if (sk != asoc->base.sk) - goto do_error; lock_sock(sk); *timeo_p = current_timeo; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 5b63ceb3bf37..3379668af368 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -643,9 +643,7 @@ void sctp_transport_reset(struct sctp_transport *t) t->srtt = 0; t->rttvar = 0; - /* Reset these additional varibles so that we have a clean - * slate. - */ + /* Reset these additional variables so that we have a clean slate. 
*/ t->partial_bytes_acked = 0; t->flight_size = 0; t->error_count = 0; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5d4208ad029e..85837ab90e89 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -27,6 +27,8 @@ #include <linux/inetdevice.h> #include <linux/workqueue.h> #include <linux/in.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/tcp.h> #include <net/smc.h> diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index cc6b6f8651eb..e41f594a1e1d 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -11,6 +11,8 @@ #include <linux/in.h> #include <linux/if_ether.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include <net/tcp.h> diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 03dfcc6b7661..67a71d170bed 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -9,6 +9,8 @@ */ #include <linux/workqueue.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include "smc.h" diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 5d1878732f46..c4ef9a4ec569 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -11,6 +11,8 @@ #include <linux/net.h> #include <linux/rcupdate.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include "smc.h" diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 6e73b28915ea..69a0013dd25c 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -15,6 +15,8 @@ #include <linux/net.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> +#include <linux/sched/signal.h> + #include <net/sock.h> #include "smc.h" diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 2bff63a73cf8..d2623b9f23d6 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/cred.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> @@ -464,8 +465,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) * Note that the cred_unused list must be time-ordered. 
*/ if (time_in_range(cred->cr_expire, expired, jiffies) && - test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) + test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { + freed = SHRINK_STOP; break; + } list_del_init(&cred->cr_lru); number_cred_unused--; @@ -520,7 +523,7 @@ static unsigned long rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { - return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; + return number_cred_unused * sysctl_vfs_cache_pressure / 100; } static void @@ -646,9 +649,6 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, cred->cr_auth = auth; cred->cr_ops = ops; cred->cr_expire = jiffies; -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - cred->cr_magic = RPCAUTH_CRED_MAGIC; -#endif cred->cr_uid = acred->uid; } EXPORT_SYMBOL_GPL(rpcauth_init_cred); @@ -876,8 +876,12 @@ int __init rpcauth_init_module(void) err = rpc_init_generic_auth(); if (err < 0) goto out2; - register_shrinker(&rpc_cred_shrinker); + err = register_shrinker(&rpc_cred_shrinker); + if (err < 0) + goto out3; return 0; +out3: + rpc_destroy_generic_auth(); out2: rpc_destroy_authunix(); out1: diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index cdeb1d814833..4f16953e4954 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -763,7 +763,7 @@ err_put_ctx: err: kfree(buf); out: - dprintk("RPC: %s returning %Zd\n", __func__, err); + dprintk("RPC: %s returning %zd\n", __func__, err); return err; } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 153082598522..a54a7a3d28f5 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1489,8 +1489,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) case RPC_GSS_PROC_DESTROY: if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) goto auth_err; - rsci->h.expiry_time = seconds_since_boot(); - set_bit(CACHE_NEGATIVE, &rsci->h.flags); + /* Delete the entry from the cache_list and call cache_put */ + sunrpc_cache_unhash(sn->rsc_cache, &rsci->h); if (resv->iov_len + 4 > PAGE_SIZE) goto drop; svc_putnl(resv, RPC_SUCCESS); diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 4d17376b2acb..5f3d527dff65 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c @@ -139,7 +139,4 @@ struct rpc_cred null_cred = { .cr_ops = &null_credops, .cr_count = ATOMIC_INIT(1), .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - .cr_magic = RPCAUTH_CRED_MAGIC, -#endif }; diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 306fc0f54596..82337e1ec9cd 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -14,12 +14,10 @@ #include <linux/sunrpc/auth.h> #include <linux/user_namespace.h> -#define NFS_NGROUPS 16 - struct unx_cred { struct rpc_cred uc_base; kgid_t uc_gid; - kgid_t uc_gids[NFS_NGROUPS]; + kgid_t uc_gids[UNX_NGROUPS]; }; #define uc_uid uc_base.cr_uid @@ -82,13 +80,13 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t if (acred->group_info != NULL) groups = acred->group_info->ngroups; - if (groups > NFS_NGROUPS) - groups = NFS_NGROUPS; + if (groups > UNX_NGROUPS) + groups = UNX_NGROUPS; cred->uc_gid = acred->gid; for (i = 0; i < groups; i++) cred->uc_gids[i] = acred->group_info->gid[i]; - if (i < NFS_NGROUPS) + if (i < UNX_NGROUPS) cred->uc_gids[i] = INVALID_GID; return &cred->uc_base; @@ -132,12 +130,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) if 
(acred->group_info != NULL) groups = acred->group_info->ngroups; - if (groups > NFS_NGROUPS) - groups = NFS_NGROUPS; + if (groups > UNX_NGROUPS) + groups = UNX_NGROUPS; for (i = 0; i < groups ; i++) if (!gid_eq(cred->uc_gids[i], acred->group_info->gid[i])) return 0; - if (groups < NFS_NGROUPS && gid_valid(cred->uc_gids[groups])) + if (groups < UNX_NGROUPS && gid_valid(cred->uc_gids[groups])) return 0; return 1; } @@ -166,7 +164,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid)); *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid)); hold = p++; - for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++) + for (i = 0; i < UNX_NGROUPS && gid_valid(cred->uc_gids[i]); i++) *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i])); *hold = htonl(p - hold - 1); /* gid array length */ *base = htonl((p - base - 1) << 2); /* cred length */ diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f39e3e11f9aa..79d55d949d9a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -362,11 +362,6 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd) cache_purge(cd); spin_lock(&cache_list_lock); write_lock(&cd->hash_lock); - if (cd->entries) { - write_unlock(&cd->hash_lock); - spin_unlock(&cache_list_lock); - goto out; - } if (current_detail == cd) current_detail = NULL; list_del_init(&cd->others); @@ -376,9 +371,6 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd) /* module must be being unloaded so its safe to kill the worker */ cancel_delayed_work_sync(&cache_cleaner); } - return; -out: - printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name); } EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail); @@ -497,13 +489,32 @@ EXPORT_SYMBOL_GPL(cache_flush); void cache_purge(struct cache_detail *detail) { - time_t now = seconds_since_boot(); - if (detail->flush_time >= now) - now = detail->flush_time + 1; - /* 'now' is the maximum value any 'last_refresh' can have */ - detail->flush_time = now; - detail->nextcheck = seconds_since_boot(); - cache_flush(); + struct cache_head *ch = NULL; + struct hlist_head *head = NULL; + struct hlist_node *tmp = NULL; + int i = 0; + + write_lock(&detail->hash_lock); + if (!detail->entries) { + write_unlock(&detail->hash_lock); + return; + } + + dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name); + for (i = 0; i < detail->hash_size; i++) { + head = &detail->hash_table[i]; + hlist_for_each_entry_safe(ch, tmp, head, cache_list) { + hlist_del_init(&ch->cache_list); + detail->entries--; + + set_bit(CACHE_CLEANED, &ch->flags); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(ch, detail); + cache_put(ch, detail); + write_lock(&detail->hash_lock); + } + } + write_unlock(&detail->hash_lock); } EXPORT_SYMBOL_GPL(cache_purge); @@ -717,7 +728,7 @@ void cache_clean_deferred(void *owner) /* * communicate with user-space * - * We have a magic /proc file - /proc/sunrpc/<cachename>/channel. + * We have a magic /proc file - /proc/net/rpc/<cachename>/channel. * On read, you get a full request, or block. * On write, an update request is processed. * Poll works if anything to read, and always allows write. @@ -1272,7 +1283,7 @@ EXPORT_SYMBOL_GPL(qword_get); /* - * support /proc/sunrpc/cache/$CACHENAME/content + * support /proc/net/rpc/$CACHENAME/content * as a seqfile. 
* We call ->cache_show passing NULL for the item to * get a header, then pass each real item in the cache @@ -1427,20 +1438,11 @@ static ssize_t read_flush(struct file *file, char __user *buf, struct cache_detail *cd) { char tbuf[22]; - unsigned long p = *ppos; size_t len; - snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time)); - len = strlen(tbuf); - if (p >= len) - return 0; - len -= p; - if (len > count) - len = count; - if (copy_to_user(buf, (void*)(tbuf+p), len)) - return -EFAULT; - *ppos += len; - return len; + len = snprintf(tbuf, sizeof(tbuf), "%lu\n", + convert_to_wallclock(cd->flush_time)); + return simple_read_from_buffer(buf, count, ppos, tbuf, len); } static ssize_t write_flush(struct file *file, const char __user *buf, @@ -1600,21 +1602,12 @@ static const struct file_operations cache_flush_operations_procfs = { .llseek = no_llseek, }; -static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net) +static void remove_cache_proc_entries(struct cache_detail *cd) { - struct sunrpc_net *sn; - - if (cd->u.procfs.proc_ent == NULL) - return; - if (cd->u.procfs.flush_ent) - remove_proc_entry("flush", cd->u.procfs.proc_ent); - if (cd->u.procfs.channel_ent) - remove_proc_entry("channel", cd->u.procfs.proc_ent); - if (cd->u.procfs.content_ent) - remove_proc_entry("content", cd->u.procfs.proc_ent); - cd->u.procfs.proc_ent = NULL; - sn = net_generic(net, sunrpc_net_id); - remove_proc_entry(cd->name, sn->proc_net_rpc); + if (cd->procfs) { + proc_remove(cd->procfs); + cd->procfs = NULL; + } } #ifdef CONFIG_PROC_FS @@ -1624,38 +1617,30 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) struct sunrpc_net *sn; sn = net_generic(net, sunrpc_net_id); - cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc); - if (cd->u.procfs.proc_ent == NULL) + cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); + if (cd->procfs == NULL) goto out_nomem; - cd->u.procfs.channel_ent = NULL; - cd->u.procfs.content_ent = NULL; p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR, - cd->u.procfs.proc_ent, - &cache_flush_operations_procfs, cd); - cd->u.procfs.flush_ent = p; + cd->procfs, &cache_flush_operations_procfs, cd); if (p == NULL) goto out_nomem; if (cd->cache_request || cd->cache_parse) { p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, - cd->u.procfs.proc_ent, - &cache_file_operations_procfs, cd); - cd->u.procfs.channel_ent = p; + cd->procfs, &cache_file_operations_procfs, cd); if (p == NULL) goto out_nomem; } if (cd->cache_show) { p = proc_create_data("content", S_IFREG|S_IRUSR, - cd->u.procfs.proc_ent, - &content_file_operations_procfs, cd); - cd->u.procfs.content_ent = p; + cd->procfs, &content_file_operations_procfs, cd); if (p == NULL) goto out_nomem; } return 0; out_nomem: - remove_cache_proc_entries(cd, net); + remove_cache_proc_entries(cd); return -ENOMEM; } #else /* CONFIG_PROC_FS */ @@ -1684,7 +1669,7 @@ EXPORT_SYMBOL_GPL(cache_register_net); void cache_unregister_net(struct cache_detail *cd, struct net *net) { - remove_cache_proc_entries(cd, net); + remove_cache_proc_entries(cd); sunrpc_destroy_cache_detail(cd); } EXPORT_SYMBOL_GPL(cache_unregister_net); @@ -1843,15 +1828,29 @@ int sunrpc_cache_register_pipefs(struct dentry *parent, struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); if (IS_ERR(dir)) return PTR_ERR(dir); - cd->u.pipefs.dir = dir; + cd->pipefs = dir; return 0; } EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) { - 
rpc_remove_cache_dir(cd->u.pipefs.dir); - cd->u.pipefs.dir = NULL; + if (cd->pipefs) { + rpc_remove_cache_dir(cd->pipefs); + cd->pipefs = NULL; + } } EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); +void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) +{ + write_lock(&cd->hash_lock); + if (!hlist_unhashed(&h->cache_list)){ + hlist_del_init(&h->cache_list); + cd->entries--; + write_unlock(&cd->hash_lock); + cache_put(h, cd); + } else + write_unlock(&cd->hash_lock); +} +EXPORT_SYMBOL_GPL(sunrpc_cache_unhash); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 1dc9f3bac099..52da3ce54bb5 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1453,21 +1453,6 @@ size_t rpc_max_bc_payload(struct rpc_clnt *clnt) EXPORT_SYMBOL_GPL(rpc_max_bc_payload); /** - * rpc_get_timeout - Get timeout for transport in units of HZ - * @clnt: RPC client to query - */ -unsigned long rpc_get_timeout(struct rpc_clnt *clnt) -{ - unsigned long ret; - - rcu_read_lock(); - ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval; - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(rpc_get_timeout); - -/** * rpc_force_rebind - force transport to check that remote port is unchanged * @clnt: client to rebind * @@ -2699,6 +2684,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; + unsigned long connect_timeout; unsigned long reconnect_timeout; unsigned char resvport; int ret = 0; @@ -2711,6 +2697,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, return -EAGAIN; } resvport = xprt->resvport; + connect_timeout = xprt->connect_timeout; reconnect_timeout = xprt->max_reconnect_timeout; rcu_read_unlock(); @@ -2720,7 +2707,10 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, goto out_put_switch; } xprt->resvport = resvport; - xprt->max_reconnect_timeout = reconnect_timeout; + if (xprt->ops->set_connect_timeout != NULL) + xprt->ops->set_connect_timeout(xprt, + connect_timeout, + reconnect_timeout); rpc_xprt_switch_set_roundrobin(xps); if (setup) { @@ -2737,26 +2727,39 @@ out_put_switch: } EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); +struct connect_timeout_data { + unsigned long connect_timeout; + unsigned long reconnect_timeout; +}; + static int -rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt, +rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { - unsigned long timeout = *((unsigned long *)data); + struct connect_timeout_data *timeo = data; - if (timeout < xprt->max_reconnect_timeout) - xprt->max_reconnect_timeout = timeout; + if (xprt->ops->set_connect_timeout) + xprt->ops->set_connect_timeout(xprt, + timeo->connect_timeout, + timeo->reconnect_timeout); return 0; } void -rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo) +rpc_set_connect_timeout(struct rpc_clnt *clnt, + unsigned long connect_timeout, + unsigned long reconnect_timeout) { + struct connect_timeout_data timeout = { + .connect_timeout = connect_timeout, + .reconnect_timeout = reconnect_timeout, + }; rpc_clnt_iterate_for_each_xprt(clnt, - rpc_xprt_cap_max_reconnect_timeout, - &timeo); + rpc_xprt_set_connect_timeout, + &timeout); } -EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); +EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) { diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e7b4d93566df..c8fd0b6c1618 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -16,11 +16,6 @@ static struct dentry *rpc_xprt_dir; unsigned int rpc_inject_disconnect; 
-struct rpc_clnt_iter { - struct rpc_clnt *clnt; - loff_t pos; -}; - static int tasks_show(struct seq_file *f, void *v) { @@ -47,12 +42,10 @@ static void * tasks_start(struct seq_file *f, loff_t *ppos) __acquires(&clnt->cl_lock) { - struct rpc_clnt_iter *iter = f->private; + struct rpc_clnt *clnt = f->private; loff_t pos = *ppos; - struct rpc_clnt *clnt = iter->clnt; struct rpc_task *task; - iter->pos = pos + 1; spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) if (pos-- == 0) @@ -63,12 +56,10 @@ tasks_start(struct seq_file *f, loff_t *ppos) static void * tasks_next(struct seq_file *f, void *v, loff_t *pos) { - struct rpc_clnt_iter *iter = f->private; - struct rpc_clnt *clnt = iter->clnt; + struct rpc_clnt *clnt = f->private; struct rpc_task *task = v; struct list_head *next = task->tk_task.next; - ++iter->pos; ++*pos; /* If there's another task on list, return it */ @@ -81,9 +72,7 @@ static void tasks_stop(struct seq_file *f, void *v) __releases(&clnt->cl_lock) { - struct rpc_clnt_iter *iter = f->private; - struct rpc_clnt *clnt = iter->clnt; - + struct rpc_clnt *clnt = f->private; spin_unlock(&clnt->cl_lock); } @@ -96,17 +85,13 @@ static const struct seq_operations tasks_seq_operations = { static int tasks_open(struct inode *inode, struct file *filp) { - int ret = seq_open_private(filp, &tasks_seq_operations, - sizeof(struct rpc_clnt_iter)); - + int ret = seq_open(filp, &tasks_seq_operations); if (!ret) { struct seq_file *seq = filp->private_data; - struct rpc_clnt_iter *iter = seq->private; - - iter->clnt = inode->i_private; + struct rpc_clnt *clnt = seq->private = inode->i_private; - if (!atomic_inc_not_zero(&iter->clnt->cl_count)) { - seq_release_private(inode, filp); + if (!atomic_inc_not_zero(&clnt->cl_count)) { + seq_release(inode, filp); ret = -EINVAL; } } @@ -118,10 +103,10 @@ static int tasks_release(struct inode *inode, struct file *filp) { struct seq_file *seq = filp->private_data; - struct rpc_clnt_iter *iter = seq->private; + struct rpc_clnt *clnt = seq->private; - rpc_release_client(iter->clnt); - return seq_release_private(inode, filp); + rpc_release_client(clnt); + return seq_release(inode, filp); } static const struct file_operations tasks_fops = { diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 75f290bddca1..a08aeb56b8e4 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -11,7 +11,7 @@ */ #include <linux/linkage.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/net.h> #include <linux/in.h> @@ -385,7 +385,7 @@ static int svc_uses_rpcbind(struct svc_serv *serv) for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; - if (progp->pg_vers[i]->vs_hidden == 0) + if (!progp->pg_vers[i]->vs_hidden) return 1; } } @@ -976,6 +976,13 @@ int svc_register(const struct svc_serv *serv, struct net *net, if (vers->vs_hidden) continue; + /* + * Don't register a UDP port if we need congestion + * control. + */ + if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP) + continue; + error = __svc_register(net, progp->pg_name, progp->pg_prog, i, family, proto, port); @@ -1169,6 +1176,21 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) !(versp = progp->pg_vers[vers])) goto err_bad_vers; + /* + * Some protocol versions (namely NFSv4) require some form of + * congestion control. (See RFC 7530 section 3.1 paragraph 2) + * In other words, UDP is not allowed. We mark those when setting + * up the svc_xprt, and verify that here. 
+ * + * The spec is not very clear about what error should be returned + * when someone tries to access a server that is listening on UDP + * for lower versions. RPC_PROG_MISMATCH seems to be the closest + * fit. + */ + if (versp->vs_need_cong_ctrl && + !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) + goto err_bad_vers; + procp = versp->vs_proc + proc; if (proc >= versp->vs_nproc || !procp->pc_func) goto err_bad_proc; @@ -1260,7 +1282,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) return 0; err_short_len: - svc_printk(rqstp, "short len %Zd, dropping request\n", + svc_printk(rqstp, "short len %zd, dropping request\n", argv->iov_len); goto close; diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 64af4f034de6..f81eaa8e0888 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -403,7 +403,7 @@ svcauth_unix_info_release(struct svc_xprt *xpt) /**************************************************************************** * auth.unix.gid cache * simple cache to map a UID to a list of GIDs - * because AUTH_UNIX aka AUTH_SYS has a max of 16 + * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS */ #define GID_HASHBITS 8 #define GID_HASHMAX (1<<GID_HASHBITS) @@ -810,7 +810,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ slen = svc_getnl(argv); /* gids length */ - if (slen > 16 || (len -= (slen + 2)*4) < 0) + if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0) goto badcred; cred->cr_group_info = groups_alloc(slen); if (cred->cr_group_info == NULL) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index de066acdb34e..8931e33b6541 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -278,7 +278,7 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) rqstp->rq_respages[0], tailoff); out: - dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", + dprintk("svc: socket %p sendto([%p %zu... 
], %d) = %d (addr %s)\n", svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf))); @@ -346,7 +346,7 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, if (len == buflen) set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", + dprintk("svc: socket %p recvfrom(%p, %zu) = %d\n", svsk, iov[0].iov_base, iov[0].iov_len, len); return len; } @@ -1306,6 +1306,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class, &svsk->sk_xprt, serv); set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); + set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags); if (sk->sk_state == TCP_LISTEN) { dprintk("setting up TCP socket for listening\n"); set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 7f1071e103ca..1f7082144e01 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1518,3 +1518,37 @@ out: } EXPORT_SYMBOL_GPL(xdr_process_buf); +/** + * xdr_stream_decode_string_dup - Decode and duplicate variable length string + * @xdr: pointer to xdr_stream + * @str: location to store pointer to string + * @maxlen: maximum acceptable string length + * @gfp_flags: GFP mask to use + * + * Return values: + * On success, returns length of NUL-terminated string stored in *@ptr + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the string would exceed @maxlen + * %-ENOMEM on memory allocation failure + */ +ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, + size_t maxlen, gfp_t gfp_flags) +{ + void *p; + ssize_t ret; + + ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); + if (ret > 0) { + char *s = kmalloc(ret + 1, gfp_flags); + if (s != NULL) { + memcpy(s, p, ret); + s[ret] = '\0'; + *str = s; + return strlen(s); + } + ret = -ENOMEM; + } + *str = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 9a6be030ca7d..b530a2852ba8 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -897,13 +897,11 @@ static void xprt_timer(struct rpc_task *task) return; dprintk("RPC: %5u xprt_timer\n", task->tk_pid); - spin_lock_bh(&xprt->transport_lock); if (!req->rq_reply_bytes_recvd) { if (xprt->ops->timer) xprt->ops->timer(xprt, task); } else task->tk_status = 0; - spin_unlock_bh(&xprt->transport_lock); } /** diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 1ebb09e1ac4f..59e64025ed96 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -310,10 +310,7 @@ fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, struct rpcrdma_mw *mw; while (!list_empty(&req->rl_registered)) { - mw = list_first_entry(&req->rl_registered, - struct rpcrdma_mw, mw_list); - list_del_init(&mw->mw_list); - + mw = rpcrdma_pop_mw(&req->rl_registered); if (sync) fmr_op_recover_mr(mw); else diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 47bed5333c7f..f81dd93176c0 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -466,8 +466,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) struct ib_send_wr *first, **prev, *last, *bad_wr; struct rpcrdma_rep *rep = req->rl_reply; struct rpcrdma_ia *ia = &r_xprt->rx_ia; - struct rpcrdma_mw *mw, *tmp; struct rpcrdma_frmr *f; + struct rpcrdma_mw *mw; int count, rc; dprintk("RPC: %s: req %p\n", __func__, 
req); @@ -534,10 +534,10 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) * them to the free MW list. */ unmap: - list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { + while (!list_empty(&req->rl_registered)) { + mw = rpcrdma_pop_mw(&req->rl_registered); dprintk("RPC: %s: DMA unmapping frmr %p\n", __func__, &mw->frmr); - list_del_init(&mw->mw_list); ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir); rpcrdma_put_mw(r_xprt, mw); @@ -571,10 +571,7 @@ frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, struct rpcrdma_mw *mw; while (!list_empty(&req->rl_registered)) { - mw = list_first_entry(&req->rl_registered, - struct rpcrdma_mw, mw_list); - list_del_init(&mw->mw_list); - + mw = rpcrdma_pop_mw(&req->rl_registered); if (sync) frwr_op_recover_mr(mw); else diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index c52e0f2ffe52..a044be2d6ad7 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -125,14 +125,34 @@ void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) /* The client can send a request inline as long as the RPCRDMA header * plus the RPC call fit under the transport's inline limit. If the * combined call message size exceeds that limit, the client must use - * the read chunk list for this operation. + * a Read chunk for this operation. + * + * A Read chunk is also required if sending the RPC call inline would + * exceed this device's max_sge limit. */ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) { - struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct xdr_buf *xdr = &rqst->rq_snd_buf; + unsigned int count, remaining, offset; + + if (xdr->len > r_xprt->rx_ia.ri_max_inline_write) + return false; + + if (xdr->page_len) { + remaining = xdr->page_len; + offset = xdr->page_base & ~PAGE_MASK; + count = 0; + while (remaining) { + remaining -= min_t(unsigned int, + PAGE_SIZE - offset, remaining); + offset = 0; + if (++count > r_xprt->rx_ia.ri_max_send_sges) + return false; + } + } - return rqst->rq_snd_buf.len <= ia->ri_max_inline_write; + return true; } /* The client can't know how large the actual reply will be. Thus it @@ -186,9 +206,9 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n) */ static int -rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, - enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, - bool reminv_expected) +rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, + unsigned int pos, enum rpcrdma_chunktype type, + struct rpcrdma_mr_seg *seg) { int len, n, p, page_base; struct page **ppages; @@ -226,22 +246,21 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, if (len && n == RPCRDMA_MAX_SEGS) goto out_overflow; - /* When encoding the read list, the tail is always sent inline */ - if (type == rpcrdma_readch) + /* When encoding a Read chunk, the tail iovec contains an + * XDR pad and may be omitted. + */ + if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup) return n; - /* When encoding the Write list, some servers need to see an extra - * segment for odd-length Write chunks. The upper layer provides - * space in the tail iovec for this purpose. + /* When encoding a Write chunk, some servers need to see an + * extra segment for non-XDR-aligned Write chunks. The upper + * layer provides space in the tail iovec that may be used + * for this purpose. 
*/ - if (type == rpcrdma_writech && reminv_expected) + if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup) return n; if (xdrbuf->tail[0].iov_len) { - /* the rpcrdma protocol allows us to omit any trailing - * xdr pad bytes, saving the server an RDMA operation. */ - if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) - return n; n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n); if (n == RPCRDMA_MAX_SEGS) goto out_overflow; @@ -293,7 +312,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, if (rtype == rpcrdma_areadch) pos = 0; seg = req->rl_segments; - nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false); + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, + rtype, seg); if (nsegs < 0) return ERR_PTR(nsegs); @@ -302,7 +322,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, false, &mw); if (n < 0) return ERR_PTR(n); - list_add(&mw->mw_list, &req->rl_registered); + rpcrdma_push_mw(mw, &req->rl_registered); *iptr++ = xdr_one; /* item present */ @@ -355,10 +375,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, } seg = req->rl_segments; - nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, rqst->rq_rcv_buf.head[0].iov_len, - wtype, seg, - r_xprt->rx_ia.ri_reminv_expected); + wtype, seg); if (nsegs < 0) return ERR_PTR(nsegs); @@ -371,7 +390,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, true, &mw); if (n < 0) return ERR_PTR(n); - list_add(&mw->mw_list, &req->rl_registered); + rpcrdma_push_mw(mw, &req->rl_registered); iptr = xdr_encode_rdma_segment(iptr, mw); @@ -423,8 +442,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, } seg = req->rl_segments; - nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg, - r_xprt->rx_ia.ri_reminv_expected); + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); if (nsegs < 0) return ERR_PTR(nsegs); @@ -437,7 +455,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, true, &mw); if (n < 0) return ERR_PTR(n); - list_add(&mw->mw_list, &req->rl_registered); + rpcrdma_push_mw(mw, &req->rl_registered); iptr = xdr_encode_rdma_segment(iptr, mw); @@ -741,13 +759,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) iptr = headerp->rm_body.rm_chunks; iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype); if (IS_ERR(iptr)) - goto out_unmap; + goto out_err; iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype); if (IS_ERR(iptr)) - goto out_unmap; + goto out_err; iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype); if (IS_ERR(iptr)) - goto out_unmap; + goto out_err; hdrlen = (unsigned char *)iptr - (unsigned char *)headerp; dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n", @@ -758,12 +776,14 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen, &rqst->rq_snd_buf, rtype)) { iptr = ERR_PTR(-EIO); - goto out_unmap; + goto out_err; } return 0; -out_unmap: - r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false); +out_err: + pr_err("rpcrdma: rpcrdma_marshal_req failed, status %ld\n", + PTR_ERR(iptr)); + r_xprt->rx_stats.failed_marshal_count++; return PTR_ERR(iptr); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index cb1e48e54eb1..ff1df40f0d26 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -201,19 +201,20 @@ rpcrdma_bc_send_request(struct 
svcxprt_rdma *rdma, struct rpc_rqst *rqst) { struct rpc_xprt *xprt = rqst->rq_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); - struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)rqst->rq_buffer; + __be32 *p; int rc; /* Space in the send buffer for an RPC/RDMA header is reserved * via xprt->tsh_size. */ - headerp->rm_xid = rqst->rq_xid; - headerp->rm_vers = rpcrdma_version; - headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests); - headerp->rm_type = rdma_msg; - headerp->rm_body.rm_chunks[0] = xdr_zero; - headerp->rm_body.rm_chunks[1] = xdr_zero; - headerp->rm_body.rm_chunks[2] = xdr_zero; + p = rqst->rq_buffer; + *p++ = rqst->rq_xid; + *p++ = rpcrdma_version; + *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests); + *p++ = rdma_msg; + *p++ = xdr_zero; + *p++ = xdr_zero; + *p = xdr_zero; #ifdef SVCRDMA_BACKCHANNEL_DEBUG pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer); diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c index 0ba9887f3e22..1c4aabf0f657 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c +++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2016 Oracle. All rights reserved. * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -47,102 +48,43 @@ #define RPCDBG_FACILITY RPCDBG_SVCXPRT -/* - * Decodes a read chunk list. The expected format is as follows: - * descrim : xdr_one - * position : __be32 offset into XDR stream - * handle : __be32 RKEY - * . . . - * end-of-list: xdr_zero - */ -static __be32 *decode_read_list(__be32 *va, __be32 *vaend) +static __be32 *xdr_check_read_list(__be32 *p, __be32 *end) { - struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va; + __be32 *next; - while (ch->rc_discrim != xdr_zero) { - if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) > - (unsigned long)vaend) { - dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch); + while (*p++ != xdr_zero) { + next = p + rpcrdma_readchunk_maxsz - 1; + if (next > end) return NULL; - } - ch++; + p = next; } - return &ch->rc_position; + return p; } -/* - * Decodes a write chunk list. The expected format is as follows: - * descrim : xdr_one - * nchunks : <count> - * handle : __be32 RKEY ---+ - * length : __be32 <len of segment> | - * offset : remove va + <count> - * . . . 
| - * ---+ - */ -static __be32 *decode_write_list(__be32 *va, __be32 *vaend) +static __be32 *xdr_check_write_list(__be32 *p, __be32 *end) { - unsigned long start, end; - int nchunks; - - struct rpcrdma_write_array *ary = - (struct rpcrdma_write_array *)va; + __be32 *next; - /* Check for not write-array */ - if (ary->wc_discrim == xdr_zero) - return &ary->wc_nchunks; - - if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) > - (unsigned long)vaend) { - dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); - return NULL; - } - nchunks = be32_to_cpu(ary->wc_nchunks); - - start = (unsigned long)&ary->wc_array[0]; - end = (unsigned long)vaend; - if (nchunks < 0 || - nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) || - (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) { - dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", - ary, nchunks, vaend); - return NULL; + while (*p++ != xdr_zero) { + next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz; + if (next > end) + return NULL; + p = next; } - /* - * rs_length is the 2nd 4B field in wc_target and taking its - * address skips the list terminator - */ - return &ary->wc_array[nchunks].wc_target.rs_length; + return p; } -static __be32 *decode_reply_array(__be32 *va, __be32 *vaend) +static __be32 *xdr_check_reply_chunk(__be32 *p, __be32 *end) { - unsigned long start, end; - int nchunks; - struct rpcrdma_write_array *ary = - (struct rpcrdma_write_array *)va; - - /* Check for no reply-array */ - if (ary->wc_discrim == xdr_zero) - return &ary->wc_nchunks; - - if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) > - (unsigned long)vaend) { - dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); - return NULL; - } - nchunks = be32_to_cpu(ary->wc_nchunks); - - start = (unsigned long)&ary->wc_array[0]; - end = (unsigned long)vaend; - if (nchunks < 0 || - nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) || - (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) { - dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", - ary, nchunks, vaend); - return NULL; + __be32 *next; + + if (*p++ != xdr_zero) { + next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz; + if (next > end) + return NULL; + p = next; } - return (__be32 *)&ary->wc_array[nchunks]; + return p; } /** @@ -158,87 +100,71 @@ static __be32 *decode_reply_array(__be32 *va, __be32 *vaend) */ int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg) { - struct rpcrdma_msg *rmsgp; - __be32 *va, *vaend; - unsigned int len; - u32 hdr_len; + __be32 *p, *end, *rdma_argp; + unsigned int hdr_len; /* Verify that there's enough bytes for header + something */ - if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) { - dprintk("svcrdma: header too short = %d\n", - rq_arg->len); - return -EINVAL; - } + if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) + goto out_short; - rmsgp = (struct rpcrdma_msg *)rq_arg->head[0].iov_base; - if (rmsgp->rm_vers != rpcrdma_version) { - dprintk("%s: bad version %u\n", __func__, - be32_to_cpu(rmsgp->rm_vers)); - return -EPROTONOSUPPORT; - } + rdma_argp = rq_arg->head[0].iov_base; + if (*(rdma_argp + 1) != rpcrdma_version) + goto out_version; - switch (be32_to_cpu(rmsgp->rm_type)) { - case RDMA_MSG: - case RDMA_NOMSG: + switch (*(rdma_argp + 3)) { + case rdma_msg: + case rdma_nomsg: break; - case RDMA_DONE: - /* Just drop it */ - dprintk("svcrdma: dropping RDMA_DONE message\n"); - return 0; - - case RDMA_ERROR: - /* Possible if this is a backchannel reply. - * XXX: We should cancel this XID, though. 
- */ - dprintk("svcrdma: dropping RDMA_ERROR message\n"); - return 0; - - case RDMA_MSGP: - /* Pull in the extra for the padded case, bump our pointer */ - rmsgp->rm_body.rm_padded.rm_align = - be32_to_cpu(rmsgp->rm_body.rm_padded.rm_align); - rmsgp->rm_body.rm_padded.rm_thresh = - be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh); - - va = &rmsgp->rm_body.rm_padded.rm_pempty[4]; - rq_arg->head[0].iov_base = va; - len = (u32)((unsigned long)va - (unsigned long)rmsgp); - rq_arg->head[0].iov_len -= len; - if (len > rq_arg->len) - return -EINVAL; - return len; - default: - dprintk("svcrdma: bad rdma procedure (%u)\n", - be32_to_cpu(rmsgp->rm_type)); - return -EINVAL; - } + case rdma_done: + goto out_drop; - /* The chunk list may contain either a read chunk list or a write - * chunk list and a reply chunk list. - */ - va = &rmsgp->rm_body.rm_chunks[0]; - vaend = (__be32 *)((unsigned long)rmsgp + rq_arg->len); - va = decode_read_list(va, vaend); - if (!va) { - dprintk("svcrdma: failed to decode read list\n"); - return -EINVAL; - } - va = decode_write_list(va, vaend); - if (!va) { - dprintk("svcrdma: failed to decode write list\n"); - return -EINVAL; - } - va = decode_reply_array(va, vaend); - if (!va) { - dprintk("svcrdma: failed to decode reply chunk\n"); - return -EINVAL; + case rdma_error: + goto out_drop; + + default: + goto out_proc; } - rq_arg->head[0].iov_base = va; - hdr_len = (unsigned long)va - (unsigned long)rmsgp; + end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len); + p = xdr_check_read_list(rdma_argp + 4, end); + if (!p) + goto out_inval; + p = xdr_check_write_list(p, end); + if (!p) + goto out_inval; + p = xdr_check_reply_chunk(p, end); + if (!p) + goto out_inval; + if (p > end) + goto out_inval; + + rq_arg->head[0].iov_base = p; + hdr_len = (unsigned long)p - (unsigned long)rdma_argp; rq_arg->head[0].iov_len -= hdr_len; return hdr_len; + +out_short: + dprintk("svcrdma: header too short = %d\n", rq_arg->len); + return -EINVAL; + +out_version: + dprintk("svcrdma: bad xprt version: %u\n", + be32_to_cpup(rdma_argp + 1)); + return -EPROTONOSUPPORT; + +out_drop: + dprintk("svcrdma: dropping RDMA_DONE/ERROR message\n"); + return 0; + +out_proc: + dprintk("svcrdma: bad rdma procedure (%u)\n", + be32_to_cpup(rdma_argp + 3)); + return -EINVAL; + +out_inval: + dprintk("svcrdma: failed to parse transport header\n"); + return -EINVAL; } int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt, @@ -249,7 +175,7 @@ int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt, *va++ = rmsgp->rm_xid; *va++ = rmsgp->rm_vers; - *va++ = cpu_to_be32(xprt->sc_max_requests); + *va++ = xprt->sc_fc_credits; *va++ = rdma_error; *va++ = cpu_to_be32(err); if (err == ERR_VERS) { @@ -260,32 +186,35 @@ int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt, return (int)((unsigned long)va - (unsigned long)startp); } -int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *rmsgp) +/** + * svc_rdma_xdr_get_reply_hdr_length - Get length of Reply transport header + * @rdma_resp: buffer containing Reply transport header + * + * Returns length of transport header, in bytes. + */ +unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp) { - struct rpcrdma_write_array *wr_ary; + unsigned int nsegs; + __be32 *p; - /* There is no read-list in a reply */ + p = rdma_resp; - /* skip write list */ - wr_ary = (struct rpcrdma_write_array *) - &rmsgp->rm_body.rm_chunks[1]; - if (wr_ary->wc_discrim) - wr_ary = (struct rpcrdma_write_array *) - &wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)]. 
- wc_target.rs_length; - else - wr_ary = (struct rpcrdma_write_array *) - &wr_ary->wc_nchunks; - - /* skip reply array */ - if (wr_ary->wc_discrim) - wr_ary = (struct rpcrdma_write_array *) - &wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)]; - else - wr_ary = (struct rpcrdma_write_array *) - &wr_ary->wc_nchunks; - - return (unsigned long) wr_ary - (unsigned long) rmsgp; + /* RPC-over-RDMA V1 replies never have a Read list. */ + p += rpcrdma_fixed_maxsz + 1; + + /* Skip Write list. */ + while (*p++ != xdr_zero) { + nsegs = be32_to_cpup(p++); + p += nsegs * rpcrdma_segment_maxsz; + } + + /* Skip Reply chunk. */ + if (*p++ != xdr_zero) { + nsegs = be32_to_cpup(p++); + p += nsegs * rpcrdma_segment_maxsz; + } + + return (unsigned long)p - (unsigned long)rdma_resp; } void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks) @@ -326,19 +255,3 @@ void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary, seg->rs_offset = rs_offset; seg->rs_length = cpu_to_be32(write_len); } - -void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt, - struct rpcrdma_msg *rdma_argp, - struct rpcrdma_msg *rdma_resp, - enum rpcrdma_proc rdma_type) -{ - rdma_resp->rm_xid = rdma_argp->rm_xid; - rdma_resp->rm_vers = rdma_argp->rm_vers; - rdma_resp->rm_credit = cpu_to_be32(xprt->sc_max_requests); - rdma_resp->rm_type = cpu_to_be32(rdma_type); - - /* Encode <nul> chunks lists */ - rdma_resp->rm_body.rm_chunks[0] = xdr_zero; - rdma_resp->rm_body.rm_chunks[1] = xdr_zero; - rdma_resp->rm_body.rm_chunks[2] = xdr_zero; -} diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 172b537f8cfc..f7b2daf72a86 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -606,26 +606,24 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) dprintk("svcrdma: rqstp=%p\n", rqstp); - spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); + spin_lock(&rdma_xprt->sc_rq_dto_lock); if (!list_empty(&rdma_xprt->sc_read_complete_q)) { - ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, - struct svc_rdma_op_ctxt, - dto_q); - list_del_init(&ctxt->dto_q); - spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); + ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q, + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); + spin_unlock(&rdma_xprt->sc_rq_dto_lock); rdma_read_complete(rqstp, ctxt); goto complete; } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { - ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, - struct svc_rdma_op_ctxt, - dto_q); - list_del_init(&ctxt->dto_q); + ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q, + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); } else { atomic_inc(&rdma_stat_rq_starve); clear_bit(XPT_DATA, &xprt->xpt_flags); ctxt = NULL; } - spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); + spin_unlock(&rdma_xprt->sc_rq_dto_lock); if (!ctxt) { /* This is the EAGAIN path. 
The svc_recv routine will * return -EAGAIN, the nfsd thread will go to call into diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index ad4d286a83c5..515221b16d09 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -476,7 +476,8 @@ static int send_reply(struct svcxprt_rdma *rdma, /* Prepare the SGE for the RPCRDMA Header */ ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey; - ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); + ctxt->sge[0].length = + svc_rdma_xdr_get_reply_hdr_len((__be32 *)rdma_resp); ctxt->sge[0].addr = ib_dma_map_page(rdma->sc_cm_id->device, page, 0, ctxt->sge[0].length, DMA_TO_DEVICE); @@ -559,12 +560,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) struct rpcrdma_msg *rdma_argp; struct rpcrdma_msg *rdma_resp; struct rpcrdma_write_array *wr_ary, *rp_ary; - enum rpcrdma_proc reply_type; int ret; int inline_bytes; struct page *res_page; struct svc_rdma_req_map *vec; u32 inv_rkey; + __be32 *p; dprintk("svcrdma: sending response for rqstp=%p\n", rqstp); @@ -596,12 +597,17 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) if (!res_page) goto err0; rdma_resp = page_address(res_page); - if (rp_ary) - reply_type = RDMA_NOMSG; - else - reply_type = RDMA_MSG; - svc_rdma_xdr_encode_reply_header(rdma, rdma_argp, - rdma_resp, reply_type); + + p = &rdma_resp->rm_xid; + *p++ = rdma_argp->rm_xid; + *p++ = rdma_argp->rm_vers; + *p++ = rdma->sc_fc_credits; + *p++ = rp_ary ? rdma_nomsg : rdma_msg; + + /* Start with empty chunks */ + *p++ = xdr_zero; + *p++ = xdr_zero; + *p = xdr_zero; /* Send any write-chunk data and build resp write-list */ if (wr_ary) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 39652d390a9c..c13a5c35ce14 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -157,8 +157,7 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt, ctxt = kmalloc(sizeof(*ctxt), flags); if (ctxt) { ctxt->xprt = xprt; - INIT_LIST_HEAD(&ctxt->free); - INIT_LIST_HEAD(&ctxt->dto_q); + INIT_LIST_HEAD(&ctxt->list); } return ctxt; } @@ -180,7 +179,7 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt) dprintk("svcrdma: No memory for RDMA ctxt\n"); return false; } - list_add(&ctxt->free, &xprt->sc_ctxts); + list_add(&ctxt->list, &xprt->sc_ctxts); } return true; } @@ -189,15 +188,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) { struct svc_rdma_op_ctxt *ctxt = NULL; - spin_lock_bh(&xprt->sc_ctxt_lock); + spin_lock(&xprt->sc_ctxt_lock); xprt->sc_ctxt_used++; if (list_empty(&xprt->sc_ctxts)) goto out_empty; ctxt = list_first_entry(&xprt->sc_ctxts, - struct svc_rdma_op_ctxt, free); - list_del_init(&ctxt->free); - spin_unlock_bh(&xprt->sc_ctxt_lock); + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); + spin_unlock(&xprt->sc_ctxt_lock); out: ctxt->count = 0; @@ -209,15 +208,15 @@ out_empty: /* Either pre-allocation missed the mark, or send * queue accounting is broken. 
*/ - spin_unlock_bh(&xprt->sc_ctxt_lock); + spin_unlock(&xprt->sc_ctxt_lock); ctxt = alloc_ctxt(xprt, GFP_NOIO); if (ctxt) goto out; - spin_lock_bh(&xprt->sc_ctxt_lock); + spin_lock(&xprt->sc_ctxt_lock); xprt->sc_ctxt_used--; - spin_unlock_bh(&xprt->sc_ctxt_lock); + spin_unlock(&xprt->sc_ctxt_lock); WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n"); return NULL; } @@ -254,10 +253,10 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) for (i = 0; i < ctxt->count; i++) put_page(ctxt->pages[i]); - spin_lock_bh(&xprt->sc_ctxt_lock); + spin_lock(&xprt->sc_ctxt_lock); xprt->sc_ctxt_used--; - list_add(&ctxt->free, &xprt->sc_ctxts); - spin_unlock_bh(&xprt->sc_ctxt_lock); + list_add(&ctxt->list, &xprt->sc_ctxts); + spin_unlock(&xprt->sc_ctxt_lock); } static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt) @@ -266,8 +265,8 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt) struct svc_rdma_op_ctxt *ctxt; ctxt = list_first_entry(&xprt->sc_ctxts, - struct svc_rdma_op_ctxt, free); - list_del(&ctxt->free); + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); kfree(ctxt); } } @@ -404,7 +403,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) /* All wc fields are now known to be valid */ ctxt->byte_len = wc->byte_len; spin_lock(&xprt->sc_rq_dto_lock); - list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); + list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q); spin_unlock(&xprt->sc_rq_dto_lock); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); @@ -525,7 +524,7 @@ void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc) read_hdr = ctxt->read_hdr; spin_lock(&xprt->sc_rq_dto_lock); - list_add_tail(&read_hdr->dto_q, + list_add_tail(&read_hdr->list, &xprt->sc_read_complete_q); spin_unlock(&xprt->sc_rq_dto_lock); @@ -557,7 +556,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, return NULL; svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv); INIT_LIST_HEAD(&cma_xprt->sc_accept_q); - INIT_LIST_HEAD(&cma_xprt->sc_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q); INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); @@ -571,6 +569,14 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, spin_lock_init(&cma_xprt->sc_ctxt_lock); spin_lock_init(&cma_xprt->sc_map_lock); + /* + * Note that this implies that the underlying transport support + * has some form of congestion control (see RFC 7530 section 3.1 + * paragraph 2). For now, we assume that all supported RDMA + * transports are suitable here. 
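+ * The TCP transport sets the same bit in svc_tcp_init().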
+ */ + set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags); + if (listener) set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags); @@ -923,14 +929,14 @@ struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma) { struct svc_rdma_fastreg_mr *frmr = NULL; - spin_lock_bh(&rdma->sc_frmr_q_lock); + spin_lock(&rdma->sc_frmr_q_lock); if (!list_empty(&rdma->sc_frmr_q)) { frmr = list_entry(rdma->sc_frmr_q.next, struct svc_rdma_fastreg_mr, frmr_list); list_del_init(&frmr->frmr_list); frmr->sg_nents = 0; } - spin_unlock_bh(&rdma->sc_frmr_q_lock); + spin_unlock(&rdma->sc_frmr_q_lock); if (frmr) return frmr; @@ -943,10 +949,10 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma, if (frmr) { ib_dma_unmap_sg(rdma->sc_cm_id->device, frmr->sg, frmr->sg_nents, frmr->direction); - spin_lock_bh(&rdma->sc_frmr_q_lock); + spin_lock(&rdma->sc_frmr_q_lock); WARN_ON_ONCE(!list_empty(&frmr->frmr_list)); list_add(&frmr->frmr_list, &rdma->sc_frmr_q); - spin_unlock_bh(&rdma->sc_frmr_q_lock); + spin_unlock(&rdma->sc_frmr_q_lock); } } @@ -1002,6 +1008,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) newxprt->sc_max_req_size = svcrdma_max_req_size; newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr, svcrdma_max_requests); + newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests); newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr, svcrdma_max_bc_requests); newxprt->sc_rq_depth = newxprt->sc_max_requests + @@ -1027,13 +1034,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) goto errout; } newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth, - 0, IB_POLL_SOFTIRQ); + 0, IB_POLL_WORKQUEUE); if (IS_ERR(newxprt->sc_sq_cq)) { dprintk("svcrdma: error creating SQ CQ for connect request\n"); goto errout; } newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth, - 0, IB_POLL_SOFTIRQ); + 0, IB_POLL_WORKQUEUE); if (IS_ERR(newxprt->sc_rq_cq)) { dprintk("svcrdma: error creating RQ CQ for connect request\n"); goto errout; @@ -1213,20 +1220,18 @@ static void __svc_rdma_free(struct work_struct *work) */ while (!list_empty(&rdma->sc_read_complete_q)) { struct svc_rdma_op_ctxt *ctxt; - ctxt = list_entry(rdma->sc_read_complete_q.next, - struct svc_rdma_op_ctxt, - dto_q); - list_del_init(&ctxt->dto_q); + ctxt = list_first_entry(&rdma->sc_read_complete_q, + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); svc_rdma_put_context(ctxt, 1); } /* Destroy queued, but not processed recv completions */ while (!list_empty(&rdma->sc_rq_dto_q)) { struct svc_rdma_op_ctxt *ctxt; - ctxt = list_entry(rdma->sc_rq_dto_q.next, - struct svc_rdma_op_ctxt, - dto_q); - list_del_init(&ctxt->dto_q); + ctxt = list_first_entry(&rdma->sc_rq_dto_q, + struct svc_rdma_op_ctxt, list); + list_del(&ctxt->list); svc_rdma_put_context(ctxt, 1); } diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 534c178d2a7e..c717f5410776 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -67,7 +67,7 @@ unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; static unsigned int xprt_rdma_inline_write_padding; static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; - int xprt_rdma_pad_optimize = 1; + int xprt_rdma_pad_optimize = 0; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) @@ -709,10 +709,6 @@ xprt_rdma_send_request(struct rpc_task *task) return 0; failed_marshal: - dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n", - __func__, rc); - if 
(rc == -EIO) - r_xprt->rx_stats.failed_marshal_count++; if (rc != -ENOTCONN) return rc; drop_connection: diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 11d07748f699..81cd31acf690 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -54,6 +54,7 @@ #include <linux/sunrpc/svc_rdma.h> #include <asm/bitops.h> #include <linux/module.h> /* try_module_get()/module_put() */ +#include <rdma/ib_cm.h> #include "xprt_rdma.h" @@ -208,6 +209,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, /* Default settings for RPC-over-RDMA Version One */ r_xprt->rx_ia.ri_reminv_expected = false; + r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize; rsize = RPCRDMA_V1_DEF_INLINE_SIZE; wsize = RPCRDMA_V1_DEF_INLINE_SIZE; @@ -215,6 +217,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, pmsg->cp_magic == rpcrdma_cmp_magic && pmsg->cp_version == RPCRDMA_CMP_VERSION) { r_xprt->rx_ia.ri_reminv_expected = true; + r_xprt->rx_ia.ri_implicit_roundup = true; rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); } @@ -277,7 +280,14 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) connstate = -ENETDOWN; goto connected; case RDMA_CM_EVENT_REJECTED: +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + pr_info("rpcrdma: connection to %pIS:%u on %s rejected: %s\n", + sap, rpc_get_port(sap), ia->ri_device->name, + rdma_reject_msg(id, event->status)); +#endif connstate = -ECONNREFUSED; + if (event->status == IB_CM_REJ_STALE_CONN) + connstate = -EAGAIN; goto connected; case RDMA_CM_EVENT_DISCONNECTED: connstate = -ECONNABORTED; @@ -486,18 +496,19 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia) */ int rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, - struct rpcrdma_create_data_internal *cdata) + struct rpcrdma_create_data_internal *cdata) { struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; + unsigned int max_qp_wr, max_sge; struct ib_cq *sendcq, *recvcq; - unsigned int max_qp_wr; int rc; - if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) { - dprintk("RPC: %s: insufficient sge's available\n", - __func__); + max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); + if (max_sge < RPCRDMA_MIN_SEND_SGES) { + pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); return -ENOMEM; } + ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { dprintk("RPC: %s: insufficient wqe's available\n", @@ -522,7 +533,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, ep->rep_attr.cap.max_recv_wr = cdata->max_requests; ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */ - ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES; + ep->rep_attr.cap.max_send_sge = max_sge; ep->rep_attr.cap.max_recv_sge = 1; ep->rep_attr.cap.max_inline_data = 0; ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; @@ -640,20 +651,21 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) int rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) { + struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt, + rx_ia); struct rdma_cm_id *id, *old; + struct sockaddr *sap; + unsigned int extras; int rc = 0; - int retry_count = 0; if (ep->rep_connected != 0) { - struct rpcrdma_xprt *xprt; retry: dprintk("RPC: %s: reconnecting...\n", __func__); rpcrdma_ep_disconnect(ep, ia); - xprt = container_of(ia, struct rpcrdma_xprt, 
rx_ia); - id = rpcrdma_create_id(xprt, ia, - (struct sockaddr *)&xprt->rx_data.addr); + sap = (struct sockaddr *)&r_xprt->rx_data.addr; + id = rpcrdma_create_id(r_xprt, ia, sap); if (IS_ERR(id)) { rc = -EHOSTUNREACH; goto out; @@ -708,51 +720,18 @@ retry: } wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); - - /* - * Check state. A non-peer reject indicates no listener - * (ECONNREFUSED), which may be a transient state. All - * others indicate a transport condition which has already - * undergone a best-effort. - */ - if (ep->rep_connected == -ECONNREFUSED && - ++retry_count <= RDMA_CONNECT_RETRY_MAX) { - dprintk("RPC: %s: non-peer_reject, retry\n", __func__); - goto retry; - } if (ep->rep_connected <= 0) { - /* Sometimes, the only way to reliably connect to remote - * CMs is to use same nonzero values for ORD and IRD. */ - if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 && - (ep->rep_remote_cma.responder_resources == 0 || - ep->rep_remote_cma.initiator_depth != - ep->rep_remote_cma.responder_resources)) { - if (ep->rep_remote_cma.responder_resources == 0) - ep->rep_remote_cma.responder_resources = 1; - ep->rep_remote_cma.initiator_depth = - ep->rep_remote_cma.responder_resources; + if (ep->rep_connected == -EAGAIN) goto retry; - } rc = ep->rep_connected; - } else { - struct rpcrdma_xprt *r_xprt; - unsigned int extras; - - dprintk("RPC: %s: connected\n", __func__); - - r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); - extras = r_xprt->rx_buf.rb_bc_srv_max_requests; - - if (extras) { - rc = rpcrdma_ep_post_extra_recv(r_xprt, extras); - if (rc) { - pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n", - __func__, rc); - rc = 0; - } - } + goto out; } + dprintk("RPC: %s: connected\n", __func__); + extras = r_xprt->rx_buf.rb_bc_srv_max_requests; + if (extras) + rpcrdma_ep_post_extra_recv(r_xprt, extras); + out: if (rc) ep->rep_connected = rc; @@ -797,9 +776,7 @@ rpcrdma_mr_recovery_worker(struct work_struct *work) spin_lock(&buf->rb_recovery_lock); while (!list_empty(&buf->rb_stale_mrs)) { - mw = list_first_entry(&buf->rb_stale_mrs, - struct rpcrdma_mw, mw_list); - list_del_init(&mw->mw_list); + mw = rpcrdma_pop_mw(&buf->rb_stale_mrs); spin_unlock(&buf->rb_recovery_lock); dprintk("RPC: %s: recovering MR %p\n", __func__, mw); @@ -817,7 +794,7 @@ rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw) struct rpcrdma_buffer *buf = &r_xprt->rx_buf; spin_lock(&buf->rb_recovery_lock); - list_add(&mw->mw_list, &buf->rb_stale_mrs); + rpcrdma_push_mw(mw, &buf->rb_stale_mrs); spin_unlock(&buf->rb_recovery_lock); schedule_delayed_work(&buf->rb_recovery_worker, 0); @@ -1093,11 +1070,8 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) struct rpcrdma_mw *mw = NULL; spin_lock(&buf->rb_mwlock); - if (!list_empty(&buf->rb_mws)) { - mw = list_first_entry(&buf->rb_mws, - struct rpcrdma_mw, mw_list); - list_del_init(&mw->mw_list); - } + if (!list_empty(&buf->rb_mws)) + mw = rpcrdma_pop_mw(&buf->rb_mws); spin_unlock(&buf->rb_mwlock); if (!mw) @@ -1120,7 +1094,7 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) struct rpcrdma_buffer *buf = &r_xprt->rx_buf; spin_lock(&buf->rb_mwlock); - list_add_tail(&mw->mw_list, &buf->rb_mws); + rpcrdma_push_mw(mw, &buf->rb_mws); spin_unlock(&buf->rb_mwlock); } diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index e35efd4ac1e4..171a35116de9 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -74,7 +74,9 @@ struct rpcrdma_ia { unsigned int ri_max_frmr_depth; unsigned int 
ri_max_inline_write; unsigned int ri_max_inline_read; + unsigned int ri_max_send_sges; bool ri_reminv_expected; + bool ri_implicit_roundup; enum ib_mr_type ri_mrtype; struct ib_qp_attr ri_qp_attr; struct ib_qp_init_attr ri_qp_init_attr; @@ -303,15 +305,19 @@ struct rpcrdma_mr_seg { /* chunk descriptors */ char *mr_offset; /* kva if no page, else offset */ }; -/* Reserve enough Send SGEs to send a maximum size inline request: +/* The Send SGE array is provisioned to send a maximum size + * inline request: * - RPC-over-RDMA header * - xdr_buf head iovec - * - RPCRDMA_MAX_INLINE bytes, possibly unaligned, in pages + * - RPCRDMA_MAX_INLINE bytes, in pages * - xdr_buf tail iovec + * + * The actual number of array elements consumed by each RPC + * depends on the device's max_sge limit. */ enum { - RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1, - RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1, + RPCRDMA_MIN_SEND_SGES = 3, + RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT, RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, }; @@ -348,6 +354,22 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) return rqst->rq_xprtdata; } +static inline void +rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list) +{ + list_add_tail(&mw->mw_list, list); +} + +static inline struct rpcrdma_mw * +rpcrdma_pop_mw(struct list_head *list) +{ + struct rpcrdma_mw *mw; + + mw = list_first_entry(list, struct rpcrdma_mw, mw_list); + list_del(&mw->mw_list); + return mw; +} + /* * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for * inline requests/replies, and client/server credits. diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index af392d9b9cec..16aff8ddc16f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -52,6 +52,8 @@ #include "sunrpc.h" static void xs_close(struct rpc_xprt *xprt); +static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, + struct socket *sock); /* * xprtsock tunables @@ -666,6 +668,9 @@ static int xs_tcp_send_request(struct rpc_task *task) if (task->tk_flags & RPC_TASK_SENT) zerocopy = false; + if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) + xs_tcp_set_socket_timeouts(xprt, transport->sock); + /* Continue transmitting the packet/record. We must be careful * to cope with writespace callbacks arriving _after_ we have * called sendmsg(). 
*/ @@ -1188,7 +1193,7 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r char *p; len = sizeof(transport->tcp_xid) - transport->tcp_offset; - dprintk("RPC: reading XID (%Zu bytes)\n", len); + dprintk("RPC: reading XID (%zu bytes)\n", len); p = ((char *) &transport->tcp_xid) + transport->tcp_offset; used = xdr_skb_read_bits(desc, p, len); transport->tcp_offset += used; @@ -1219,7 +1224,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, */ offset = transport->tcp_offset - sizeof(transport->tcp_xid); len = sizeof(transport->tcp_calldir) - offset; - dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len); + dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len); p = ((char *) &transport->tcp_calldir) + offset; used = xdr_skb_read_bits(desc, p, len); transport->tcp_offset += used; @@ -1310,7 +1315,7 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, return; } - dprintk("RPC: XID %08x read %Zd bytes\n", + dprintk("RPC: XID %08x read %zd bytes\n", ntohl(transport->tcp_xid), r); dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " "tcp_reclen = %u\n", xprt, transport->tcp_copied, @@ -1456,7 +1461,7 @@ static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_s desc->count -= len; desc->offset += len; transport->tcp_offset += len; - dprintk("RPC: discarded %Zu bytes\n", len); + dprintk("RPC: discarded %zu bytes\n", len); xs_tcp_check_fraghdr(transport); } @@ -1734,7 +1739,9 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t */ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) { + spin_lock_bh(&xprt->transport_lock); xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); + spin_unlock_bh(&xprt->transport_lock); } static unsigned short xs_get_random_port(void) @@ -2235,6 +2242,66 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt) xs_reset_transport(transport); } +static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, + struct socket *sock) +{ + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + unsigned int keepidle; + unsigned int keepcnt; + unsigned int opt_on = 1; + unsigned int timeo; + + spin_lock_bh(&xprt->transport_lock); + keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); + keepcnt = xprt->timeout->to_retries + 1; + timeo = jiffies_to_msecs(xprt->timeout->to_initval) * + (xprt->timeout->to_retries + 1); + clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); + spin_unlock_bh(&xprt->transport_lock); + + /* TCP Keepalive options */ + kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + (char *)&opt_on, sizeof(opt_on)); + kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, + (char *)&keepidle, sizeof(keepidle)); + kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, + (char *)&keepidle, sizeof(keepidle)); + kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, + (char *)&keepcnt, sizeof(keepcnt)); + + /* TCP user timeout (see RFC5482) */ + kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, + (char *)&timeo, sizeof(timeo)); +} + +static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, + unsigned long connect_timeout, + unsigned long reconnect_timeout) +{ + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + struct rpc_timeout to; + unsigned long initval; + + spin_lock_bh(&xprt->transport_lock); + if (reconnect_timeout < xprt->max_reconnect_timeout) + xprt->max_reconnect_timeout = reconnect_timeout; + if (connect_timeout < xprt->connect_timeout) { + memcpy(&to, xprt->timeout, sizeof(to)); + initval 
= DIV_ROUND_UP(connect_timeout, to.to_retries + 1); + /* Arbitrary lower limit */ + if (initval < XS_TCP_INIT_REEST_TO << 1) + initval = XS_TCP_INIT_REEST_TO << 1; + to.to_initval = initval; + to.to_maxval = initval; + memcpy(&transport->tcp_timeout, &to, + sizeof(transport->tcp_timeout)); + xprt->timeout = &transport->tcp_timeout; + xprt->connect_timeout = connect_timeout; + } + set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); + spin_unlock_bh(&xprt->transport_lock); +} + static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); @@ -2242,22 +2309,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) if (!transport->inet) { struct sock *sk = sock->sk; - unsigned int keepidle = xprt->timeout->to_initval / HZ; - unsigned int keepcnt = xprt->timeout->to_retries + 1; - unsigned int opt_on = 1; - unsigned int timeo; unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; - /* TCP Keepalive options */ - kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&opt_on, sizeof(opt_on)); - kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keepidle, sizeof(keepidle)); - kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keepidle, sizeof(keepidle)); - kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keepcnt, sizeof(keepcnt)); - /* Avoid temporary address, they are bad for long-lived * connections such as NFS mounts. * RFC4941, section 3.6 suggests that: @@ -2268,11 +2321,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, (char *)&addr_pref, sizeof(addr_pref)); - /* TCP user timeout (see RFC5482) */ - timeo = jiffies_to_msecs(xprt->timeout->to_initval) * - (xprt->timeout->to_retries + 1); - kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, - (char *)&timeo, sizeof(timeo)); + xs_tcp_set_socket_timeouts(xprt, sock); write_lock_bh(&sk->sk_callback_lock); @@ -2721,6 +2770,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { .set_retrans_timeout = xprt_set_retrans_timeout_def, .close = xs_tcp_shutdown, .destroy = xs_destroy, + .set_connect_timeout = xs_tcp_set_connect_timeout, .print_stats = xs_tcp_print_stats, .enable_swap = xs_enable_swap, .disable_swap = xs_disable_swap, @@ -3007,6 +3057,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) xprt->timeout = &xs_tcp_default_timeout; xprt->max_reconnect_timeout = xprt->timeout->to_maxval; + xprt->connect_timeout = xprt->timeout->to_initval * + (xprt->timeout->to_retries + 1); INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); @@ -3209,7 +3261,9 @@ static int param_set_uint_minmax(const char *val, if (!val) return -EINVAL; ret = kstrtouint(val, 0, &num); - if (ret == -EINVAL || num < min || num > max) + if (ret) + return ret; + if (num < min || num > max) return -EINVAL; *((unsigned int *)kp->arg) = num; return 0; diff --git a/net/tipc/node.c b/net/tipc/node.c index e9295fa3a554..4512e83652b1 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1505,19 +1505,21 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) { struct sk_buff_head xmitq; struct tipc_node *n; - struct tipc_msg *hdr = buf_msg(skb); - int usr = msg_user(hdr); + struct tipc_msg *hdr; int bearer_id = b->identity; struct tipc_link_entry *le; - u16 bc_ack = msg_bcast_ack(hdr); u32 self = tipc_own_addr(net); - int rc = 0; + int usr, rc = 
0; + u16 bc_ack; __skb_queue_head_init(&xmitq); - /* Ensure message is well-formed */ + /* Ensure message is well-formed before touching the header */ if (unlikely(!tipc_msg_validate(skb))) goto discard; + hdr = buf_msg(skb); + usr = msg_user(hdr); + bc_ack = msg_bcast_ack(hdr); /* Handle arrival of discovery or broadcast packet */ if (unlikely(msg_non_seq(hdr))) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 6b09a778cc71..43e4045e72bc 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -35,6 +35,8 @@ */ #include <linux/rhashtable.h> +#include <linux/sched/signal.h> + #include "core.h" #include "name_table.h" #include "node.h" diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e2d18b9f910f..ee37b390260a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -85,7 +85,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 8a398b3fb532..9192ead66751 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -90,6 +90,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/miscdevice.h> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 6788264acc63..9d24c0e958b1 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -532,7 +532,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->vdev = vdev; ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX, - vsock->vqs, callbacks, names); + vsock->vqs, callbacks, names, + NULL); if (ret < 0) goto out; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 849c4ad0411e..8d592a45b597 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -9,6 +9,7 @@ */ #include <linux/spinlock.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/virtio.h> diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 079c883aa96e..fd28a49dbe8f 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -41,7 +41,7 @@ #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5f3e87866438..0806dccdf507 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2836,14 +2836,8 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst) return mtu ? 
: dst_mtu(dst->path); } -static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, - struct sk_buff *skb, - const void *daddr) -{ - return dst->path->ops->neigh_lookup(dst, skb, daddr); -} - -static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) +static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, + const void *daddr) { const struct dst_entry *path = dst->path; @@ -2857,6 +2851,25 @@ static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) daddr = &xfrm->id.daddr; } + return daddr; +} + +static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, + struct sk_buff *skb, + const void *daddr) +{ + const struct dst_entry *path = dst->path; + + if (!skb) + daddr = xfrm_get_dst_nexthop(dst, daddr); + return path->ops->neigh_lookup(path, skb, daddr); +} + +static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) +{ + const struct dst_entry *path = dst->path; + + daddr = xfrm_get_dst_nexthop(dst, daddr); path->ops->confirm_neigh(path, daddr); } diff --git a/samples/Kconfig b/samples/Kconfig index b124f62ed6cb..9cb63188d3ef 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -112,4 +112,10 @@ config SAMPLE_VFIO_MDEV_MTTY Build a virtual tty sample driver for use as a VFIO mediated device +config SAMPLE_STATX + bool "Build example extended-stat using code" + depends on BROKEN + help + Build example userspace program to use the new extended-stat syscall. + endif # SAMPLES diff --git a/samples/Makefile b/samples/Makefile index 86a137e451d9..db54e766ddb1 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \ hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \ configfs/ connector/ v4l/ trace_printk/ blackfin/ \ - vfio-mdev/ + vfio-mdev/ statx/ diff --git a/samples/statx/Makefile b/samples/statx/Makefile new file mode 100644 index 000000000000..1f80a3d8cf45 --- /dev/null +++ b/samples/statx/Makefile @@ -0,0 +1,10 @@ +# kbuild trick to avoid linker error. Can be omitted if a module is built. +obj- := dummy.o + +# List of programs to build +hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx + +# Tell kbuild to always build the programs +always := $(hostprogs-y) + +HOSTCFLAGS_test-statx.o += -I$(objtree)/usr/include diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c new file mode 100644 index 000000000000..8571d766331d --- /dev/null +++ b/samples/statx/test-statx.c @@ -0,0 +1,254 @@ +/* Test the statx() system call. + * + * Note that the output of this program is intended to look like the output of + * /bin/stat where possible. + * + * Copyright (C) 2015 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#define _GNU_SOURCE +#define _ATFILE_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <ctype.h> +#include <errno.h> +#include <time.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <linux/stat.h> +#include <linux/fcntl.h> +#include <sys/stat.h> + +#define AT_STATX_SYNC_TYPE 0x6000 +#define AT_STATX_SYNC_AS_STAT 0x0000 +#define AT_STATX_FORCE_SYNC 0x2000 +#define AT_STATX_DONT_SYNC 0x4000 + +static __attribute__((unused)) +ssize_t statx(int dfd, const char *filename, unsigned flags, + unsigned int mask, struct statx *buffer) +{ + return syscall(__NR_statx, dfd, filename, flags, mask, buffer); +} + +static void print_time(const char *field, struct statx_timestamp *ts) +{ + struct tm tm; + time_t tim; + char buffer[100]; + int len; + + tim = ts->tv_sec; + if (!localtime_r(&tim, &tm)) { + perror("localtime_r"); + exit(1); + } + len = strftime(buffer, 100, "%F %T", &tm); + if (len == 0) { + perror("strftime"); + exit(1); + } + printf("%s", field); + fwrite(buffer, 1, len, stdout); + printf(".%09u", ts->tv_nsec); + len = strftime(buffer, 100, "%z", &tm); + if (len == 0) { + perror("strftime2"); + exit(1); + } + fwrite(buffer, 1, len, stdout); + printf("\n"); +} + +static void dump_statx(struct statx *stx) +{ + char buffer[256], ft = '?'; + + printf("results=%x\n", stx->stx_mask); + + printf(" "); + if (stx->stx_mask & STATX_SIZE) + printf(" Size: %-15llu", (unsigned long long)stx->stx_size); + if (stx->stx_mask & STATX_BLOCKS) + printf(" Blocks: %-10llu", (unsigned long long)stx->stx_blocks); + printf(" IO Block: %-6llu", (unsigned long long)stx->stx_blksize); + if (stx->stx_mask & STATX_TYPE) { + switch (stx->stx_mode & S_IFMT) { + case S_IFIFO: printf(" FIFO\n"); ft = 'p'; break; + case S_IFCHR: printf(" character special file\n"); ft = 'c'; break; + case S_IFDIR: printf(" directory\n"); ft = 'd'; break; + case S_IFBLK: printf(" block special file\n"); ft = 'b'; break; + case S_IFREG: printf(" regular file\n"); ft = '-'; break; + case S_IFLNK: printf(" symbolic link\n"); ft = 'l'; break; + case S_IFSOCK: printf(" socket\n"); ft = 's'; break; + default: + printf(" unknown type (%o)\n", stx->stx_mode & S_IFMT); + break; + } + } else { + printf(" no type\n"); + } + + sprintf(buffer, "%02x:%02x", stx->stx_dev_major, stx->stx_dev_minor); + printf("Device: %-15s", buffer); + if (stx->stx_mask & STATX_INO) + printf(" Inode: %-11llu", (unsigned long long) stx->stx_ino); + if (stx->stx_mask & STATX_NLINK) + printf(" Links: %-5u", stx->stx_nlink); + if (stx->stx_mask & STATX_TYPE) { + switch (stx->stx_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + printf(" Device type: %u,%u", + stx->stx_rdev_major, stx->stx_rdev_minor); + break; + } + } + printf("\n"); + + if (stx->stx_mask & STATX_MODE) + printf("Access: (%04o/%c%c%c%c%c%c%c%c%c%c) ", + stx->stx_mode & 07777, + ft, + stx->stx_mode & S_IRUSR ? 'r' : '-', + stx->stx_mode & S_IWUSR ? 'w' : '-', + stx->stx_mode & S_IXUSR ? 'x' : '-', + stx->stx_mode & S_IRGRP ? 'r' : '-', + stx->stx_mode & S_IWGRP ? 'w' : '-', + stx->stx_mode & S_IXGRP ? 'x' : '-', + stx->stx_mode & S_IROTH ? 'r' : '-', + stx->stx_mode & S_IWOTH ? 'w' : '-', + stx->stx_mode & S_IXOTH ? 
'x' : '-'); + if (stx->stx_mask & STATX_UID) + printf("Uid: %5d ", stx->stx_uid); + if (stx->stx_mask & STATX_GID) + printf("Gid: %5d\n", stx->stx_gid); + + if (stx->stx_mask & STATX_ATIME) + print_time("Access: ", &stx->stx_atime); + if (stx->stx_mask & STATX_MTIME) + print_time("Modify: ", &stx->stx_mtime); + if (stx->stx_mask & STATX_CTIME) + print_time("Change: ", &stx->stx_ctime); + if (stx->stx_mask & STATX_BTIME) + print_time(" Birth: ", &stx->stx_btime); + + if (stx->stx_attributes) { + unsigned char bits; + int loop, byte; + + static char attr_representation[64 + 1] = + /* STATX_ATTR_ flags: */ + "????????" /* 63-56 */ + "????????" /* 55-48 */ + "????????" /* 47-40 */ + "????????" /* 39-32 */ + "????????" /* 31-24 0x00000000-ff000000 */ + "????????" /* 23-16 0x00000000-00ff0000 */ + "???me???" /* 15- 8 0x00000000-0000ff00 */ + "?dai?c??" /* 7- 0 0x00000000-000000ff */ + ; + + printf("Attributes: %016llx (", stx->stx_attributes); + for (byte = 64 - 8; byte >= 0; byte -= 8) { + bits = stx->stx_attributes >> byte; + for (loop = 7; loop >= 0; loop--) { + int bit = byte + loop; + + if (bits & 0x80) + putchar(attr_representation[63 - bit]); + else + putchar('-'); + bits <<= 1; + } + if (byte) + putchar(' '); + } + printf(")\n"); + } +} + +static void dump_hex(unsigned long long *data, int from, int to) +{ + unsigned offset, print_offset = 1, col = 0; + + from /= 8; + to = (to + 7) / 8; + + for (offset = from; offset < to; offset++) { + if (print_offset) { + printf("%04x: ", offset * 8); + print_offset = 0; + } + printf("%016llx", data[offset]); + col++; + if ((col & 3) == 0) { + printf("\n"); + print_offset = 1; + } else { + printf(" "); + } + } + + if (!print_offset) + printf("\n"); +} + +int main(int argc, char **argv) +{ + struct statx stx; + int ret, raw = 0, atflag = AT_SYMLINK_NOFOLLOW; + + unsigned int mask = STATX_ALL; + + for (argv++; *argv; argv++) { + if (strcmp(*argv, "-F") == 0) { + atflag &= ~AT_STATX_SYNC_TYPE; + atflag |= AT_STATX_FORCE_SYNC; + continue; + } + if (strcmp(*argv, "-D") == 0) { + atflag &= ~AT_STATX_SYNC_TYPE; + atflag |= AT_STATX_DONT_SYNC; + continue; + } + if (strcmp(*argv, "-L") == 0) { + atflag &= ~AT_SYMLINK_NOFOLLOW; + continue; + } + if (strcmp(*argv, "-O") == 0) { + mask &= ~STATX_BASIC_STATS; + continue; + } + if (strcmp(*argv, "-A") == 0) { + atflag |= AT_NO_AUTOMOUNT; + continue; + } + if (strcmp(*argv, "-R") == 0) { + raw = 1; + continue; + } + + memset(&stx, 0xbf, sizeof(stx)); + ret = statx(AT_FDCWD, *argv, atflag, mask, &stx); + printf("statx(%s) = %d\n", *argv, ret); + if (ret < 0) { + perror(*argv); + exit(1); + } + + if (raw) + dump_hex((unsigned long long *)&stx, 0, sizeof(stx)); + + dump_statx(&stx); + } + return 0; +} diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index 30e282d33d4d..bc7fcf010a5b 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -33,7 +33,7 @@ static void simple_thread_func(int cnt) /* Silly tracepoints */ trace_foo_bar("hello", cnt, array, random_strings[len], - tsk_cpus_allowed(current)); + &current->cpus_allowed); trace_foo_with_template_simple("HELLO", cnt); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 918259a55f65..baa3c7be04ad 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -1848,6 +1848,8 @@ my $prefix = ''; sub show_type { my ($type) = @_; + $type =~ tr/[a-z]/[A-Z]/; + return defined $use_type{$type} if (scalar keys %use_type > 0); return !defined $ignore_type{$type}; 
@@ -5204,18 +5206,27 @@ sub process { "Consecutive strings are generally better as a single string\n" . $herecurr); } -# check for %L{u,d,i} and 0x%[udi] in strings - my $string; +# check for non-standard and hex prefixed decimal printf formats + my $show_L = 1; #don't show the same defect twice + my $show_Z = 1; while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) { - $string = substr($rawline, $-[1], $+[1] - $-[1]); + my $string = substr($rawline, $-[1], $+[1] - $-[1]); $string =~ s/%%/__/g; - if ($string =~ /(?<!%)%[\*\d\.\$]*L[udi]/) { + # check for %L + if ($show_L && $string =~ /%[\*\d\.\$]*L([diouxX])/) { WARN("PRINTF_L", - "\%Ld/%Lu are not-standard C, use %lld/%llu\n" . $herecurr); - last; - } - if ($string =~ /0x%[\*\d\.\$\Llzth]*[udi]/) { - ERROR("PRINTF_0xDECIMAL", + "\%L$1 is non-standard C, use %ll$1\n" . $herecurr); + $show_L = 0; + } + # check for %Z + if ($show_Z && $string =~ /%[\*\d\.\$]*Z([diouxX])/) { + WARN("PRINTF_Z", + "%Z$1 is non-standard C, use %z$1\n" . $herecurr); + $show_Z = 0; + } + # check for 0x<decimal> + if ($string =~ /0x%[\*\d\.\$\Llzth]*[diou]/) { + ERROR("PRINTF_0XDECIMAL", "Prefixing 0x with decimal output is defective\n" . $herecurr); } } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 4dedd0d3d3a7..30d752a4a6a6 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -854,6 +854,7 @@ static const char *const section_white_list[] = ".cmem*", /* EZchip */ ".fmt_slot*", /* EZchip */ ".gnu.lto*", + ".discard.*", NULL }; diff --git a/scripts/module-common.lds b/scripts/module-common.lds index 73a2c7da0e55..cf7e52e4781b 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds @@ -4,7 +4,10 @@ * combine them automatically. */ SECTIONS { - /DISCARD/ : { *(.discard) } + /DISCARD/ : { + *(.discard) + *(.discard.*) + } __ksymtab 0 : { *(SORT(___ksymtab+*)) } __ksymtab_gpl 0 : { *(SORT(___ksymtab_gpl+*)) } diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index faac4b10d8ea..0b6002b36f20 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -318,7 +318,7 @@ if ($arch eq "x86_64") { # instruction or the addiu one. herein, we record the address of the # first one, and then we can replace this instruction by a branch # instruction to jump over the profiling function to filter the - # indicated functions, or swith back to the lui instruction to trace + # indicated functions, or switch back to the lui instruction to trace # them, which means dynamic tracing. 
# # c: 3c030000 lui v1,0x0 diff --git a/scripts/spelling.txt b/scripts/spelling.txt index b3a1994b5df7..0458b037c8a1 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -62,15 +62,19 @@ adress||address adresses||addresses adviced||advised afecting||affecting +againt||against agaist||against albumns||albums alegorical||allegorical +algined||aligned algorith||algorithm algorithmical||algorithmically algoritm||algorithm algoritms||algorithms algorrithm||algorithm algorritm||algorithm +aligment||alignment +alignement||alignment allign||align allocatrd||allocated allocte||allocate @@ -86,6 +90,10 @@ alue||value ambigious||ambiguous amoung||among amout||amount +an union||a union +an user||a user +an userspace||a userspace +an one||a one analysator||analyzer ang||and anniversery||anniversary @@ -98,6 +106,7 @@ appearence||appearance applicaion||application appliction||application applictions||applications +applys||applies appplications||applications appropiate||appropriate appropriatly||appropriately @@ -237,6 +246,9 @@ commited||committed commiting||committing committ||commit commoditiy||commodity +comsume||consume +comsumer||consumer +comsuming||consuming compability||compatibility compaibility||compatibility compatability||compatibility @@ -258,6 +270,7 @@ comunication||communication conbination||combination conditionaly||conditionally conected||connected +configuartion||configuration configuratoin||configuration configuraton||configuration configuretion||configuration @@ -310,6 +323,9 @@ defintion||definition defintions||definitions defualt||default defult||default +deintializing||deinitializing +deintialize||deinitialize +deintialized||deinitialized deivce||device delared||declared delare||declare @@ -352,6 +368,7 @@ differrence||difference difinition||definition diplay||display direectly||directly +disassocation||disassociation disapear||disappear disapeared||disappeared disappared||disappeared @@ -375,10 +392,12 @@ easilly||easily ecspecially||especially edditable||editable editting||editing +efective||effective efficently||efficiently ehther||ether eigth||eight eletronic||electronic +embeded||embedded enabledi||enabled enchanced||enhanced encorporating||incorporating @@ -414,6 +433,7 @@ expecially||especially explicite||explicit explicitely||explicitly explict||explicit +explictely||explicitly explictly||explicitly expresion||expression exprimental||experimental @@ -445,6 +465,7 @@ finsih||finish flusing||flushing folloing||following followign||following +followings||following follwing||following forseeable||foreseeable forse||force @@ -537,6 +558,7 @@ initalise||initialize initalize||initialize initation||initiation initators||initiators +initialiazation||initialization initializiation||initialization initialzed||initialized initilization||initialization @@ -566,6 +588,7 @@ interruptted||interrupted interupted||interrupted interupt||interrupt intial||initial +intialization||initialization intialized||initialized intialize||initialize intregral||integral @@ -666,6 +689,7 @@ neccecary||necessary neccesary||necessary neccessary||necessary necesary||necessary +neded||needed negaive||negative negoitation||negotiation negotation||negotiation @@ -688,6 +712,8 @@ occure||occurred occured||occurred occuring||occurring offet||offset +omited||omitted +omiting||omitting omitt||omit ommiting||omitting ommitted||omitted @@ -706,8 +732,11 @@ oustanding||outstanding overaall||overall overhread||overhead overlaping||overlapping +overrided||overridden overriden||overridden overun||overrun 
+overwritting||overwriting +overwriten||overwritten pacakge||package pachage||package packacge||package @@ -718,6 +747,7 @@ pakage||package pallette||palette paln||plan paramameters||parameters +paramaters||parameters paramater||parameter parametes||parameters parametised||parametrised @@ -962,6 +992,7 @@ straming||streaming struc||struct structres||structures stuct||struct +strucuture||structure stucture||structure sturcture||structure subdirectoires||subdirectories @@ -991,6 +1022,13 @@ suspeneded||suspended suspicously||suspiciously swaping||swapping switchs||switches +swith||switch +swithable||switchable +swithc||switch +swithced||switched +swithcing||switching +swithed||switched +swithing||switching symetric||symmetric synax||syntax synchonized||synchronized @@ -1007,6 +1045,7 @@ targetting||targeting teh||the temorary||temporary temproarily||temporarily +therfore||therefore thier||their threds||threads threshhold||threshold @@ -1050,6 +1089,7 @@ unkmown||unknown unknonw||unknown unknow||unknown unkown||unknown +unneded||unneeded unneedingly||unnecessarily unnsupported||unsupported unmached||unmatched @@ -1078,6 +1118,7 @@ vaid||valid vaild||valid valide||valid variantions||variations +varible||variable varient||variant vaule||value verbse||verbose diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index f44312a19522..def1fbd6bdfd 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -76,6 +76,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/cred.h> +#include <linux/rculist.h> #include <linux/user_namespace.h> #include "include/apparmor.h" diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index e2ed498c0f5f..063d38aef64e 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -22,6 +22,8 @@ #include <linux/xattr.h> #include <linux/integrity.h> #include <linux/evm.h> +#include <linux/magic.h> + #include <crypto/hash.h> #include <crypto/algapi.h> #include "evm.h" diff --git a/security/keys/dh.c b/security/keys/dh.c index 531ed2ec132f..893af4c45038 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -55,7 +55,7 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi) if (status == 0) { const struct user_key_payload *payload; - payload = user_key_payload(key); + payload = user_key_payload_locked(key); if (maxlen == 0) { *mpi = NULL; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 4fb315cddf5b..0010955d7876 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -314,7 +314,7 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k goto error; down_read(&ukey->sem); - upayload = user_key_payload(ukey); + upayload = user_key_payload_locked(ukey); *master_key = upayload->data; *master_keylen = upayload->datalen; error: @@ -926,7 +926,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, size_t asciiblob_len; int ret; - epayload = rcu_dereference_key(key); + epayload = dereference_key_locked(key); /* returns the hex encoded iv, encrypted-data, and hmac as ascii */ asciiblob_len = epayload->datablob_len + ivsize + 1 diff --git a/security/keys/internal.h b/security/keys/internal.h index a705a7d92ad7..a2f4c0abb8d8 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -13,6 +13,7 @@ #define _INTERNAL_H #include <linux/sched.h> +#include 
<linux/cred.h> #include <linux/key-type.h> #include <linux/task_work.h> #include <linux/keyctl.h> diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 04a764f71ec8..52c34532c785 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -12,12 +12,14 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> +#include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> diff --git a/security/keys/persistent.c b/security/keys/persistent.c index 1edc1f0a0ce2..d0cb5b32eff7 100644 --- a/security/keys/persistent.c +++ b/security/keys/persistent.c @@ -10,6 +10,8 @@ */ #include <linux/user_namespace.h> +#include <linux/cred.h> + #include "internal.h" unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */ diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 918cddcd4516..b6fdd22205b1 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/user.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/err.h> diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 90d61751ff12..2ae31c5a87de 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1140,12 +1140,12 @@ out: static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { - struct trusted_key_payload *p; + const struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; - p = rcu_dereference_key(key); + p = dereference_key_locked(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index e187c8909d9d..26605134f17a 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -107,7 +107,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) /* attach the new data, displacing the old */ key->expiry = prep->expiry; if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) - zap = rcu_dereference_key(key); + zap = dereference_key_locked(key); rcu_assign_keypointer(key, prep->payload.data[0]); prep->payload.data[0] = NULL; @@ -123,7 +123,7 @@ EXPORT_SYMBOL_GPL(user_update); */ void user_revoke(struct key *key) { - struct user_key_payload *upayload = key->payload.data[0]; + struct user_key_payload *upayload = user_key_payload_locked(key); /* clear the quota */ key_payload_reserve(key, 0); @@ -169,7 +169,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) const struct user_key_payload *upayload; long ret; - upayload = user_key_payload(key); + upayload = user_key_payload_locked(key); ret = upayload->datalen; /* we can return the data as is */ diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 9a8f12f8d5b7..0c2ac318aa7f 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -28,7 +28,8 @@ #include <linux/kernel.h> #include <linux/tracehook.h> #include <linux/errno.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task.h> #include <linux/lsm_hooks.h> #include <linux/xattr.h> #include <linux/capability.h> @@ -480,12 +481,13 @@ static int selinux_is_sblabel_mnt(struct super_block *sb) sbsec->behavior == SECURITY_FS_USE_NATIVE || /* 
Special handling. Genfs but also in-core setxattr handler */ !strcmp(sb->s_type->name, "sysfs") || - !strcmp(sb->s_type->name, "cgroup") || - !strcmp(sb->s_type->name, "cgroup2") || !strcmp(sb->s_type->name, "pstore") || !strcmp(sb->s_type->name, "debugfs") || !strcmp(sb->s_type->name, "tracefs") || - !strcmp(sb->s_type->name, "rootfs"); + !strcmp(sb->s_type->name, "rootfs") || + (selinux_policycap_cgroupseclabel && + (!strcmp(sb->s_type->name, "cgroup") || + !strcmp(sb->s_type->name, "cgroup2"))); } static int sb_finish_set_opts(struct super_block *sb) diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index beaa14b8b6cf..f979c35e037e 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -71,6 +71,7 @@ enum { POLICYDB_CAPABILITY_OPENPERM, POLICYDB_CAPABILITY_EXTSOCKCLASS, POLICYDB_CAPABILITY_ALWAYSNETWORK, + POLICYDB_CAPABILITY_CGROUPSECLABEL, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) @@ -79,6 +80,7 @@ extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; extern int selinux_policycap_extsockclass; extern int selinux_policycap_alwaysnetwork; +extern int selinux_policycap_cgroupseclabel; /* * type_datum properties diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index c9e8a9898ce4..cb3fd98fb05a 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -46,7 +46,8 @@ static char *policycap_names[] = { "network_peer_controls", "open_perms", "extended_socket_class", - "always_check_network" + "always_check_network", + "cgroup_seclabel" }; unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 7d10e5d418bb..9db4709a6877 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -360,7 +360,7 @@ int ebitmap_read(struct ebitmap *e, void *fp) if (mapunit != BITS_PER_U64) { printk(KERN_ERR "SELinux: ebitmap: map size %u does not " - "match my size %Zd (high bit was %d)\n", + "match my size %zd (high bit was %d)\n", mapunit, BITS_PER_U64, e->highbit); goto bad; } diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index d719db4219cd..9c92f29a38ea 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2266,7 +2266,7 @@ int policydb_read(struct policydb *p, void *fp) len = le32_to_cpu(buf[1]); if (len != strlen(POLICYDB_STRING)) { printk(KERN_ERR "SELinux: policydb string length %d does not " - "match expected length %Zu\n", + "match expected length %zu\n", len, strlen(POLICYDB_STRING)); goto bad; } diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index a70fcee9824b..b4aa491a0a23 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -74,6 +74,7 @@ int selinux_policycap_netpeer; int selinux_policycap_openperm; int selinux_policycap_extsockclass; int selinux_policycap_alwaysnetwork; +int selinux_policycap_cgroupseclabel; static DEFINE_RWLOCK(policy_rwlock); @@ -1993,6 +1994,9 @@ static void security_load_policycaps(void) POLICYDB_CAPABILITY_EXTSOCKCLASS); selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_ALWAYSNETWORK); + selinux_policycap_cgroupseclabel = + ebitmap_get_bit(&policydb.policycaps, + POLICYDB_CAPABILITY_CGROUPSECLABEL); } static int security_preserve_bools(struct policydb *p); diff --git a/security/tomoyo/domain.c 
b/security/tomoyo/domain.c index 838ffa78cfda..00d223e9fb37 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -5,8 +5,10 @@ */ #include "common.h" + #include <linux/binfmts.h> #include <linux/slab.h> +#include <linux/rculist.h> /* Variables definitions.*/ diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index 50092534ec54..944ad77d8fba 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -5,6 +5,8 @@ */ #include <linux/slab.h> +#include <linux/rculist.h> + #include "common.h" /** diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 5fe3679137ae..848317fea704 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -5,6 +5,8 @@ */ #include <linux/slab.h> +#include <linux/rculist.h> + #include "common.h" /* Lock for protecting policy. */ diff --git a/sound/core/control.c b/sound/core/control.c index fb096cb20a80..c109b82eef4b 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/time.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 36d2416f90d9..9602a7e38d8a 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -25,6 +25,7 @@ #include <linux/time.h> #include <linux/mutex.h> #include <linux/module.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/control.h> #include <sound/minors.h> diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 698a01419515..36baf962f9b0 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -28,6 +28,7 @@ #include <linux/init.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/module.h> diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index bb1261591a1f..5088d4b8db22 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -21,6 +21,7 @@ */ #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/math64.h> #include <linux/export.h> diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index aec9c92250fd..13dec5ec93f2 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/file.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/time.h> #include <linux/pm_qos.h> #include <linux/io.h> diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 8da9cb245d01..ab890336175f 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -22,7 +22,7 @@ #include <sound/core.h> #include <linux/major.h> #include <linux/init.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/wait.h> diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h index d7b4d016b547..afa007c0cc2d 100644 --- a/sound/core/seq/oss/seq_oss_device.h +++ b/sound/core/seq/oss/seq_oss_device.h @@ -24,7 +24,7 @@ #include <linux/time.h> #include <linux/wait.h> #include <linux/slab.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/seq_oss.h> #include <sound/rawmidi.h> diff --git a/sound/core/seq/oss/seq_oss_writeq.c b/sound/core/seq/oss/seq_oss_writeq.c index 1f6788a18444..5e04f4df10e4 100644 --- a/sound/core/seq/oss/seq_oss_writeq.c +++ b/sound/core/seq/oss/seq_oss_writeq.c @@ 
-28,6 +28,7 @@ #include "../seq_clientmgr.h" #include <linux/wait.h> #include <linux/slab.h> +#include <linux/sched/signal.h> /* diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 1d5acbe0c08b..448efd4e980e 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -21,6 +21,8 @@ #include <sound/core.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include "seq_fifo.h" #include "seq_lock.h" @@ -135,6 +137,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, f->tail = cell; if (f->head == NULL) f->head = cell; + cell->next = NULL; f->cells++; spin_unlock_irqrestore(&f->lock, flags); @@ -214,6 +217,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, spin_lock_irqsave(&f->lock, flags); cell->next = f->head; f->head = cell; + if (!f->tail) + f->tail = cell; f->cells++; spin_unlock_irqrestore(&f->lock, flags); } diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index dfa5156f3585..1a1acf3ddda4 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <linux/vmalloc.h> #include <sound/core.h> diff --git a/sound/core/timer.c b/sound/core/timer.c index fc144f43faa6..6d4fbc439246 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -27,6 +27,7 @@ #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> @@ -1702,9 +1703,21 @@ static int snd_timer_user_params(struct file *file, return -EBADFD; if (copy_from_user(¶ms, _params, sizeof(params))) return -EFAULT; - if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { - err = -EINVAL; - goto _end; + if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) { + u64 resolution; + + if (params.ticks < 1) { + err = -EINVAL; + goto _end; + } + + /* Don't allow resolution less than 1ms */ + resolution = snd_timer_resolution(tu->timeri); + resolution *= params.ticks; + if (resolution < 1000000) { + err = -EINVAL; + goto _end; + } } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h index 175da875162d..17678d6ab5a2 100644 --- a/sound/firewire/bebob/bebob.h +++ b/sound/firewire/bebob/bebob.h @@ -17,6 +17,7 @@ #include <linux/mod_devicetable.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/initval.h> diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h index e6c07857f475..da00e75e09d4 100644 --- a/sound/firewire/dice/dice.h +++ b/sound/firewire/dice/dice.h @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <sound/control.h> #include <sound/core.h> diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h index 2cd465c0caae..9dc761bdacca 100644 --- a/sound/firewire/digi00x/digi00x.h +++ b/sound/firewire/digi00x/digi00x.h @@ -16,6 +16,7 @@ #include <linux/mod_devicetable.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/initval.h> diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index d73c12b8753d..9b19c7f05d57 100644 --- a/sound/firewire/fireworks/fireworks.h +++ 
b/sound/firewire/fireworks/fireworks.h @@ -17,6 +17,7 @@ #include <linux/mod_devicetable.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/initval.h> diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h index 2047dcb27625..d54d4a9ac4a1 100644 --- a/sound/firewire/oxfw/oxfw.h +++ b/sound/firewire/oxfw/oxfw.h @@ -13,6 +13,7 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/sched/signal.h> #include <sound/control.h> #include <sound/core.h> diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h index 1f61011579a7..d3cd4065722b 100644 --- a/sound/firewire/tascam/tascam.h +++ b/sound/firewire/tascam/tascam.h @@ -17,6 +17,7 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/sched/signal.h> #include <sound/core.h> #include <sound/initval.h> diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c index 25f6788ccef3..06505999155f 100644 --- a/sound/isa/gus/gus_pcm.c +++ b/sound/isa/gus/gus_pcm.c @@ -27,6 +27,8 @@ #include <asm/dma.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + #include <sound/core.h> #include <sound/control.h> #include <sound/gus.h> diff --git a/sound/isa/msnd/msnd.c b/sound/isa/msnd/msnd.c index 835d4aa26761..8109ab3d29d1 100644 --- a/sound/isa/msnd/msnd.c +++ b/sound/isa/msnd/msnd.c @@ -36,6 +36,7 @@ ********************************************************************/ #include <linux/kernel.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/io.h> diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c index 94c411299e5a..ec180708f160 100644 --- a/sound/isa/sb/emu8000.c +++ b/sound/isa/sb/emu8000.c @@ -21,7 +21,7 @@ */ #include <linux/wait.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/export.h> diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c index 71d13c0bb746..c2e41d2762f7 100644 --- a/sound/isa/sb/emu8000_patch.c +++ b/sound/isa/sb/emu8000_patch.c @@ -20,6 +20,8 @@ */ #include "emu8000_local.h" + +#include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/moduleparam.h> diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c index 250fd0006b53..32f234f494e5 100644 --- a/sound/isa/sb/emu8000_pcm.c +++ b/sound/isa/sb/emu8000_pcm.c @@ -19,6 +19,8 @@ */ #include "emu8000_local.h" + +#include <linux/sched/signal.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/initval.h> diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 718d5e3b7806..4dae9ff9ef5a 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/time.h> #include <linux/wait.h> +#include <linux/sched/signal.h> #include <linux/firmware.h> #include <linux/moduleparam.h> #include <linux/slab.h> diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c index e3f29132d3ac..c5dd396c66a2 100644 --- a/sound/oss/dmabuf.c +++ b/sound/oss/dmabuf.c @@ -27,6 +27,8 @@ #include <linux/mm.h> #include <linux/gfp.h> +#include <linux/sched/signal.h> + #include "sound_config.h" #include "sleep.h" diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c index 5f248fb41bea..fb3bbceb1fef 100644 --- a/sound/oss/dmasound/dmasound_core.c +++ 
b/sound/oss/dmasound/dmasound_core.c @@ -182,6 +182,7 @@ #include <linux/soundcard.h> #include <linux/poll.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/uaccess.h> diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c index 8f45cd999965..701c7625c971 100644 --- a/sound/oss/midibuf.c +++ b/sound/oss/midibuf.c @@ -16,6 +16,8 @@ #include <linux/stddef.h> #include <linux/kmod.h> #include <linux/spinlock.h> +#include <linux/sched/signal.h> + #define MIDIBUF_C #include "sound_config.h" diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c index a8bb4a06ba6f..f34ec01d2239 100644 --- a/sound/oss/msnd_pinnacle.c +++ b/sound/oss/msnd_pinnacle.c @@ -41,6 +41,8 @@ #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/gfp.h> +#include <linux/sched/signal.h> + #include <asm/irq.h> #include <asm/io.h> #include "sound_config.h" diff --git a/sound/oss/sound_config.h b/sound/oss/sound_config.h index f2554ab78f5e..5253b0a70437 100644 --- a/sound/oss/sound_config.h +++ b/sound/oss/sound_config.h @@ -16,6 +16,7 @@ #include <linux/fs.h> #include <linux/sound.h> +#include <linux/sched/signal.h> #include "os.h" #include "soundvers.h" diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c index f3af63e58b36..97899352b15f 100644 --- a/sound/oss/swarm_cs4297a.c +++ b/sound/oss/swarm_cs4297a.c @@ -64,7 +64,7 @@ #include <linux/module.h> #include <linux/string.h> #include <linux/ioport.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/sound.h> #include <linux/slab.h> diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index f4234edb878c..8cf0dc7a07a4 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c @@ -3093,7 +3093,7 @@ static int patch_cm9739(struct snd_ac97 * ac97) /* set-up multi channel */ /* bit 14: 0 = SPDIF, 1 = EAPD */ /* bit 13: enable internal vref output for mic */ - /* bit 12: disable center/lfe (swithable) */ + /* bit 12: disable center/lfe (switchable) */ /* bit 10: disable surround/line (switchable) */ /* bit 9: mix 2 surround off */ /* bit 4: undocumented; 0 mutes the CM9739A, which defaults to 1 */ diff --git a/sound/pci/cs46xx/cs46xx_dsp_task_types.h b/sound/pci/cs46xx/cs46xx_dsp_task_types.h index 5cf920bfda27..be5694718546 100644 --- a/sound/pci/cs46xx/cs46xx_dsp_task_types.h +++ b/sound/pci/cs46xx/cs46xx_dsp_task_types.h @@ -203,7 +203,7 @@ struct dsp_task_tree_context_block { u32 saverfe; - /* Value may be overwriten by stack save algorithm. + /* Value may be overwritten by stack save algorithm. 
Retain the size of the stack data saved here if used */ ___DSP_DUAL_16BIT_ALLOC( reserved1, diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c index 9667cbfb0ca2..ab4cdab5cfa5 100644 --- a/sound/pci/ctxfi/cthw20k1.c +++ b/sound/pci/ctxfi/cthw20k1.c @@ -27,12 +27,6 @@ #include "cthw20k1.h" #include "ct20k1reg.h" -#if BITS_PER_LONG == 32 -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ -#else -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ -#endif - struct hw20k1 { struct hw hw; spinlock_t reg_20k1_lock; @@ -1904,19 +1898,18 @@ static int hw_card_start(struct hw *hw) { int err; struct pci_dev *pci = hw->pci; + const unsigned int dma_bits = BITS_PER_LONG; err = pci_enable_device(pci); if (err < 0) return err; /* Set DMA transfer mask */ - if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || - dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { - dev_err(hw->card->dev, - "architecture does not support PCI busmaster DMA with mask 0x%llx\n", - CT_XFI_DMA_MASK); - err = -ENXIO; - goto error1; + if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); } if (!hw->io_base) { diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c index 6414ecf93efa..18ee7768b7c4 100644 --- a/sound/pci/ctxfi/cthw20k2.c +++ b/sound/pci/ctxfi/cthw20k2.c @@ -26,12 +26,6 @@ #include "cthw20k2.h" #include "ct20k2reg.h" -#if BITS_PER_LONG == 32 -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ -#else -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ -#endif - struct hw20k2 { struct hw hw; /* for i2c */ @@ -2029,19 +2023,18 @@ static int hw_card_start(struct hw *hw) int err = 0; struct pci_dev *pci = hw->pci; unsigned int gctl; + const unsigned int dma_bits = BITS_PER_LONG; err = pci_enable_device(pci); if (err < 0) return err; /* Set DMA transfer mask */ - if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || - dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { - dev_err(hw->card->dev, - "architecture does not support PCI busmaster DMA with mask 0x%llx\n", - CT_XFI_DMA_MASK); - err = -ENXIO; - goto error1; + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); } if (!hw->io_base) { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 16108f0eb688..c8256a89375a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2255,6 +2255,9 @@ static const struct pci_device_id azx_ids[] = { /* Broxton-T */ { PCI_DEVICE(0x8086, 0x1a98), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, + /* Gemini-Lake */ + { PCI_DEVICE(0x8086, 0x3198), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0a0c), .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 9ec4dba8a793..07a9deb17477 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -2866,7 +2866,7 @@ static unsigned int ca0132_capture_pcm_delay(struct hda_pcm_stream *info, #define CA0132_CODEC_MUTE(xname, nid, dir) \ CA0132_CODEC_MUTE_MONO(xname, nid, 3, dir) -/* The followings are for tuning of products */ +/* The following are for tuning of products */ #ifdef 
ENABLE_TUNING_CONTROLS static unsigned int voice_focus_vals_lookup[] = { diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index fd5efa72a68b..1461ef8eb749 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3800,6 +3800,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 73a00460b5c1..4e112221d825 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5606,6 +5606,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -5724,6 +5725,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), diff --git a/sound/pci/ice1712/wm8766.c b/sound/pci/ice1712/wm8766.c index f7ac8d5e862c..27c03e40c9b1 100644 --- a/sound/pci/ice1712/wm8766.c +++ b/sound/pci/ice1712/wm8766.c @@ -254,7 +254,7 @@ static int snd_wm8766_ctl_put(struct snd_kcontrol *kcontrol, int n = kcontrol->private_value; u16 val, regval1, regval2; - /* this also works for enum because value is an union */ + /* this also works for enum because value is a union */ regval1 = ucontrol->value.integer.value[0]; regval2 = ucontrol->value.integer.value[1]; if (wm->ctl[n].flags & WM8766_FLAG_INVERT) { diff --git a/sound/pci/ice1712/wm8776.c b/sound/pci/ice1712/wm8776.c index ebd2fe4b4a57..553669b103c2 100644 --- a/sound/pci/ice1712/wm8776.c +++ b/sound/pci/ice1712/wm8776.c @@ -528,7 +528,7 @@ static int snd_wm8776_ctl_put(struct snd_kcontrol *kcontrol, int n = kcontrol->private_value; u16 val, regval1, regval2; - /* this also works for enum because value is an union */ + /* this also works for enum because value is a union */ regval1 = ucontrol->value.integer.value[0]; regval2 = ucontrol->value.integer.value[1]; if (wm->ctl[n].flags & WM8776_FLAG_INVERT) { diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c index 565f7f55c3ca..1e25095fd144 100644 --- 
a/sound/pci/korg1212/korg1212.c +++ b/sound/pci/korg1212/korg1212.c @@ -2051,7 +2051,7 @@ static void snd_korg1212_proc_read(struct snd_info_entry *entry, snd_iprintf(buffer, korg1212->card->longname); snd_iprintf(buffer, " (index #%d)\n", korg1212->card->number + 1); snd_iprintf(buffer, "\nGeneral settings\n"); - snd_iprintf(buffer, " period size: %Zd bytes\n", K1212_PERIOD_BYTES); + snd_iprintf(buffer, " period size: %zd bytes\n", K1212_PERIOD_BYTES); snd_iprintf(buffer, " clock mode: %s\n", clockSourceName[korg1212->clkSrcRate] ); snd_iprintf(buffer, " left ADC Sens: %d\n", korg1212->leftADCInSens ); snd_iprintf(buffer, " right ADC Sens: %d\n", korg1212->rightADCInSens ); @@ -2276,7 +2276,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci, if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), sizeof(struct KorgSharedBuffer), &korg1212->dma_shared) < 0) { - snd_printk(KERN_ERR "korg1212: can not allocate shared buffer memory (%Zd bytes)\n", sizeof(struct KorgSharedBuffer)); + snd_printk(KERN_ERR "korg1212: can not allocate shared buffer memory (%zd bytes)\n", sizeof(struct KorgSharedBuffer)); snd_korg1212_free(korg1212); return -ENOMEM; } diff --git a/sound/pci/pcxhr/pcxhr_hwdep.c b/sound/pci/pcxhr/pcxhr_hwdep.c index 80633055e17e..a99808ab01fe 100644 --- a/sound/pci/pcxhr/pcxhr_hwdep.c +++ b/sound/pci/pcxhr/pcxhr_hwdep.c @@ -292,7 +292,7 @@ static int pcxhr_dsp_load(struct pcxhr_mgr *mgr, int index, int err, card_index; dev_dbg(&mgr->pci->dev, - "loading dsp [%d] size = %Zd\n", index, dsp->size); + "loading dsp [%d] size = %zd\n", index, dsp->size); switch (index) { case PCXHR_FIRMWARE_XLX_INT_INDEX: diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c index 56aa1ba73ccc..5f97791f00d7 100644 --- a/sound/pcmcia/vx/vxp_ops.c +++ b/sound/pcmcia/vx/vxp_ops.c @@ -201,7 +201,7 @@ static int vxp_load_xilinx_binary(struct vx_core *_chip, const struct firmware * c |= (int)vx_inb(chip, RXM) << 8; c |= vx_inb(chip, RXL); - snd_printdd(KERN_DEBUG "xilinx: dsp size received 0x%x, orig 0x%Zx\n", c, fw->size); + snd_printdd(KERN_DEBUG "xilinx: dsp size received 0x%x, orig 0x%zx\n", c, fw->size); vx_outb(chip, ICR, ICR_HF0); diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index b84d7d34f188..cdd44abfc9e0 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -883,7 +883,7 @@ static void snd_ps3_audio_set_base_addr(uint64_t ioaddr_start) static void snd_ps3_audio_fixup(struct snd_ps3_card_info *card) { /* - * avsetting driver seems to never change the followings + * avsetting driver seems to never change the following * so, init them here once */ diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index 818b052377f3..ec1067a679da 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mmio) return 0; } -/* Deintialize ACP */ +/* Deinitialize ACP */ static int acp_deinit(void __iomem *acp_mmio) { u32 val; diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 624b3b9cb079..63b2745f8169 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -1269,7 +1269,7 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_ON: - /* Turn off any unneded single ended outputs */ + /* Turn off any unneeded single ended outputs */ val = 0; mask = 0; diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 1d82f68305c3..8cfffa70c144 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ 
b/sound/soc/fsl/fsl_asrc.c @@ -368,7 +368,7 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair) fsl_asrc_set_watermarks(pair, ASRC_INPUTFIFO_THRESHOLD, ASRC_INPUTFIFO_THRESHOLD); - /* Configure the followings only for Ideal Ratio mode */ + /* Configure the following only for Ideal Ratio mode */ if (!ideal) return 0; diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h index 924971b6ded5..9b031352ea3c 100644 --- a/sound/soc/qcom/lpass.h +++ b/sound/soc/qcom/lpass.h @@ -82,7 +82,7 @@ struct lpass_variant { **/ u32 dmactl_audif_start; u32 wrdma_channel_start; - /* SOC specific intialization like clocks */ + /* SOC specific initialization like clocks */ int (*init)(struct platform_device *pdev); int (*exit)(struct platform_device *pdev); int (*alloc_dma_channel)(struct lpass_data *data, int direction); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index a110d3987d4a..6dca408faae3 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3041,7 +3041,7 @@ static int snd_soc_register_dais(struct snd_soc_component *component, unsigned int i; int ret; - dev_dbg(dev, "ASoC: dai register %s #%Zu\n", dev_name(dev), count); + dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count); component->dai_drv = dai_drv; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index aff3d8129ac9..3e9b1c0bb1ce 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -344,7 +344,7 @@ static int soc_tplg_widget_load(struct soc_tplg *tplg, return 0; } -/* pass DAI configurations to component driver for extra intialization */ +/* pass DAI configurations to component driver for extra initialization */ static int soc_tplg_dai_load(struct soc_tplg *tplg, struct snd_soc_dai_driver *dai_drv) { @@ -354,7 +354,7 @@ static int soc_tplg_dai_load(struct soc_tplg *tplg, return 0; } -/* pass link configurations to component driver for extra intialization */ +/* pass link configurations to component driver for extra initialization */ static int soc_tplg_dai_link_load(struct soc_tplg *tplg, struct snd_soc_dai_link *link) { diff --git a/tools/build/Makefile b/tools/build/Makefile index aaf7ed329a45..477f00eda591 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -35,8 +35,8 @@ all: $(OUTPUT)fixdep clean: $(call QUIET_CLEAN, fixdep) - $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete - $(Q)rm -f fixdep + $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete + $(Q)rm -f $(OUTPUT)fixdep $(OUTPUT)fixdep-in.o: FORCE $(Q)$(MAKE) $(build)=fixdep diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include index ad22e4e7bc59..d360f39a445b 100644 --- a/tools/build/Makefile.include +++ b/tools/build/Makefile.include @@ -3,4 +3,7 @@ build := -f $(srctree)/tools/build/Makefile.build dir=. 
obj fixdep: $(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep +fixdep-clean: + $(Q)$(MAKE) -C $(srctree)/tools/build clean + .PHONY: fixdep diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h index 18663f59d72f..68b8c1516c5a 100644 --- a/tools/include/asm-generic/bitops/atomic.h +++ b/tools/include/asm-generic/bitops/atomic.h @@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0; } +#define __set_bit(nr, addr) set_bit(nr, addr) +#define __clear_bit(nr, addr) clear_bit(nr, addr) + #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */ diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h index beda1a884b50..4790f047a89c 100644 --- a/tools/include/asm/bug.h +++ b/tools/include/asm/bug.h @@ -12,6 +12,14 @@ unlikely(__ret_warn_on); \ }) +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf("assertion failed at %s:%d\n", \ + __FILE__, __LINE__); \ + unlikely(__ret_warn_on); \ +}) + #define WARN_ON_ONCE(condition) ({ \ static int __warned; \ int __ret_warn_once = !!(condition); \ diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h index eef41d500e9e..e8b9f518e36b 100644 --- a/tools/include/linux/bitmap.h +++ b/tools/include/linux/bitmap.h @@ -4,6 +4,7 @@ #include <string.h> #include <linux/bitops.h> #include <stdlib.h> +#include <linux/kernel.h> #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index fc446343ff41..1aecad369af5 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -2,7 +2,6 @@ #define _TOOLS_LINUX_BITOPS_H_ #include <asm/types.h> -#include <linux/kernel.h> #include <linux/compiler.h> #ifndef __WORDSIZE diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 6326ede9aece..8de163b17c0d 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -25,6 +25,8 @@ #endif #define __user +#define __rcu +#define __read_mostly #ifndef __attribute_const__ # define __attribute_const__ @@ -54,6 +56,8 @@ # define unlikely(x) __builtin_expect(!!(x), 0) #endif +#define uninitialized_var(x) x = *(&(x)) + #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) #include <linux/types.h> diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h index 41446668ccce..d5677d39c1e4 100644 --- a/tools/include/linux/log2.h +++ b/tools/include/linux/log2.h @@ -13,12 +13,6 @@ #define _TOOLS_LINUX_LOG2_H /* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - -/* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() @@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? 
\ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h new file mode 100644 index 000000000000..58397dcb19d6 --- /dev/null +++ b/tools/include/linux/spinlock.h @@ -0,0 +1,5 @@ +#define spinlock_t pthread_mutex_t +#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; + +#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) +#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index d48b70ceb25a..207c2eeddab0 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -27,7 +27,7 @@ #include "bpf.h" /* - * When building perf, unistd.h is overrided. __NR_bpf is + * When building perf, unistd.h is overridden. __NR_bpf is * required to be defined explicitly. */ #ifndef __NR_bpf diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index f2ea78021450..7ce724fc0544 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -5225,13 +5225,13 @@ int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec) } /** - * pevent_data_prempt_count - parse the preempt count from the record + * pevent_data_preempt_count - parse the preempt count from the record * @pevent: a handle to the pevent * @rec: the record to parse * * This returns the preempt count from a record. */ -int pevent_data_prempt_count(struct pevent *pevent, struct pevent_record *rec) +int pevent_data_preempt_count(struct pevent *pevent, struct pevent_record *rec) { return parse_common_pc(pevent, rec->data); } diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 74cecba87daa..66342804161c 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -710,7 +710,7 @@ void pevent_data_lat_fmt(struct pevent *pevent, int pevent_data_type(struct pevent *pevent, struct pevent_record *rec); struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type); int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec); -int pevent_data_prempt_count(struct pevent *pevent, struct pevent_record *rec); +int pevent_data_preempt_count(struct pevent *pevent, struct pevent_record *rec); int pevent_data_flags(struct pevent *pevent, struct pevent_record *rec); const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid); struct cmdline; diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h index f7350fcedc70..a59e061c0b4a 100644 --- a/tools/objtool/arch.h +++ b/tools/objtool/arch.h @@ -31,9 +31,8 @@ #define INSN_CALL_DYNAMIC 8 #define INSN_RETURN 9 #define INSN_CONTEXT_SWITCH 10 -#define INSN_BUG 11 -#define INSN_NOP 12 -#define INSN_OTHER 13 +#define INSN_NOP 11 +#define INSN_OTHER 12 #define INSN_LAST INSN_OTHER int arch_decode_instruction(struct elf *elf, struct section *sec, diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 039636ffb6c8..6ac99e3266eb 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -118,9 +118,6 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, op2 == 0x35) /* sysenter, sysret */ *type = INSN_CONTEXT_SWITCH; - else if (op2 == 0x0b || op2 == 0xb9) - /* ud2 */ - *type = INSN_BUG; else if (op2 == 0x0d || op2 == 0x1f) /* nopl/nopw */ *type = INSN_NOP; diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index e8a1f699058a..4cfdbb5b6967 100644 --- a/tools/objtool/builtin-check.c +++ 
b/tools/objtool/builtin-check.c @@ -51,7 +51,7 @@ struct instruction { unsigned int len, state; unsigned char type; unsigned long immediate; - bool alt_group, visited; + bool alt_group, visited, dead_end; struct symbol *call_dest; struct instruction *jump_dest; struct list_head alts; @@ -330,6 +330,54 @@ static int decode_instructions(struct objtool_file *file) } /* + * Find all uses of the unreachable() macro, which are code path dead ends. + */ +static int add_dead_ends(struct objtool_file *file) +{ + struct section *sec; + struct rela *rela; + struct instruction *insn; + bool found; + + sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); + if (!sec) + return 0; + + list_for_each_entry(rela, &sec->rela_list, list) { + if (rela->sym->type != STT_SECTION) { + WARN("unexpected relocation symbol type in %s", sec->name); + return -1; + } + insn = find_insn(file, rela->sym->sec, rela->addend); + if (insn) + insn = list_prev_entry(insn, list); + else if (rela->addend == rela->sym->sec->len) { + found = false; + list_for_each_entry_reverse(insn, &file->insn_list, list) { + if (insn->sec == rela->sym->sec) { + found = true; + break; + } + } + + if (!found) { + WARN("can't find unreachable insn at %s+0x%x", + rela->sym->sec->name, rela->addend); + return -1; + } + } else { + WARN("can't find unreachable insn at %s+0x%x", + rela->sym->sec->name, rela->addend); + return -1; + } + + insn->dead_end = true; + } + + return 0; +} + +/* * Warnings shouldn't be reported for ignored functions. */ static void add_ignores(struct objtool_file *file) @@ -843,6 +891,10 @@ static int decode_sections(struct objtool_file *file) if (ret) return ret; + ret = add_dead_ends(file); + if (ret) + return ret; + add_ignores(file); ret = add_jump_destinations(file); @@ -1037,13 +1089,13 @@ static int validate_branch(struct objtool_file *file, return 0; - case INSN_BUG: - return 0; - default: break; } + if (insn->dead_end) + return 0; + insn = next_insn_same_sec(file, insn); if (!insn) { WARN("%s: unexpected end of section", sec->name); @@ -1220,7 +1272,7 @@ int cmd_check(int argc, const char **argv) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); - file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard"); + file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); file.rodata = find_section_by_name(file.elf, ".rodata"); file.ignore_unreachables = false; file.c_file = find_section_by_name(file.elf, ".comment"); diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index 8ffbd272952d..a89273d8e744 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -39,6 +39,10 @@ OPTIONS --verbose:: Be more verbose. (Show symbol address, etc) +-q:: +--quiet:: + Do not show any message. (Suppress -v) + -D:: --dump-raw-trace:: Dump raw trace in ASCII. diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt index 66dbe3dee74b..a79c84ae61aa 100644 --- a/tools/perf/Documentation/perf-diff.txt +++ b/tools/perf/Documentation/perf-diff.txt @@ -73,6 +73,10 @@ OPTIONS Be verbose, for instance, show the raw counts in addition to the diff. +-q:: +--quiet:: + Do not show any message. (Suppress -v) + -f:: --force:: Don't do ownership validation. 
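The perf hunks that follow add a global quiet option and convert plain "if (verbose)" tests to "if (verbose > 0)"; the point, presumably, is that a quiet mode can drive the verbosity level negative, so a simple truthiness test would misfire. A hedged, self-contained sketch of that pattern follows; the variable and helper names are illustrative, not perf's actual internals:

#include <stdio.h>

static int verbose;	/* -1 = quiet, 0 = normal, >0 = verbose */

static void make_quiet(void)
{
	verbose = -1;	/* non-zero, hence the explicit "> 0" tests below */
}

static void report(const char *msg)
{
	if (verbose > 0)	/* silent for both 0 (normal) and -1 (quiet) */
		fprintf(stderr, "debug: %s\n", msg);
	if (verbose >= 0)	/* normal output suppressed only in quiet mode */
		printf("%s\n", msg);
}

int main(void)
{
	report("first run");	/* prints only the normal line */
	make_quiet();
	report("second run");	/* prints nothing */
	return 0;
}

Under this convention a single --quiet handler can silence everything by forcing the level negative, which would also explain why the later hunks guard header banners and sample-count output with the quiet flag.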
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 27256bc68eda..b16003ec14a7 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -157,7 +157,7 @@ OPTIONS -a:: --all-cpus:: - System-wide collection from all CPUs. + System-wide collection from all CPUs (default if no target is specified). -p:: --pid=:: diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index f2914f03ae7b..c04cc0647c16 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -25,6 +25,10 @@ OPTIONS --verbose:: Be more verbose. (show symbol address, etc) +-q:: +--quiet:: + Do not show any message. (Suppress -v) + -n:: --show-nr-samples:: Show the number of samples for each symbol diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index d96ccd4844df..aecf2a87e7d6 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -63,7 +63,7 @@ report:: -a:: --all-cpus:: - system-wide collection from all CPUs + system-wide collection from all CPUs (default if no target is specified) -c:: --scale:: diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt index 8a6479c0eac9..170b0289a7bc 100644 --- a/tools/perf/Documentation/tips.txt +++ b/tools/perf/Documentation/tips.txt @@ -22,7 +22,7 @@ If you have debuginfo enabled, try: perf report -s sym,srcline For memory address profiling, try: perf mem record / perf mem report For tracepoint events, try: perf report -s trace_fields To record callchains for each sample: perf record -g -To record every process run by an user: perf record -u <user> +To record every process run by a user: perf record -u <user> Skip collecing build-id when recording: perf record -B To change sampling frequency to 100 Hz: perf record -F 100 See assembly instructions with percentage: perf annotate <symbol> diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 2b941efadb04..27c9fbca7bd9 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -175,6 +175,10 @@ PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) +ifeq ($(CC), clang) + PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) +endif + FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) @@ -601,6 +605,9 @@ else PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) + ifeq ($(CC), clang) + PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) + endif FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) ifneq ($(feature-libpython), 1) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 4da19b6ba94a..79fe31f20a17 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -726,13 +726,13 @@ config-clean: $(call QUIET_CLEAN, config) $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null -clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean 
config-clean +clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS) $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete $(Q)$(RM) $(OUTPUT).config-detected $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \ - $(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \ + $(OUTPUT)util/intel-pt-decoder/inat-tables.c \ $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \ $(OUTPUT)pmu-events/pmu-events.c $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index ebb628332a6e..4f52d85f5ebc 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -410,6 +410,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('q', "quiet", &quiet, "do now show any message"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"), @@ -463,6 +464,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) annotate.sym_hist_filter = argv[0]; } + if (quiet) + perf_quiet_option(); + file.path = input_name; annotate.session = perf_session__new(&file, false, &annotate.tool); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 70a289347591..1b96a3122228 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -691,7 +691,7 @@ static void hists__process(struct hists *hists) hists__precompute(hists); hists__output_resort(hists, NULL); - hists__fprintf(hists, true, 0, 0, 0, stdout, + hists__fprintf(hists, !quiet, 0, 0, 0, stdout, symbol_conf.use_callchain); } @@ -739,12 +739,14 @@ static void data_process(void) hists__link(hists_base, hists); } - fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n", - perf_evsel__name(evsel_base)); + if (!quiet) { + fprintf(stdout, "%s# Event '%s'\n#\n", first ? 
"" : "\n", + perf_evsel__name(evsel_base)); + } first = false; - if (verbose || data__files_cnt > 2) + if (verbose > 0 || ((data__files_cnt > 2) && !quiet)) data__fprintf(); /* Don't sort callchain for perf diff */ @@ -807,6 +809,7 @@ static const char * const diff_usage[] = { static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"), OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, "Show only items with match in baseline"), OPT_CALLBACK('c', "compute", &compute, @@ -1328,6 +1331,9 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) argc = parse_options(argc, argv, options, diff_usage, 0); + if (quiet) + perf_quiet_option(); + if (symbol__init(NULL) < 0) return -1; diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index cd7bc4d104e2..6114e07ca613 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -42,8 +42,8 @@ static int parse_record_events(const struct option *opt, fprintf(stderr, "%-13s%-*s%s\n", e->tag, - verbose ? 25 : 0, - verbose ? perf_mem_events__name(j) : "", + verbose > 0 ? 25 : 0, + verbose > 0 ? perf_mem_events__name(j) : "", e->supported ? ": available" : ""); } exit(0); diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6cd6776052e7..bc84a375295d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -432,7 +432,7 @@ static int record__open(struct record *rec) try_again: if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) { if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) { - if (verbose) + if (verbose > 0) ui__warning("%s\n", msg); goto try_again; } @@ -1677,8 +1677,12 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); + if (quiet) + perf_quiet_option(); + + /* Make system wide (-a) the default target. 
*/ if (!argc && target__none(&rec->opts.target)) - usage_with_options(record_usage, record_options); + rec->opts.target.system_wide = true; if (nr_cgroups && !rec->opts.target.system_wide) { usage_with_options_msg(record_usage, record_options, diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index dbd7fa028861..0a88670e56f3 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -320,6 +320,9 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report size_t size = sizeof(buf); int socked_id = hists->socket_filter; + if (quiet) + return 0; + if (symbol_conf.filter_relative) { nr_samples = hists->stats.nr_non_filtered_samples; nr_events = hists->stats.total_non_filtered_period; @@ -372,7 +375,11 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, { struct perf_evsel *pos; - fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", evlist->stats.total_lost_samples); + if (!quiet) { + fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", + evlist->stats.total_lost_samples); + } + evlist__for_each_entry(evlist, pos) { struct hists *hists = evsel__hists(pos); const char *evname = perf_evsel__name(pos); @@ -382,7 +389,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, continue; hists__fprintf_nr_sample_events(hists, rep, evname, stdout); - hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout, + hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout, symbol_conf.use_callchain); fprintf(stdout, "\n\n"); } @@ -716,6 +723,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, @@ -863,6 +871,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) report.symbol_filter_str = argv[0]; } + if (quiet) + perf_quiet_option(); + if (symbol_conf.vmlinux_name && access(symbol_conf.vmlinux_name, R_OK)) { pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); @@ -983,14 +994,14 @@ repeat: goto error; } - if (report.header || report.header_only) { + if ((report.header || report.header_only) && !quiet) { perf_session__fprintf_info(session, stdout, report.show_full_info); if (report.header_only) { ret = 0; goto error; } - } else if (use_browser == 0) { + } else if (use_browser == 0 && !quiet) { fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n", stdout); } @@ -1009,7 +1020,7 @@ repeat: * providing it only in verbose mode not to bloat too * much struct symbol. 
*/ - if (verbose) { + if (verbose > 0) { /* * XXX: Need to provide a less kludgy way to ask for * more space per symbol, the u32 is for the index on diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 270eb2d8ca6b..b94cf0de715a 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -460,7 +460,7 @@ static struct task_desc *register_pid(struct perf_sched *sched, BUG_ON(!sched->tasks); sched->tasks[task->nr] = task; - if (verbose) + if (verbose > 0) printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); return task; @@ -794,7 +794,7 @@ replay_wakeup_event(struct perf_sched *sched, const u32 pid = perf_evsel__intval(evsel, sample, "pid"); struct task_desc *waker, *wakee; - if (verbose) { + if (verbose > 0) { printf("sched_wakeup event %p\n", evsel); printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); @@ -822,7 +822,7 @@ static int replay_switch_event(struct perf_sched *sched, int cpu = sample->cpu; s64 delta; - if (verbose) + if (verbose > 0) printf("sched_switch event %p\n", evsel); if (cpu >= MAX_CPUS || cpu < 0) @@ -870,7 +870,7 @@ static int replay_fork_event(struct perf_sched *sched, goto out_put; } - if (verbose) { + if (verbose > 0) { printf("fork event\n"); printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); printf("... child: %s/%d\n", thread__comm_str(child), child->tid); @@ -1573,7 +1573,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); color_fprintf(stdout, color, " %12s secs ", stimestamp); - if (new_shortname || (verbose && sched_in->tid)) { + if (new_shortname || (verbose > 0 && sched_in->tid)) { const char *pid_color = color; if (thread__has_color(sched_in)) @@ -2050,7 +2050,7 @@ static void save_task_callchain(struct perf_sched *sched, if (thread__resolve_callchain(thread, cursor, evsel, sample, NULL, NULL, sched->max_stack + 2) != 0) { - if (verbose) + if (verbose > 0) error("Failed to resolve callchain. Skipping\n"); return; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f28719178b51..13b54999ad79 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -573,7 +573,7 @@ try_again: if (errno == EINVAL || errno == ENOSYS || errno == ENOENT || errno == EOPNOTSUPP || errno == ENXIO) { - if (verbose) + if (verbose > 0) ui__warning("%s event is not supported by the kernel.\n", perf_evsel__name(counter)); counter->supported = false; @@ -582,7 +582,7 @@ try_again: !(counter->leader->nr_members > 1)) continue; } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { - if (verbose) + if (verbose > 0) ui__warning("%s\n", msg); goto try_again; } @@ -1765,7 +1765,7 @@ static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, i cpu = map->map[idx]; - if (cpu >= env->nr_cpus_online) + if (cpu >= env->nr_cpus_avail) return -1; return cpu; @@ -2445,8 +2445,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) } else if (big_num_opt == 0) /* User passed --no-big-num */ big_num = false; + /* Make system wide (-a) the default target. 
*/ if (!argc && target__none(&target)) - usage_with_options(stat_usage, stat_options); + target.system_wide = true; if (run_count < 0) { pr_err("Run count must be a positive number\n"); @@ -2538,7 +2539,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) status = 0; for (run_idx = 0; forever || run_idx < run_count; run_idx++) { - if (run_count != 1 && verbose) + if (run_count != 1 && verbose > 0) fprintf(output, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 5a7fd7af3a6d..ab9077915763 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -871,7 +871,7 @@ try_again: if (perf_evsel__open(counter, top->evlist->cpus, top->evlist->threads) < 0) { if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { - if (verbose) + if (verbose > 0) ui__warning("%s\n", msg); goto try_again; } diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 40ef9b293d1b..256f1fac6f7e 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1399,7 +1399,7 @@ static struct syscall *trace__syscall_info(struct trace *trace, return &trace->syscalls.table[id]; out_cant_read: - if (verbose) { + if (verbose > 0) { fprintf(trace->output, "Problems reading syscall %d", id); if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL) fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); @@ -1801,10 +1801,10 @@ static void print_location(FILE *f, struct perf_sample *sample, bool print_dso, bool print_sym) { - if ((verbose || print_dso) && al->map) + if ((verbose > 0 || print_dso) && al->map) fprintf(f, "%s@", al->map->dso->long_name); - if ((verbose || print_sym) && al->sym) + if ((verbose > 0 || print_sym) && al->sym) fprintf(f, "%s+0x%" PRIx64, al->sym->name, al->addr - al->sym->start); else if (al->map) diff --git a/tools/perf/pmu-events/json.c b/tools/perf/pmu-events/json.c index f67bbb0aa36e..0544398d6e2d 100644 --- a/tools/perf/pmu-events/json.c +++ b/tools/perf/pmu-events/json.c @@ -49,7 +49,7 @@ static char *mapfile(const char *fn, size_t *size) int err; int fd = open(fn, O_RDONLY); - if (fd < 0 && verbose && fn) { + if (fd < 0 && verbose > 0 && fn) { pr_err("Error opening events file '%s': %s\n", fn, strerror(errno)); } diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 28d1605b0338..88dc51f4c27b 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -144,7 +144,7 @@ static int run_dir(const char *d, const char *perf) int vcnt = min(verbose, (int) sizeof(v) - 1); char cmd[3*PATH_MAX]; - if (verbose) + if (verbose > 0) vcnt++; snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 37e326bfd2dc..83c4669cbc5b 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -299,7 +299,7 @@ static int run_test(struct test *test, int subtest) if (!dont_fork) { pr_debug("test child forked, pid %d\n", getpid()); - if (!verbose) { + if (verbose <= 0) { int nullfd = open("/dev/null", O_WRONLY); if (nullfd >= 0) { diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index ff5bc6363a79..d1f693041324 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -599,7 +599,7 @@ static int do_test_code_reading(bool try_kcore) continue; } - if (verbose) { + if (verbose > 0) { char errbuf[512]; perf_evlist__strerror_open(evlist, errno, errbuf, 
sizeof(errbuf)); pr_debug("perf_evlist__open() failed!\n%s\n", errbuf); diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c index a2b5ff9bf83d..bc5982f42dc3 100644 --- a/tools/perf/tests/fdarray.c +++ b/tools/perf/tests/fdarray.c @@ -19,7 +19,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE { int printed = 0; - if (!verbose) + if (verbose <= 0) return 0; printed += fprintf(fp, "\n%s: ", prefix); diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c index d357dab72e68..482b5365e68d 100644 --- a/tools/perf/tests/llvm.c +++ b/tools/perf/tests/llvm.c @@ -76,7 +76,7 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf, * Skip this test if user's .perfconfig doesn't set [llvm] section * and clang is not found in $PATH, and this is not perf test -v */ - if (!force && (verbose == 0 && + if (!force && (verbose <= 0 && !llvm_param.user_set_param && llvm__search_clang())) { pr_debug("No clang and no verbosive, skip this test\n"); diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index aa9276bfe3e9..1dc838014422 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1808,7 +1808,7 @@ static void debug_warn(const char *warn, va_list params) { char msg[1024]; - if (!verbose) + if (verbose <= 0) return; vsnprintf(msg, sizeof(msg), warn, params); diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 541da7a68f91..87893f3ba5f1 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -172,13 +172,13 @@ int test__PERF_RECORD(int subtest __maybe_unused) err = perf_evlist__parse_sample(evlist, event, &sample); if (err < 0) { - if (verbose) + if (verbose > 0) perf_event__fprintf(event, stderr); pr_debug("Couldn't parse sample\n"); goto out_delete_evlist; } - if (verbose) { + if (verbose > 0) { pr_info("%" PRIu64" %d ", sample.time, sample.cpu); perf_event__fprintf(event, stderr); } diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c index 7a52834ee0d0..fa79509da535 100644 --- a/tools/perf/tests/python-use.c +++ b/tools/perf/tests/python-use.c @@ -15,7 +15,7 @@ int test__python_use(int subtest __maybe_unused) int ret; if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", - PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0) + PYTHONPATH, PYTHON, verbose > 0 ? "" : "2> /dev/null") < 0) return -1; ret = system(cmd) ? 
-1 : 0; diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c index a4a4b4625ac3..f2d2e542d0ee 100644 --- a/tools/perf/tests/thread-map.c +++ b/tools/perf/tests/thread-map.c @@ -109,7 +109,7 @@ int test__thread_map_remove(int subtest __maybe_unused) TEST_ASSERT_VAL("failed to allocate thread_map", threads); - if (verbose) + if (verbose > 0) thread_map__fprintf(threads, stderr); TEST_ASSERT_VAL("failed to remove thread", @@ -117,7 +117,7 @@ int test__thread_map_remove(int subtest __maybe_unused) TEST_ASSERT_VAL("thread_map count != 1", threads->nr == 1); - if (verbose) + if (verbose > 0) thread_map__fprintf(threads, stderr); TEST_ASSERT_VAL("failed to remove thread", @@ -125,7 +125,7 @@ int test__thread_map_remove(int subtest __maybe_unused) TEST_ASSERT_VAL("thread_map count != 0", threads->nr == 0); - if (verbose) + if (verbose > 0) thread_map__fprintf(threads, stderr); TEST_ASSERT_VAL("failed to not remove thread", diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 98fe69ac553c..803f893550d6 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -65,7 +65,9 @@ static int check_cpu_topology(char *path, struct cpu_map *map) session = perf_session__new(&file, false, NULL); TEST_ASSERT_VAL("can't get session", session); - for (i = 0; i < session->header.env.nr_cpus_online; i++) { + for (i = 0; i < session->header.env.nr_cpus_avail; i++) { + if (!cpu_map__has(map, i)) + continue; pr_debug("CPU %d, core %d, socket %d\n", i, session->header.env.cpu[i].core_id, session->header.env.cpu[i].socket_id); diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index a5082331f246..862b043e5924 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -168,7 +168,7 @@ next_pair: err = -1; } - if (!verbose) + if (verbose <= 0) goto out; header_printed = false; diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c index 98a34664bb7e..9ce142de536d 100644 --- a/tools/perf/ui/browsers/map.c +++ b/tools/perf/ui/browsers/map.c @@ -73,7 +73,7 @@ static int map_browser__run(struct map_browser *browser) if (ui_browser__show(&browser->b, browser->map->dso->long_name, "Press ESC to exit, %s / to search", - verbose ? "" : "restart with -v to use") < 0) + verbose > 0 ? 
"" : "restart with -v to use") < 0) return -1; while (1) { @@ -81,7 +81,7 @@ static int map_browser__run(struct map_browser *browser) switch (key) { case '/': - if (verbose) + if (verbose > 0) map_browser__search(browser); default: break; @@ -117,7 +117,7 @@ int map__browse(struct map *map) if (maxaddr < pos->end) maxaddr = pos->end; - if (verbose) { + if (verbose > 0) { u32 *idx = symbol__browser_index(pos); *idx = mb.b.nr_entries; } diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 18cfcdc90356..5d632dca672a 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -648,7 +648,7 @@ unsigned int hists__sort_list_width(struct hists *hists) ret += fmt->width(fmt, &dummy_hpp, hists); } - if (verbose && hists__has(hists, sym)) /* Addr + origin */ + if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */ ret += 3 + BITS_PER_LONG / 4; return ret; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 06cc04e5806a..273f21fa32b5 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1768,7 +1768,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, printf("%-*.*s----\n", graph_dotted_len, graph_dotted_len, graph_dotted_line); - if (verbose) + if (verbose > 0) symbol__annotate_hits(sym, evsel); list_for_each_entry(pos, ¬es->src->source, node) { diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index 8fdee24725a7..eafbf11442b2 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -12,8 +12,8 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) { FILE *fp; char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1]; + char path_v1[PATH_MAX + 1], path_v2[PATH_MAX + 2], *path; char *token, *saved_ptr = NULL; - int found = 0; fp = fopen("/proc/mounts", "r"); if (!fp) @@ -24,31 +24,43 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) * and inspect every cgroupfs mount point to find one that has * perf_event subsystem */ + path_v1[0] = '\0'; + path_v2[0] = '\0'; + while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %" STR(PATH_MAX)"s %*d %*d\n", mountpoint, type, tokens) == 3) { - if (!strcmp(type, "cgroup")) { + if (!path_v1[0] && !strcmp(type, "cgroup")) { token = strtok_r(tokens, ",", &saved_ptr); while (token != NULL) { if (!strcmp(token, "perf_event")) { - found = 1; + strcpy(path_v1, mountpoint); break; } token = strtok_r(NULL, ",", &saved_ptr); } } - if (found) + + if (!path_v2[0] && !strcmp(type, "cgroup2")) + strcpy(path_v2, mountpoint); + + if (path_v1[0] && path_v2[0]) break; } fclose(fp); - if (!found) + + if (path_v1[0]) + path = path_v1; + else if (path_v2[0]) + path = path_v2; + else return -1; - if (strlen(mountpoint) < maxlen) { - strcpy(buf, mountpoint); + if (strlen(path) < maxlen) { + strcpy(buf, path); return 0; } return -1; diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 2c0b52264a46..8c7504939113 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -9,6 +9,7 @@ #include "asm/bug.h" static int max_cpu_num; +static int max_present_cpu_num; static int max_node_num; static int *cpunode_map; @@ -442,6 +443,7 @@ static void set_max_cpu_num(void) /* set up default */ max_cpu_num = 4096; + max_present_cpu_num = 4096; mnt = sysfs__mountpoint(); if (!mnt) @@ -455,6 +457,17 @@ static void set_max_cpu_num(void) } ret = get_max_num(path, &max_cpu_num); + if (ret) + goto out; + + /* get the highest present cpu number for a sparse allocation */ + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); + 
if (ret == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + goto out; + } + + ret = get_max_num(path, &max_present_cpu_num); out: if (ret) @@ -505,6 +518,15 @@ int cpu__max_cpu(void) return max_cpu_num; } +int cpu__max_present_cpu(void) +{ + if (unlikely(!max_present_cpu_num)) + set_max_cpu_num(); + + return max_present_cpu_num; +} + + int cpu__get_node(int cpu) { if (unlikely(cpunode_map == NULL)) { diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 06bd689f5989..1a0549af8f5c 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -62,6 +62,7 @@ int cpu__setup_cpunode_map(void); int cpu__max_node(void); int cpu__max_cpu(void); +int cpu__max_present_cpu(void); int cpu__get_node(int cpu); int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index c1838b643108..03eb81f30d0d 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -203,11 +203,28 @@ int perf_debug_option(const char *str) v = (v < 0) || (v > 10) ? 0 : v; } + if (quiet) + v = -1; + *var->ptr = v; free(s); return 0; } +int perf_quiet_option(void) +{ + struct debug_variable *var = &debug_variables[0]; + + /* disable all debug messages */ + while (var->name) { + *var->ptr = -1; + var++; + } + + quiet = true; + return 0; +} + #define DEBUG_WRAPPER(__n, __l) \ static int pr_ ## __n ## _wrapper(const char *fmt, ...) \ { \ diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index d242adc3d5a2..98832f5531d3 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -54,5 +54,6 @@ int veprintf(int level, int var, const char *fmt, va_list args); int perf_debug_option(const char *str); void perf_debug_setup(void); +int perf_quiet_option(void); #endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 28d41e709128..d38b62a700ca 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -951,7 +951,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root, if (rc == 0) { /* * In case the new DSO is a duplicate of an existing - * one, print an one-time warning & put the new entry + * one, print a one-time warning & put the new entry * at the end of the list of duplicates. 
*/ if (!dso || (dso == this)) @@ -1058,7 +1058,7 @@ int dso__name_len(const struct dso *dso) { if (!dso) return strlen("[unknown]"); - if (verbose) + if (verbose > 0) return dso->long_name_len; return dso->short_name_len; diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index bb964e86b09d..075fc77286bf 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -66,7 +66,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env) return 0; if (env->nr_cpus_avail == 0) - env->nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF); + env->nr_cpus_avail = cpu__max_present_cpu(); nr_cpus = env->nr_cpus_avail; if (nr_cpus == -1) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 3d12c16e5103..05714d548584 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -295,11 +295,7 @@ static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, u32 nrc, nra; int ret; - nr = sysconf(_SC_NPROCESSORS_CONF); - if (nr < 0) - return -1; - - nrc = (u32)(nr & UINT_MAX); + nrc = cpu__max_present_cpu(); nr = sysconf(_SC_NPROCESSORS_ONLN); if (nr < 0) @@ -505,24 +501,29 @@ static void free_cpu_topo(struct cpu_topo *tp) static struct cpu_topo *build_cpu_topology(void) { - struct cpu_topo *tp; + struct cpu_topo *tp = NULL; void *addr; u32 nr, i; size_t sz; long ncpus; int ret = -1; + struct cpu_map *map; - ncpus = sysconf(_SC_NPROCESSORS_CONF); - if (ncpus < 0) + ncpus = cpu__max_present_cpu(); + + /* build online CPU map */ + map = cpu_map__new(NULL); + if (map == NULL) { + pr_debug("failed to get system cpumap\n"); return NULL; + } nr = (u32)(ncpus & UINT_MAX); sz = nr * sizeof(char *); - addr = calloc(1, sizeof(*tp) + 2 * sz); if (!addr) - return NULL; + goto out_free; tp = addr; tp->cpu_nr = nr; @@ -532,10 +533,16 @@ static struct cpu_topo *build_cpu_topology(void) tp->thread_siblings = addr; for (i = 0; i < nr; i++) { + if (!cpu_map__has(map, i)) + continue; + ret = build_cpu_topo(tp, i); if (ret < 0) break; } + +out_free: + cpu_map__put(map); if (ret) { free_cpu_topo(tp); tp = NULL; @@ -1126,7 +1133,7 @@ static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, { int nr, i; char *str; - int cpu_nr = ph->env.nr_cpus_online; + int cpu_nr = ph->env.nr_cpus_avail; nr = ph->env.nr_sibling_cores; str = ph->env.sibling_cores; @@ -1781,7 +1788,7 @@ static int process_cpu_topology(struct perf_file_section *section, u32 nr, i; char *str; struct strbuf sb; - int cpu_nr = ph->env.nr_cpus_online; + int cpu_nr = ph->env.nr_cpus_avail; u64 size = 0; ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); @@ -1862,7 +1869,7 @@ static int process_cpu_topology(struct perf_file_section *section, if (ph->needs_swap) nr = bswap_32(nr); - if (nr > (u32)cpu_nr) { + if (nr != (u32)-1 && nr > (u32)cpu_nr) { pr_debug("socket_id number is too big." 
"You may need to upgrade the perf tool.\n"); goto free_cpu; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 32c6a939e4cc..eaf72a938fb4 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -69,7 +69,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) */ if (h->ms.sym) { symlen = h->ms.sym->namelen + 4; - if (verbose) + if (verbose > 0) symlen += BITS_PER_LONG / 4 + 2 + 3; hists__new_col_len(hists, HISTC_SYMBOL, symlen); } else { @@ -93,7 +93,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) if (h->branch_info) { if (h->branch_info->from.sym) { symlen = (int)h->branch_info->from.sym->namelen + 4; - if (verbose) + if (verbose > 0) symlen += BITS_PER_LONG / 4 + 2 + 3; hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); @@ -107,7 +107,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) if (h->branch_info->to.sym) { symlen = (int)h->branch_info->to.sym->namelen + 4; - if (verbose) + if (verbose > 0) symlen += BITS_PER_LONG / 4 + 2 + 3; hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 281e44af31e2..67a8aebc67ab 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -2318,24 +2318,20 @@ int parse_events__is_hardcoded_term(struct parse_events_term *term) return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; } -static int new_term(struct parse_events_term **_term, int type_val, - int type_term, char *config, - char *str, u64 num, int err_term, int err_val) +static int new_term(struct parse_events_term **_term, + struct parse_events_term *temp, + char *str, u64 num) { struct parse_events_term *term; - term = zalloc(sizeof(*term)); + term = malloc(sizeof(*term)); if (!term) return -ENOMEM; + *term = *temp; INIT_LIST_HEAD(&term->list); - term->type_val = type_val; - term->type_term = type_term; - term->config = config; - term->err_term = err_term; - term->err_val = err_val; - switch (type_val) { + switch (term->type_val) { case PARSE_EVENTS__TERM_TYPE_NUM: term->val.num = num; break; @@ -2353,15 +2349,22 @@ static int new_term(struct parse_events_term **_term, int type_val, int parse_events_term__num(struct parse_events_term **term, int type_term, char *config, u64 num, + bool no_value, void *loc_term_, void *loc_val_) { YYLTYPE *loc_term = loc_term_; YYLTYPE *loc_val = loc_val_; - return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, - config, NULL, num, - loc_term ? loc_term->first_column : 0, - loc_val ? loc_val->first_column : 0); + struct parse_events_term temp = { + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = type_term, + .config = config, + .no_value = no_value, + .err_term = loc_term ? loc_term->first_column : 0, + .err_val = loc_val ? loc_val->first_column : 0, + }; + + return new_term(term, &temp, NULL, num); } int parse_events_term__str(struct parse_events_term **term, @@ -2371,37 +2374,45 @@ int parse_events_term__str(struct parse_events_term **term, YYLTYPE *loc_term = loc_term_; YYLTYPE *loc_val = loc_val_; - return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, - config, str, 0, - loc_term ? loc_term->first_column : 0, - loc_val ? loc_val->first_column : 0); + struct parse_events_term temp = { + .type_val = PARSE_EVENTS__TERM_TYPE_STR, + .type_term = type_term, + .config = config, + .err_term = loc_term ? loc_term->first_column : 0, + .err_val = loc_val ? 
loc_val->first_column : 0, + }; + + return new_term(term, &temp, str, 0); } int parse_events_term__sym_hw(struct parse_events_term **term, char *config, unsigned idx) { struct event_symbol *sym; + struct parse_events_term temp = { + .type_val = PARSE_EVENTS__TERM_TYPE_STR, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + .config = config ?: (char *) "event", + }; BUG_ON(idx >= PERF_COUNT_HW_MAX); sym = &event_symbols_hw[idx]; - if (config) - return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, - PARSE_EVENTS__TERM_TYPE_USER, config, - (char *) sym->symbol, 0, 0, 0); - else - return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, - PARSE_EVENTS__TERM_TYPE_USER, - (char *) "event", (char *) sym->symbol, - 0, 0, 0); + return new_term(term, &temp, (char *) sym->symbol, 0); } int parse_events_term__clone(struct parse_events_term **new, struct parse_events_term *term) { - return new_term(new, term->type_val, term->type_term, term->config, - term->val.str, term->val.num, - term->err_term, term->err_val); + struct parse_events_term temp = { + .type_val = term->type_val, + .type_term = term->type_term, + .config = term->config, + .err_term = term->err_term, + .err_val = term->err_val, + }; + + return new_term(new, &temp, term->val.str, term->val.num); } void parse_events_terms__purge(struct list_head *terms) diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index da246a3ddb69..1af6a267c21b 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -94,6 +94,7 @@ struct parse_events_term { int type_term; struct list_head list; bool used; + bool no_value; /* error string indexes for within parsed string */ int err_term; @@ -122,6 +123,7 @@ void parse_events__shrink_config_terms(void); int parse_events__is_hardcoded_term(struct parse_events_term *term); int parse_events_term__num(struct parse_events_term **term, int type_term, char *config, u64 num, + bool novalue, void *loc_term, void *loc_val); int parse_events_term__str(struct parse_events_term **term, int type_term, char *config, char *str, diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index a14b47ab3879..30f018ea1370 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -252,7 +252,7 @@ PE_KERNEL_PMU_EVENT sep_dc if (!strcasecmp(alias->name, $1)) { ALLOC_LIST(head); ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, 1, &@1, NULL)); + $1, 1, false, &@1, NULL)); list_add_tail(&term->list, head); if (!parse_events_add_pmu(data, list, @@ -282,7 +282,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc ALLOC_LIST(head); ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - &pmu_name, 1, &@1, NULL)); + &pmu_name, 1, false, &@1, NULL)); list_add_tail(&term->list, head); ALLOC_LIST(list); @@ -548,7 +548,7 @@ PE_NAME '=' PE_VALUE struct parse_events_term *term; ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, $3, &@1, &@3)); + $1, $3, false, &@1, &@3)); $$ = term; } | @@ -566,7 +566,7 @@ PE_NAME struct parse_events_term *term; ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, 1, &@1, NULL)); + $1, 1, true, &@1, NULL)); $$ = term; } | @@ -591,7 +591,7 @@ PE_TERM '=' PE_VALUE { struct parse_events_term *term; - ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, &@1, &@3)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, false, &@1, &@3)); $$ = term; } | @@ -599,7 +599,7 @@ PE_TERM { struct parse_events_term *term; - 
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, true, &@1, NULL)); $$ = term; } | @@ -620,7 +620,7 @@ PE_NAME array '=' PE_VALUE struct parse_events_term *term; ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, $4, &@1, &@4)); + $1, $4, false, &@1, &@4)); term->array = $2; $$ = term; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 49bfee0e3d9e..12f84dd2ac5d 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -745,7 +745,7 @@ static int pmu_resolve_param_term(struct parse_events_term *term, } } - if (verbose) + if (verbose > 0) printf("Required parameter '%s' not specified\n", term->config); return -1; @@ -803,7 +803,7 @@ static int pmu_config_term(struct list_head *formats, format = pmu_find_format(formats, term->config); if (!format) { - if (verbose) + if (verbose > 0) printf("Invalid event/parameter '%s'\n", term->config); if (err) { char *pmu_term = pmu_formats_string(formats); @@ -834,11 +834,20 @@ static int pmu_config_term(struct list_head *formats, * Either directly use a numeric term, or try to translate string terms * using event parameters. */ - if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { + if (term->no_value && + bitmap_weight(format->bits, PERF_PMU_FORMAT_BITS) > 1) { + if (err) { + err->idx = term->err_val; + err->str = strdup("no value assigned for term"); + } + return -EINVAL; + } + val = term->val.num; - else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { + } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { if (strcmp(term->val.str, "?")) { - if (verbose) { + if (verbose > 0) { pr_info("Invalid sysfs entry %s=%s\n", term->config, term->val.str); } @@ -1223,7 +1232,7 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag, printf("%*s", 8, "["); wordwrap(aliases[j].desc, 8, columns, 0); printf("]\n"); - if (verbose) + if (verbose > 0) printf("%*s%s/%s/\n", 8, "", aliases[j].pmu, aliases[j].str); } else printf(" %-50s [Kernel PMU event]\n", aliases[j].name); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 35f5b7b7715c..28fb62c32678 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -594,7 +594,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, pr_debug("try to find information at %" PRIx64 " in %s\n", addr, tp->module ? 
: "kernel"); - dinfo = debuginfo_cache__open(tp->module, verbose == 0); + dinfo = debuginfo_cache__open(tp->module, verbose <= 0); if (dinfo) ret = debuginfo__find_probe_point(dinfo, (unsigned long)addr, pp); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 0d9d6e0803b8..57cd268d4275 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -464,7 +464,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, /* Verify it is a data structure */ tag = dwarf_tag(&type); if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { - pr_warning("%s is not a data structure nor an union.\n", + pr_warning("%s is not a data structure nor a union.\n", varname); return -EINVAL; } @@ -479,7 +479,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, } else { /* Verify it is a data structure */ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { - pr_warning("%s is not a data structure nor an union.\n", + pr_warning("%s is not a data structure nor a union.\n", varname); return -EINVAL; } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 581e0efd6356..783326cfbaa6 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -369,10 +369,10 @@ static PyObject *python_process_callchain(struct perf_sample *sample, if (node->map) { struct map *map = node->map; const char *dsoname = "[unknown]"; - if (map && map->dso && (map->dso->name || map->dso->long_name)) { + if (map && map->dso) { if (symbol_conf.show_kernel_path && map->dso->long_name) dsoname = map->dso->long_name; - else if (map->dso->name) + else dsoname = map->dso->name; } pydict_set_item_string_decref(pyelem, "dso", diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 4cdbc8f5f14d..1dd617d116b5 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -932,7 +932,7 @@ static void branch_stack__printf(struct perf_sample *sample) printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", i, e->from, e->to, - e->flags.cycles, + (unsigned short)e->flags.cycles, e->flags.mispred ? "M" : " ", e->flags.predicted ? "P" : " ", e->flags.abort ? 
"A" : " ", diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index c8680984d2d6..af415febbc46 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -1,8 +1,15 @@ #!/usr/bin/python2 -from distutils.core import setup, Extension from os import getenv +cc = getenv("CC") +if cc == "clang": + from _sysconfigdata import build_time_vars + from re import sub + build_time_vars["CFLAGS"] = sub("-specs=[^ ]+", "", build_time_vars["CFLAGS"]) + +from distutils.core import setup, Extension + from distutils.command.build_ext import build_ext as _build_ext from distutils.command.install_lib import install_lib as _install_lib diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index df622f4e301e..0ff622288d24 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -151,7 +151,7 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) if (!dso_l || !dso_r) return cmp_null(dso_r, dso_l); - if (verbose) { + if (verbose > 0) { dso_name_l = dso_l->long_name; dso_name_r = dso_r->long_name; } else { @@ -172,8 +172,8 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf, size_t size, unsigned int width) { if (map && map->dso) { - const char *dso_name = !verbose ? map->dso->short_name : - map->dso->long_name; + const char *dso_name = verbose > 0 ? map->dso->long_name : + map->dso->short_name; return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); } @@ -261,7 +261,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, { size_t ret = 0; - if (verbose) { + if (verbose > 0) { char o = map ? dso__symtab_origin(map->dso) : '!'; ret += repsep_snprintf(bf, size, "%-#*llx %c ", BITS_PER_LONG / 4 + 2, ip, o); diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 7aff317fc7c4..796c847e2f00 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -108,7 +108,7 @@ struct hist_entry { /* * Since perf diff only supports the stdio output, TUI * fields are only accessed from perf report (or perf - * top). So make it an union to reduce memory usage. + * top). So make it a union to reduce memory usage. */ struct hist_entry_diff diff; struct /* for TUI */ { diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 39345c2ddfc2..0d51334a9b46 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -344,7 +344,7 @@ int perf_stat_process_counter(struct perf_stat_config *config, for (i = 0; i < 3; i++) update_stats(&ps->res_stats[i], count[i]); - if (verbose) { + if (verbose > 0) { fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", perf_evsel__name(counter), count[0], count[1], count[2]); } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index adbc6c02c3aa..4e59ddeb4eda 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -213,7 +213,7 @@ static bool want_demangle(bool is_kernel_sym) static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) { - int demangle_flags = verbose ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS; + int demangle_flags = verbose > 0 ? 
(DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS; char *demangled = NULL; /* diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index 590d12a25f6e..3e701f0e9c14 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c @@ -285,20 +285,24 @@ static int get_freq_hardware(unsigned int cpu, unsigned int human) /* --hwlimits / -l */ -static int get_hardware_limits(unsigned int cpu) +static int get_hardware_limits(unsigned int cpu, unsigned int human) { unsigned long min, max; - printf(_(" hardware limits: ")); if (cpufreq_get_hardware_limits(cpu, &min, &max)) { printf(_("Not Available\n")); return -EINVAL; } - print_speed(min); - printf(" - "); - print_speed(max); - printf("\n"); + if (human) { + printf(_(" hardware limits: ")); + print_speed(min); + printf(" - "); + print_speed(max); + printf("\n"); + } else { + printf("%lu %lu\n", min, max); + } return 0; } @@ -456,7 +460,7 @@ static void debug_output_one(unsigned int cpu) get_related_cpus(cpu); get_affected_cpus(cpu); get_latency(cpu, 1); - get_hardware_limits(cpu); + get_hardware_limits(cpu, 1); freqs = cpufreq_get_available_frequencies(cpu); if (freqs) { @@ -622,7 +626,7 @@ int cmd_freq_info(int argc, char **argv) ret = get_driver(cpu); break; case 'l': - ret = get_hardware_limits(cpu); + ret = get_hardware_limits(cpu, human); break; case 'w': ret = get_freq_hardware(cpu, human); @@ -639,7 +643,6 @@ int cmd_freq_info(int argc, char **argv) } if (ret) return ret; - printf("\n"); } return ret; } diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index 03cb639b292e..fedca3285326 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -16,9 +16,9 @@ idle power-state statistics, temperature and power on X86 processors. There are two ways to invoke turbostat. The first method is to supply a \fBcommand\fP, which is forked and statistics are printed -upon its completion. +in one shot, upon its completion. The second method is to omit the command, -and turbostat displays statistics every 5 seconds. +and turbostat displays statistics at 5-second intervals. The 5-second interval can be changed using the --interval option. .PP Some information is not available on older processors. @@ -28,9 +28,10 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--add attributes\fP add column with counter having specified 'attributes'. The 'location' attribute is required, all others are optional. .nf - location: {\fBmsrDDD\fP | \fBmsr0xXXX\fP} + location: {\fBmsrDDD\fP | \fBmsr0xXXX\fP | \fB/sys/path...\fP} msrDDD is a decimal offset, eg. msr16 msr0xXXX is a hex offset, eg. msr0x10 + /sys/path... is an absolute path to a sysfs attribute scope: {\fBcpu\fP | \fBcore\fP | \fBpackage\fP} sample and print the counter for every cpu, core, or package. @@ -45,12 +46,21 @@ name as necessary to disambiguate it from others is necessary. Note that option 'delta' shows the difference in values during the measurement interval. 'percent' shows the delta as a percentage of the cycles elapsed. default: delta + + name: "name_string" + Any string that does not match a key-word above is used + as the column header. .fi .PP +\fB--cpu cpu-set\fP limit output to system summary plus the specified cpu-set. If cpu-set is the string "core", then the system summary plus the first CPU in each core are printed -- eg. subsequent HT siblings are not printed. 
Or if cpu-set is the string "package", then the system summary plus the first CPU in each package is printed. Otherwise, the system summary plus the specified set of CPUs are printed. The cpu-set is ordered from low to high, comma delimited with ".." and "-" permitted to denote a range. eg. 1,2,8,14..17,21-44 +.PP +\fB--hide column\fP do not show the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. +.PP +\fB--show column\fP show only the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. +.PP \fB--Dump\fP displays the raw counter values. .PP -\fB--debug\fP displays additional system configuration information. Invoking this parameter -more than once may also enable internal turbostat debug information. +\fB--quiet\fP Do not decode and print the system configuration header information. .PP \fB--interval seconds\fP overrides the default 5.0 second measurement interval. .PP @@ -61,9 +71,7 @@ The file is truncated if it already exists, and it is created if it does not exi .PP \fB--Joules\fP displays energy in Joules, rather than dividing Joules by time to print power in Watts. .PP -\fB--Package\fP limits output to the system summary plus the 1st thread in each Package. -.PP -\fB--processor\fP limits output to the system summary plus the 1st thread in each processor of each package. Ie. it skips hyper-threaded siblings. +\fB--list\fP display column header names available for use by --show and --hide, then exit. .PP \fB--Summary\fP limits output to a 1-line System Summary for each interval. .PP @@ -74,24 +82,25 @@ The file is truncated if it already exists, and it is created if it does not exi The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit, displays the statistics gathered since it was forked. .PP -.SH DEFAULT FIELD DESCRIPTIONS +.SH ROW DESCRIPTIONS +The system configuration dump (if --quiet is not used) is followed by statistics. The first row of the statistics labels the content of each column (below). The second row of statistics is the system summary line. The system summary line has a '-' in the columns for the Package, Core, and CPU. The contents of the system summary line depends on the type of column. Columns that count items (eg. IRQ) show the sum across all CPUs in the system. Columns that show a percentage show the average across all CPUs in the system. Columns that dump raw MSR values simply show 0 in the summary. After the system summary row, each row describes a specific Package/Core/CPU. Note that if the --cpu parameter is used to limit which specific CPUs are displayed, turbostat will still collect statistics for all CPUs in the system and will still show the system summary for all CPUs in the system. +.SH COLUMN DESCRIPTIONS .nf +\fBCore\fP processor core number. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT). \fBCPU\fP Linux CPU (logical processor) number. Yes, it is okay that on many systems the CPUs are not listed in numerical order -- for efficiency reasons, turbostat runs in topology order, so HT siblings appear together. -\fBAVG_MHz\fP number of cycles executed divided by time elapsed. -\fBBusy%\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state. -\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state). 
+\fBPackage\fP processor package number -- not present on systems with a single processor package. +\fBAvg_MHz\fP number of cycles executed divided by time elapsed. Note that this includes idle-time when 0 instructions are executed. +\fBBusy%\fP percent of the measurement interval that the CPU executes instructions, aka. % of time in "C0" state. +\fBBzy_MHz\fP average clock rate while the CPU was not idle (ie. in "c0" state). \fBTSC_MHz\fP average MHz that the TSC ran during the entire interval. -.fi -.PP -.SH DEBUG FIELD DESCRIPTIONS -.nf -\fBPackage\fP processor package number. -\fBCore\fP processor core number. -Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT). -\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. +\fBIRQ\fP The number of interrupts serviced by that CPU during the measurement interval. The system total line is the sum of interrupts serviced across all CPUs. turbostat parses /proc/interrupts to generate this summary. +\fBSMI\fP The number of System Management Interrupts serviced by that CPU during the measurement interval. While this counter is actually per-CPU, SMIs are triggered on all processors, so the number should be the same for all CPUs. +\fBC1, C2, C3...\fP The number of times Linux requested the C1, C2, C3 idle state during the measurement interval. The system summary line shows the sum for all CPUs. These are C-state names as exported in /sys/devices/system/cpu/cpu*/cpuidle/state*/name. While their names are generic, their attributes are processor specific. The system description section of the output shows which MWAIT sub-states they are mapped to on each system. +\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software counters, reflecting what was requested. The hardware counters reflect what was actually achieved. +\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. -\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. +\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. \fBPkgWatt\fP Watts consumed by the whole package. \fBCorWatt\fP Watts consumed by the core part of the package. \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. @@ -99,51 +108,110 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. .fi +.SH TOO MUCH INFORMATION EXAMPLE +By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters. +This is ideal for remote debugging: use the "--out" option to save everything to a text file, and get that file to the expert helping you debug. .PP -.SH PERIODIC EXAMPLE -Without any parameters, turbostat displays statistics ever 5 seconds. -Periodic output goes to stdout, by default, unless --out is used to specify an output file. 
-The 5-second interval can be changed with th "-i sec" option. -Or a command may be specified as in "FORK EXAMPLE" below. +When you are not interested in all that information, and there are several ways to see only what you want. First the "--quiet" option will skip the configuration information, and turbostat will show only the counter columns. Second, you can reduce the columns with the "--hide" and "--show" options. If you use the "--show" option, then turbostat will show only the columns you list. If you use the "--hide" option, turbostat will show all columns, except the ones you list. +.PP +To find out what columns are available for --show and --hide, the "--list" option is available. For convenience, the special strings "sysfs" can be used to refer to all of the sysfs C-state counters at once: +.nf +sudo ./turbostat --show sysfs --quiet sleep 10 +10.003837 sec + C1 C1E C3 C6 C7s C1% C1E% C3% C6% C7s% + 4 21 2 2 459 0.14 0.82 0.00 0.00 98.93 + 1 17 2 2 130 0.00 0.02 0.00 0.00 99.80 + 0 0 0 0 31 0.00 0.00 0.00 0.00 99.95 + 2 1 0 0 52 1.14 6.49 0.00 0.00 92.21 + 1 2 0 0 52 0.00 0.08 0.00 0.00 99.86 + 0 0 0 0 71 0.00 0.00 0.00 0.00 99.89 + 0 0 0 0 25 0.00 0.00 0.00 0.00 99.96 + 0 0 0 0 74 0.00 0.00 0.00 0.00 99.94 + 0 1 0 0 24 0.00 0.00 0.00 0.00 99.84 +.fi +.PP +.SH ONE SHOT COMMAND EXAMPLE +If turbostat is invoked with a command, it will fork that command +and output the statistics gathered after the command exits. +In this case, turbostat output goes to stderr, by default. +Output can instead be saved to a file using the --out option. +In this example, the "sleep 10" command is forked, and turbostat waits for it to complete before saving all statistics into "ts.out". Note that "sleep 10" is not part of turbostat, but is simply an example of a command that turbostat can fork. The "ts.out" file is what you want to edit in a very wide window, paste into a spreadsheet, or attach to a bugzilla entry. + .nf -[root@hsw]# ./turbostat - CPU Avg_MHz Busy% Bzy_MHz TSC_MHz - - 488 12.51 3898 3498 - 0 0 0.01 3885 3498 - 4 3897 99.99 3898 3498 - 1 0 0.00 3861 3498 - 5 0 0.00 3882 3498 - 2 1 0.02 3894 3498 - 6 2 0.06 3898 3498 - 3 0 0.00 3849 3498 - 7 0 0.00 3877 3498 +[root@hsw]# ./turbostat -o ts.out sleep 10 +[root@hsw]# +.fi +.SH PERIODIC INTERVAL EXAMPLE +Without a command to fork, turbostat displays statistics ever 5 seconds. +Periodic output goes to stdout, by default, unless --out is used to specify an output file. +The 5-second interval can be changed with the "-i sec" option. 
+.nf +sudo ./turbostat --quiet --hide sysfs,IRQ,SMI,CoreTmp,PkgTmp,GFX%rc6,GFXMHz,PkgWatt,CorWatt,GFXWatt + Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7 + - - 488 12.52 3900 3498 12.50 0.00 0.00 74.98 + 0 0 5 0.13 3900 3498 99.87 0.00 0.00 0.00 + 0 4 3897 99.99 3900 3498 0.01 + 1 1 0 0.00 3856 3498 0.01 0.00 0.00 99.98 + 1 5 0 0.00 3861 3498 0.01 + 2 2 1 0.02 3889 3498 0.03 0.00 0.00 99.95 + 2 6 0 0.00 3863 3498 0.05 + 3 3 0 0.01 3869 3498 0.02 0.00 0.00 99.97 + 3 7 0 0.00 3878 3498 0.03 + Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7 + - - 491 12.59 3900 3498 12.42 0.00 0.00 74.99 + 0 0 27 0.69 3900 3498 99.31 0.00 0.00 0.00 + 0 4 3898 99.99 3900 3498 0.01 + 1 1 0 0.00 3883 3498 0.01 0.00 0.00 99.99 + 1 5 0 0.00 3898 3498 0.01 + 2 2 0 0.01 3889 3498 0.02 0.00 0.00 99.98 + 2 6 0 0.00 3889 3498 0.02 + 3 3 0 0.00 3856 3498 0.01 0.00 0.00 99.99 + 3 7 0 0.00 3897 3498 0.01 .fi -.SH DEBUG EXAMPLE -The "--debug" option prints additional system information before measurements: +This example also shows the use of the --hide option to skip columns that are not wanted. +Note that cpu4 in this example is 99.99% busy, while the other CPUs are all under 1% busy. +Notice that cpu4's HT sibling is cpu0, which is under 1% busy, but can get into CPU%c1 only, +because its cpu4's activity on shared hardware keeps it from entering a deeper C-state. -The first row of statistics is a summary for the entire system. -For residency % columns, the summary is a weighted average. -For Temperature columns, the summary is the column maximum. -For Watts columns, the summary is a system total. -Subsequent rows show per-CPU statistics. +.SH SYSTEM CONFIGURATION INFORMATION EXAMPLE + +By default, turbostat always dumps system configuration information +before taking measurements. In the example above, "--quiet" is used +to suppress that output. Here is an example of the configuration information: .nf -turbostat version 4.1 10-Feb, 2015 - Len Brown <lenb@kernel.org> +turbostat version 2017.02.15 - Len Brown <lenb@kernel.org> CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3c:3 (6:60:3) -CPUID(6): APERF, DTS, PTM, EPB +CPUID(1): SSE3 MONITOR - EIST TM2 TSC MSR ACPI-TM TM +CPUID(6): APERF, TURBO, DTS, PTM, No-HWP, No-HWPnotify, No-HWPwindow, No-HWPepp, No-HWPpkg, EPB +cpu4: MSR_IA32_MISC_ENABLE: 0x00850089 (TCC EIST No-MWAIT PREFETCH TURBO) +CPUID(7): No-SGX +cpu4: MSR_MISC_PWR_MGMT: 0x00400000 (ENable-EIST_Coordination DISable-EPB DISable-OOB) RAPL: 3121 sec. 
Joule Counter Range, at 84 Watts -cpu0: MSR_NHM_PLATFORM_INFO: 0x80838f3012300 -8 * 100 = 800 MHz max efficiency -35 * 100 = 3500 MHz TSC frequency -cpu0: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled) -cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0) -cpu0: MSR_TURBO_RATIO_LIMIT: 0x25262727 -37 * 100 = 3700 MHz max turbo 4 active cores -38 * 100 = 3800 MHz max turbo 3 active cores -39 * 100 = 3900 MHz max turbo 2 active cores -39 * 100 = 3900 MHz max turbo 1 active cores +cpu4: MSR_PLATFORM_INFO: 0x80838f3012300 +8 * 100.0 = 800.0 MHz max efficiency frequency +35 * 100.0 = 3500.0 MHz base frequency +cpu4: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled) +cpu4: MSR_TURBO_RATIO_LIMIT: 0x25262727 +37 * 100.0 = 3700.0 MHz max turbo 4 active cores +38 * 100.0 = 3800.0 MHz max turbo 3 active cores +39 * 100.0 = 3900.0 MHz max turbo 2 active cores +39 * 100.0 = 3900.0 MHz max turbo 1 active cores +cpu4: MSR_CONFIG_TDP_NOMINAL: 0x00000023 (base_ratio=35) +cpu4: MSR_CONFIG_TDP_LEVEL_1: 0x00000000 () +cpu4: MSR_CONFIG_TDP_LEVEL_2: 0x00000000 () +cpu4: MSR_CONFIG_TDP_CONTROL: 0x80000000 ( lock=1) +cpu4: MSR_TURBO_ACTIVATION_RATIO: 0x00000000 (MAX_NON_TURBO_RATIO=0 lock=0) +cpu4: MSR_PKG_CST_CONFIG_CONTROL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0) +cpu4: POLL: CPUIDLE CORE POLL IDLE +cpu4: C1: MWAIT 0x00 +cpu4: C1E: MWAIT 0x01 +cpu4: C3: MWAIT 0x10 +cpu4: C6: MWAIT 0x20 +cpu4: C7s: MWAIT 0x32 +cpu4: MSR_MISC_FEATURE_CONTROL: 0x00000000 (L2-Prefetch L2-Prefetch-pair L1-Prefetch L1-IP-Prefetch) cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced) -cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Auto-HWP, Amps, MultiCoreTurbo, Transitions, ) +cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Transitions, MultiCoreTurbo, Amps, Auto-HWP, ) cpu0: MSR_GFX_PERF_LIMIT_REASONS, 0x00000000 (Active: ) (Logged: ) cpu0: MSR_RING_PERF_LIMIT_REASONS, 0x0d000000 (Active: ) (Logged: Amps, PkgPwrL1, PkgPwrL2, ) cpu0: MSR_RAPL_POWER_UNIT: 0x000a0e03 (0.125000 Watts, 0.000061 Joules, 0.000977 sec.) 
@@ -158,23 +226,14 @@ cpu0: MSR_PP1_POLICY: 0 cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked) cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled) cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00641400 (100 C) -cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x88340800 (48 C) -cpu0: MSR_IA32_THERM_STATUS: 0x88340000 (48 C +/- 1) -cpu1: MSR_IA32_THERM_STATUS: 0x88440000 (32 C +/- 1) -cpu2: MSR_IA32_THERM_STATUS: 0x88450000 (31 C +/- 1) -cpu3: MSR_IA32_THERM_STATUS: 0x88490000 (27 C +/- 1) - Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp PkgWatt CorWatt GFXWatt - - - 493 12.64 3898 3498 0 12.64 0.00 0.00 74.72 47 47 21.62 13.74 0.00 - 0 0 4 0.11 3894 3498 0 99.89 0.00 0.00 0.00 47 47 21.62 13.74 0.00 - 0 4 3897 99.98 3898 3498 0 0.02 - 1 1 7 0.17 3887 3498 0 0.04 0.00 0.00 99.79 32 - 1 5 0 0.00 3885 3498 0 0.21 - 2 2 29 0.76 3895 3498 0 0.10 0.01 0.01 99.13 32 - 2 6 2 0.06 3896 3498 0 0.80 - 3 3 1 0.02 3832 3498 0 0.03 0.00 0.00 99.95 28 - 3 7 0 0.00 3879 3498 0 0.04 -^C - +cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884c0800 (24 C) +cpu0: MSR_IA32_THERM_STATUS: 0x884c0000 (24 C +/- 1) +cpu1: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1) +cpu2: MSR_IA32_THERM_STATUS: 0x884e0000 (22 C +/- 1) +cpu3: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1) +cpu4: MSR_PKGC3_IRTL: 0x00008842 (valid, 67584 ns) +cpu4: MSR_PKGC6_IRTL: 0x00008873 (valid, 117760 ns) +cpu4: MSR_PKGC7_IRTL: 0x00008891 (valid, 148480 ns) .fi The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency available at the minimum package voltage. The \fBTSC frequency\fP is the base @@ -184,42 +243,22 @@ should be sustainable on all CPUs indefinitely, given nominal power and cooling. The remaining rows show what maximum turbo frequency is possible depending on the number of idle cores. Note that not all information is available on all processors. -.PP -The --debug option adds additional columns to the measurement ouput, including CPU idle power-state residency processor temperature sensor readinds. -See the field definitions above. -.SH FORK EXAMPLE -If turbostat is invoked with a command, it will fork that command -and output the statistics gathered after the command exits. -In this case, turbostat output goes to stderr, by default. -Output can instead be saved to a file using the --out option. -eg. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds -until ^C while the other CPUs are mostly idle: - +.SH ADD COUNTER EXAMPLE +Here we limit turbostat to showing just the CPU number for cpu0 - cpu3. +We add a counter showing the 32-bit raw value of MSR 0x199 (MSR_IA32_PERF_CTL), +labeling it with the column header, "PRF_CTRL", and display it only once, +afte the conclusion of a 0.1 second sleep. .nf -root@hsw: turbostat cat /dev/zero > /dev/null -^C - CPU Avg_MHz Busy% Bzy_MHz TSC_MHz - - 482 12.51 3854 3498 - 0 0 0.01 1960 3498 - 4 0 0.00 2128 3498 - 1 0 0.00 3003 3498 - 5 3854 99.98 3855 3498 - 2 0 0.01 3504 3498 - 6 3 0.08 3884 3498 - 3 0 0.00 2553 3498 - 7 0 0.00 2126 3498 -10.783983 sec +sudo ./turbostat --quiet --cpu 0-3 --show CPU --add msr0x199,u32,raw,PRF_CTRL sleep .1 +0.101604 sec +CPU PRF_CTRL +- 0x00000000 +0 0x00000c00 +1 0x00000800 +2 0x00000a00 +3 0x00000800 .fi -Above the cycle soaker drives cpu5 up its 3.9 GHz turbo limit. -The first row shows the average MHz and Busy% across all the processors in the system. - -Note that the Avg_MHz column reflects the total number of cycles executed -divided by the measurement interval. 
If the Busy% column is 100%, -then the processor was running at that speed the entire interval. -The Avg_MHz multiplied by the Busy% results in the Bzy_MHz -- -which is the average frequency while the processor was executing -- -not including any non-busy idle time. .SH NOTES diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f13f61b065c6..828dccd3f01e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -49,17 +49,14 @@ FILE *outf; int *fd_percpu; struct timespec interval_ts = {5, 0}; unsigned int debug; +unsigned int quiet; +unsigned int sums_need_wide_columns; unsigned int rapl_joules; unsigned int summary_only; +unsigned int list_header_only; unsigned int dump_only; -unsigned int do_nhm_cstates; unsigned int do_snb_cstates; unsigned int do_knl_cstates; -unsigned int do_pc2; -unsigned int do_pc3; -unsigned int do_pc6; -unsigned int do_pc7; -unsigned int do_c8_c9_c10; unsigned int do_skl_residency; unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; @@ -71,25 +68,19 @@ unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; +unsigned int no_MSR_MISC_PWR_MGMT; unsigned int aperf_mperf_multiplier = 1; -int do_irq = 1; -int do_smi; double bclk; double base_hz; unsigned int has_base_hz; double tsc_tweak = 1.0; -unsigned int show_pkg; -unsigned int show_core; -unsigned int show_cpu; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; -unsigned int do_gfx_rc6_ms; unsigned long long gfx_cur_rc6_ms; -unsigned int do_gfx_mhz; unsigned int gfx_cur_mhz; unsigned int tcc_activation_temp; unsigned int tcc_activation_temp_override; @@ -109,6 +100,7 @@ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ +unsigned int has_misc_feature_control; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -148,34 +140,38 @@ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters */ #define NAME_BYTES 20 +#define PATH_BYTES 128 int backwards_count; char *progname; -cpu_set_t *cpu_present_set, *cpu_affinity_set; -size_t cpu_present_setsize, cpu_affinity_setsize; +#define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... 
*/ +cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; +size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; +#define MAX_ADDED_COUNTERS 16 struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; unsigned long long c1; - unsigned int irq_count; + unsigned long long irq_count; unsigned int smi_count; unsigned int cpu_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *thread_even, *thread_odd; struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; + unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ unsigned int core_temp_c; unsigned int core_id; - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *core_even, *core_odd; struct pkg_data { @@ -200,7 +196,7 @@ struct pkg_data { unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ unsigned int pkg_temp_c; - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd @@ -215,22 +211,27 @@ struct pkg_data { #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE}; -enum counter_type {COUNTER_CYCLES, COUNTER_SECONDS}; +enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC}; enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT}; struct msr_counter { unsigned int msr_num; char name[NAME_BYTES]; + char path[PATH_BYTES]; unsigned int width; enum counter_type type; enum counter_format format; struct msr_counter *next; + unsigned int flags; +#define FLAGS_HIDE (1 << 0) +#define FLAGS_SHOW (1 << 1) +#define SYSFS_PERCPU (1 << 1) }; struct sys_counters { - unsigned int thread_counter_bytes; - unsigned int core_counter_bytes; - unsigned int package_counter_bytes; + unsigned int added_thread_counters; + unsigned int added_core_counters; + unsigned int added_package_counters; struct msr_counter *tp; struct msr_counter *cp; struct msr_counter *pp; @@ -334,147 +335,333 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); if (retval != sizeof *msr) - err(-1, "msr %d offset 0x%llx read failed", cpu, (unsigned long long)offset); + err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset); return 0; } /* - * Example Format w/ field column widths: - * - * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz IRQ SMI Busy% CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 ThreadC CoreTmp CoreCnt PkgTmp GFXMHz Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt PkgCnt - * 12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + * Each string in this array is compared in --show and --hide cmdline. + * Thus, strings that are proper sub-sets must follow their more specific peers. 
+ */ +struct msr_counter bic[] = { + { 0x0, "Package" }, + { 0x0, "Avg_MHz" }, + { 0x0, "Bzy_MHz" }, + { 0x0, "TSC_MHz" }, + { 0x0, "IRQ" }, + { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, + { 0x0, "Busy%" }, + { 0x0, "CPU%c1" }, + { 0x0, "CPU%c3" }, + { 0x0, "CPU%c6" }, + { 0x0, "CPU%c7" }, + { 0x0, "ThreadC" }, + { 0x0, "CoreTmp" }, + { 0x0, "CoreCnt" }, + { 0x0, "PkgTmp" }, + { 0x0, "GFX%rc6" }, + { 0x0, "GFXMHz" }, + { 0x0, "Pkg%pc2" }, + { 0x0, "Pkg%pc3" }, + { 0x0, "Pkg%pc6" }, + { 0x0, "Pkg%pc7" }, + { 0x0, "Pkg%pc8" }, + { 0x0, "Pkg%pc9" }, + { 0x0, "Pkg%pc10" }, + { 0x0, "PkgWatt" }, + { 0x0, "CorWatt" }, + { 0x0, "GFXWatt" }, + { 0x0, "PkgCnt" }, + { 0x0, "RAMWatt" }, + { 0x0, "PKG_%" }, + { 0x0, "RAM_%" }, + { 0x0, "Pkg_J" }, + { 0x0, "Cor_J" }, + { 0x0, "GFX_J" }, + { 0x0, "RAM_J" }, + { 0x0, "Core" }, + { 0x0, "CPU" }, + { 0x0, "Mod%c6" }, + { 0x0, "sysfs" }, +}; + +#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) +#define BIC_Package (1ULL << 0) +#define BIC_Avg_MHz (1ULL << 1) +#define BIC_Bzy_MHz (1ULL << 2) +#define BIC_TSC_MHz (1ULL << 3) +#define BIC_IRQ (1ULL << 4) +#define BIC_SMI (1ULL << 5) +#define BIC_Busy (1ULL << 6) +#define BIC_CPU_c1 (1ULL << 7) +#define BIC_CPU_c3 (1ULL << 8) +#define BIC_CPU_c6 (1ULL << 9) +#define BIC_CPU_c7 (1ULL << 10) +#define BIC_ThreadC (1ULL << 11) +#define BIC_CoreTmp (1ULL << 12) +#define BIC_CoreCnt (1ULL << 13) +#define BIC_PkgTmp (1ULL << 14) +#define BIC_GFX_rc6 (1ULL << 15) +#define BIC_GFXMHz (1ULL << 16) +#define BIC_Pkgpc2 (1ULL << 17) +#define BIC_Pkgpc3 (1ULL << 18) +#define BIC_Pkgpc6 (1ULL << 19) +#define BIC_Pkgpc7 (1ULL << 20) +#define BIC_Pkgpc8 (1ULL << 21) +#define BIC_Pkgpc9 (1ULL << 22) +#define BIC_Pkgpc10 (1ULL << 23) +#define BIC_PkgWatt (1ULL << 24) +#define BIC_CorWatt (1ULL << 25) +#define BIC_GFXWatt (1ULL << 26) +#define BIC_PkgCnt (1ULL << 27) +#define BIC_RAMWatt (1ULL << 28) +#define BIC_PKG__ (1ULL << 29) +#define BIC_RAM__ (1ULL << 30) +#define BIC_Pkg_J (1ULL << 31) +#define BIC_Cor_J (1ULL << 32) +#define BIC_GFX_J (1ULL << 33) +#define BIC_RAM_J (1ULL << 34) +#define BIC_Core (1ULL << 35) +#define BIC_CPU (1ULL << 36) +#define BIC_Mod_c6 (1ULL << 37) +#define BIC_sysfs (1ULL << 38) + +unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL; +unsigned long long bic_present = BIC_sysfs; + +#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) +#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) +#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) + +#define MAX_DEFERRED 16 +char *deferred_skip_names[MAX_DEFERRED]; +int deferred_skip_index; + +/* + * HIDE_LIST - hide this list of counters, show the rest [default] + * SHOW_LIST - show this list of counters, hide the rest */ +enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST; -void print_header(void) +void help(void) { - struct msr_counter *mp; + fprintf(outf, + "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" + "\n" + "Turbostat forks the specified COMMAND and prints statistics\n" + "when COMMAND completes.\n" + "If no COMMAND is specified, turbostat wakes every 5-seconds\n" + "to print statistics, until interrupted.\n" + "--add add a counter\n" + " eg. 
--add msr0x10,u64,cpu,delta,MY_TSC\n" + "--cpu cpu-set limit output to summary plus cpu-set:\n" + " {core | package | j,k,l..m,n-p }\n" + "--quiet skip decoding system configuration header\n" + "--interval sec Override default 5-second measurement interval\n" + "--help print this help message\n" + "--list list column headers only\n" + "--out file create or truncate \"file\" for all output\n" + "--version print version information\n" + "\n" + "For more help, run \"man turbostat\"\n"); +} - if (show_pkg) - outp += sprintf(outp, "\tPackage"); - if (show_core) - outp += sprintf(outp, "\tCore"); - if (show_cpu) - outp += sprintf(outp, "\tCPU"); - if (has_aperf) - outp += sprintf(outp, "\tAvg_MHz"); - if (has_aperf) - outp += sprintf(outp, "\tBusy%%"); - if (has_aperf) - outp += sprintf(outp, "\tBzy_MHz"); - outp += sprintf(outp, "\tTSC_MHz"); +/* + * bic_lookup + * for all the strings in comma separate name_list, + * set the approprate bit in return value. + */ +unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) +{ + int i; + unsigned long long retval = 0; - if (!debug) - goto done; + while (name_list) { + char *comma; - if (do_irq) - outp += sprintf(outp, "\tIRQ"); - if (do_smi) - outp += sprintf(outp, "\tSMI"); - - if (do_nhm_cstates) - outp += sprintf(outp, "\tCPU%%c1"); - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) - outp += sprintf(outp, "\tCPU%%c3"); - if (do_nhm_cstates) - outp += sprintf(outp, "\tCPU%%c6"); - if (do_snb_cstates) - outp += sprintf(outp, "\tCPU%%c7"); + comma = strchr(name_list, ','); + + if (comma) + *comma = '\0'; + + for (i = 0; i < MAX_BIC; ++i) { + if (!strcmp(name_list, bic[i].name)) { + retval |= (1ULL << i); + break; + } + } + if (i == MAX_BIC) { + if (mode == SHOW_LIST) { + fprintf(stderr, "Invalid counter name: %s\n", name_list); + exit(-1); + } + deferred_skip_names[deferred_skip_index++] = name_list; + if (debug) + fprintf(stderr, "deferred \"%s\"\n", name_list); + if (deferred_skip_index >= MAX_DEFERRED) { + fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n", + MAX_DEFERRED, name_list); + help(); + exit(1); + } + } + + name_list = comma; + if (name_list) + name_list++; + + } + return retval; +} + + +void print_header(char *delim) +{ + struct msr_counter *mp; + int printed = 0; + + if (DO_BIC(BIC_Package)) + outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); + if (DO_BIC(BIC_Core)) + outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); + if (DO_BIC(BIC_Avg_MHz)) + outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); + if (DO_BIC(BIC_Busy)) + outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_Bzy_MHz)) + outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : "")); + if (DO_BIC(BIC_TSC_MHz)) + outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : "")); + + if (DO_BIC(BIC_IRQ)) { + if (sums_need_wide_columns) + outp += sprintf(outp, "%s IRQ", (printed++ ? delim : "")); + else + outp += sprintf(outp, "%sIRQ", (printed++ ? delim : "")); + } + + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "%sSMI", (printed++ ? delim : "")); for (mp = sys.tp; mp; mp = mp->next) { + if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", (printed++ ? 
delim : ""), mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name); + else + outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name); } } - if (do_dts) - outp += sprintf(outp, "\tCoreTmp"); + if (DO_BIC(BIC_CPU_c1)) + outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c6)) + outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c7)) + outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : "")); + + if (DO_BIC(BIC_Mod_c6)) + outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : "")); + + if (DO_BIC(BIC_CoreTmp)) + outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); for (mp = sys.cp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", delim, mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", delim, mp->name); + else + outp += sprintf(outp, "%s%s", delim, mp->name); } } - if (do_ptm) - outp += sprintf(outp, "\tPkgTmp"); + if (DO_BIC(BIC_PkgTmp)) + outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : "")); - if (do_gfx_rc6_ms) - outp += sprintf(outp, "\tGFX%%rc6"); + if (DO_BIC(BIC_GFX_rc6)) + outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : "")); - if (do_gfx_mhz) - outp += sprintf(outp, "\tGFXMHz"); + if (DO_BIC(BIC_GFXMHz)) + outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : "")); if (do_skl_residency) { - outp += sprintf(outp, "\tTotl%%C0"); - outp += sprintf(outp, "\tAny%%C0"); - outp += sprintf(outp, "\tGFX%%C0"); - outp += sprintf(outp, "\tCPUGFX%%"); - } - - if (do_pc2) - outp += sprintf(outp, "\tPkg%%pc2"); - if (do_pc3) - outp += sprintf(outp, "\tPkg%%pc3"); - if (do_pc6) - outp += sprintf(outp, "\tPkg%%pc6"); - if (do_pc7) - outp += sprintf(outp, "\tPkg%%pc7"); - if (do_c8_c9_c10) { - outp += sprintf(outp, "\tPkg%%pc8"); - outp += sprintf(outp, "\tPkg%%pc9"); - outp += sprintf(outp, "\tPk%%pc10"); + outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : "")); } + if (DO_BIC(BIC_Pkgpc2)) + outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc3)) + outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc6)) + outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc7)) + outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc8)) + outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc9)) + outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc10)) + outp += sprintf(outp, "%sPk%%pc10", (printed++ ? 
delim : "")); + if (do_rapl && !rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, "\tPkgWatt"); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, "\tCorWatt"); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, "\tGFXWatt"); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, "\tRAMWatt"); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, "\tPKG_%%"); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, "\tRAM_%%"); + if (DO_BIC(BIC_PkgWatt)) + outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_CorWatt)) + outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_GFXWatt)) + outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAMWatt)) + outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); } else if (do_rapl && rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, "\tPkg_J"); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, "\tCor_J"); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, "\tGFX_J"); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, "\tRAM_J"); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, "\tPKG_%%"); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, "\tRAM_%%"); + if (DO_BIC(BIC_Pkg_J)) + outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_Cor_J)) + outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_GFX_J)) + outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM_J)) + outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, "%sRAM_%%", (printed++ ? 
delim : "")); } for (mp = sys.pp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", delim, mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", delim, mp->name); + else + outp += sprintf(outp, "%s%s", delim, mp->name); } } -done: outp += sprintf(outp, "\n"); } @@ -494,10 +681,10 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "mperf: %016llX\n", t->mperf); outp += sprintf(outp, "c1: %016llX\n", t->c1); - if (do_irq) - outp += sprintf(outp, "IRQ: %08X\n", t->irq_count); - if (do_smi) - outp += sprintf(outp, "SMI: %08X\n", t->smi_count); + if (DO_BIC(BIC_IRQ)) + outp += sprintf(outp, "IRQ: %lld\n", t->irq_count); + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "SMI: %d\n", t->smi_count); for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", @@ -516,6 +703,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, c->counter[i]); } + outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); } if (p) { @@ -527,11 +715,11 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); outp += sprintf(outp, "pc2: %016llX\n", p->pc2); - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) outp += sprintf(outp, "pc3: %016llX\n", p->pc3); - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) outp += sprintf(outp, "pc6: %016llX\n", p->pc6); - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) outp += sprintf(outp, "pc7: %016llX\n", p->pc7); outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); @@ -563,10 +751,12 @@ int dump_counters(struct thread_data *t, struct core_data *c, int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - double interval_float; + double interval_float, tsc; char *fmt8; int i; struct msr_counter *mp; + char *delim = "\t"; + int printed = 0; /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) @@ -576,106 +766,126 @@ int format_counters(struct thread_data *t, struct core_data *c, if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; + /*if not summary line and --cpu is used */ + if ((t != &average.threads) && + (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) + return 0; + interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; + tsc = t->tsc * tsc_tweak; + /* topo columns, print blanks on 1st (average) line */ if (t == &average.threads) { - if (show_pkg) - outp += sprintf(outp, "\t-"); - if (show_core) - outp += sprintf(outp, "\t-"); - if (show_cpu) - outp += sprintf(outp, "\t-"); + if (DO_BIC(BIC_Package)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_Core)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } else { - if (show_pkg) { + if (DO_BIC(BIC_Package)) { if (p) - outp += sprintf(outp, "\t%d", p->package_id); + outp += sprintf(outp, "%s%d", (printed++ ? 
delim : ""), p->package_id); else - outp += sprintf(outp, "\t-"); + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } - if (show_core) { + if (DO_BIC(BIC_Core)) { if (c) - outp += sprintf(outp, "\t%d", c->core_id); + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); else - outp += sprintf(outp, "\t-"); + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } - if (show_cpu) - outp += sprintf(outp, "\t%d", t->cpu_id); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); } - /* Avg_MHz */ - if (has_aperf) - outp += sprintf(outp, "\t%.0f", + if (DO_BIC(BIC_Avg_MHz)) + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 / units * t->aperf / interval_float); - /* Busy% */ - if (has_aperf) - outp += sprintf(outp, "\t%.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); + if (DO_BIC(BIC_Busy)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc); - /* Bzy_MHz */ - if (has_aperf) { + if (DO_BIC(BIC_Bzy_MHz)) { if (has_base_hz) - outp += sprintf(outp, "\t%.0f", base_hz / units * t->aperf / t->mperf); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); else - outp += sprintf(outp, "\t%.0f", - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), + tsc / units * t->aperf / t->mperf / interval_float); } - /* TSC_MHz */ - outp += sprintf(outp, "\t%.0f", 1.0 * t->tsc/units/interval_float); - - if (!debug) - goto done; + if (DO_BIC(BIC_TSC_MHz)) + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float); /* IRQ */ - if (do_irq) - outp += sprintf(outp, "\t%d", t->irq_count); + if (DO_BIC(BIC_IRQ)) { + if (sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count); + } /* SMI */ - if (do_smi) - outp += sprintf(outp, "\t%d", t->smi_count); - - if (do_nhm_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * t->c1/t->tsc); - - /* print per-core data only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) - goto done; - - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c3/t->tsc); - if (do_nhm_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c6/t->tsc); - if (do_snb_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c7/t->tsc); + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count); + /* Added counters */ for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) t->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", t->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", t->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * t->counter[i]/t->tsc); + if (mp->type == COUNTER_USEC) + outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), t->counter[i]/interval_float/10000); + else + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc); } } + /* C1 */ + if (DO_BIC(BIC_CPU_c1)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc); - if (do_dts) - outp += sprintf(outp, "\t%d", c->core_temp_c); + + /* print per-core data only for 1st thread in core */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + goto done; + + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); + if (DO_BIC(BIC_CPU_c6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); + if (DO_BIC(BIC_CPU_c7)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc); + + /* Mod%c6 */ + if (DO_BIC(BIC_Mod_c6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc); + + if (DO_BIC(BIC_CoreTmp)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c); for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) c->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", c->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", c->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * c->counter[i]/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc); } } @@ -684,95 +894,89 @@ int format_counters(struct thread_data *t, struct core_data *c, goto done; /* PkgTmp */ - if (do_ptm) - outp += sprintf(outp, "\t%d", p->pkg_temp_c); + if (DO_BIC(BIC_PkgTmp)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c); /* GFXrc6 */ - if (do_gfx_rc6_ms) { + if (DO_BIC(BIC_GFX_rc6)) { if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */ - outp += sprintf(outp, "\t**.**"); + outp += sprintf(outp, "%s**.**", (printed++ ? delim : "")); } else { - outp += sprintf(outp, "\t%.2f", + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), p->gfx_rc6_ms / 10.0 / interval_float); } } /* GFXMHz */ - if (do_gfx_mhz) - outp += sprintf(outp, "\t%d", p->gfx_mhz); + if (DO_BIC(BIC_GFXMHz)) + outp += sprintf(outp, "%s%d", (printed++ ? 
delim : ""), p->gfx_mhz); /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ if (do_skl_residency) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_core_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc); - } - - if (do_pc2) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc2/t->tsc); - if (do_pc3) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc3/t->tsc); - if (do_pc6) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc6/t->tsc); - if (do_pc7) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc7/t->tsc); - if (do_c8_c9_c10) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc8/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc9/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc10/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc); } + if (DO_BIC(BIC_Pkgpc2)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc); + if (DO_BIC(BIC_Pkgpc3)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc); + if (DO_BIC(BIC_Pkgpc6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc); + if (DO_BIC(BIC_Pkgpc7)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc); + if (DO_BIC(BIC_Pkgpc8)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc); + if (DO_BIC(BIC_Pkgpc9)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc); + if (DO_BIC(BIC_Pkgpc10)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc); + /* * If measurement interval exceeds minimum RAPL Joule Counter range, * indicate that results are suspect by printing "**" in fraction place. 
*/ if (interval_float < rapl_joule_counter_range) - fmt8 = "\t%.2f"; + fmt8 = "%s%.2f"; else fmt8 = "%6.0f**"; - if (do_rapl && !rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); - } else if (do_rapl && rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, fmt8, - p->energy_pkg * rapl_energy_units); - if (do_rapl & RAPL_CORES) - outp += sprintf(outp, fmt8, - p->energy_cores * rapl_energy_units); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, fmt8, - p->energy_gfx * rapl_energy_units); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, fmt8, - p->energy_dram * rapl_dram_energy_units); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); - } + if (DO_BIC(BIC_PkgWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); + if (DO_BIC(BIC_CorWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); + if (DO_BIC(BIC_GFXWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); + if (DO_BIC(BIC_RAMWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); + if (DO_BIC(BIC_Pkg_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); + if (DO_BIC(BIC_Cor_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); + if (DO_BIC(BIC_GFX_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); + if (DO_BIC(BIC_RAM_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); + for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) p->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", p->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", p->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? 
delim : ""), p->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->counter[i]/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc); } } @@ -807,7 +1011,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ static int printed; if (!printed || !summary_only) - print_header(); + print_header("\t"); if (topo.num_cpus > 1) format_counters(&average.threads, &average.cores, @@ -841,11 +1045,11 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; } old->pc2 = new->pc2 - old->pc2; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) old->pc3 = new->pc3 - old->pc3; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) old->pc6 = new->pc6 - old->pc6; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) old->pc7 = new->pc7 - old->pc7; old->pc8 = new->pc8 - old->pc8; old->pc9 = new->pc9 - old->pc9; @@ -887,6 +1091,7 @@ delta_core(struct core_data *new, struct core_data *old) old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; old->core_temp_c = new->core_temp_c; + old->mc6_us = new->mc6_us - old->mc6_us; for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) @@ -916,7 +1121,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = new->c1 - old->c1; - if (has_aperf) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { old->aperf = new->aperf - old->aperf; old->mperf = new->mperf - old->mperf; @@ -941,7 +1146,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = 0; else { /* normal case, derive c1 */ - old->c1 = old->tsc - old->mperf - core_delta->c3 + old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; } } @@ -952,10 +1157,10 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->mperf = 1; /* divide by 0 protection */ } - if (do_irq) + if (DO_BIC(BIC_IRQ)) old->irq_count = new->irq_count - old->irq_count; - if (do_smi) + if (DO_BIC(BIC_SMI)) old->smi_count = new->smi_count - old->smi_count; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { @@ -1008,6 +1213,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data c->c3 = 0; c->c6 = 0; c->c7 = 0; + c->mc6_us = 0; c->core_temp_c = 0; p->pkg_wtd_core_c0 = 0; @@ -1016,11 +1222,11 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->pkg_both_core_gfxe_c0 = 0; p->pc2 = 0; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) p->pc3 = 0; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) p->pc6 = 0; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) p->pc7 = 0; p->pc8 = 0; p->pc9 = 0; @@ -1036,7 +1242,6 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->gfx_rc6_ms = 0; p->gfx_mhz = 0; - for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) t->counter[i] = 0; @@ -1073,6 +1278,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.cores.c3 += c->c3; average.cores.c6 += c->c6; average.cores.c7 += c->c7; + average.cores.mc6_us += c->mc6_us; average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); @@ -1094,11 +1300,11 @@ int sum_counters(struct thread_data *t, struct core_data *c, } average.packages.pc2 += p->pc2; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 += p->pc3; - if (do_pc6) 
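/*
 * A minimal standalone model of the DO_BIC() gating used throughout these
 * hunks, assuming only the two masks and two of the bit definitions shown
 * earlier: bic_present records what the hardware probe found, bic_enabled
 * records what --show/--hide left enabled, and a column is handled only
 * when its bit is set in both.
 */
#include <stdio.h>

#define BIC_Pkgpc6	(1ULL << 19)
#define BIC_Pkgpc7	(1ULL << 20)

static unsigned long long bic_enabled = ~0ULL;	/* all columns requested */
static unsigned long long bic_present;		/* filled in by probing */

#define DO_BIC(bit)	(bic_enabled & bic_present & (bit))

int main(void)
{
	bic_present |= BIC_Pkgpc6;	/* probe: package C6 residency exists */
	bic_enabled &= ~BIC_Pkgpc7;	/* user: --hide Pkg%pc7 */

	printf("print Pkg%%pc6: %d, print Pkg%%pc7: %d\n",
	       DO_BIC(BIC_Pkgpc6) != 0, DO_BIC(BIC_Pkgpc7) != 0);
	return 0;
}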
+ if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 += p->pc6; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 += p->pc7; average.packages.pc8 += p->pc8; average.packages.pc9 += p->pc9; @@ -1143,9 +1349,13 @@ void compute_average(struct thread_data *t, struct core_data *c, average.threads.mperf /= topo.num_cpus; average.threads.c1 /= topo.num_cpus; + if (average.threads.irq_count > 9999999) + sums_need_wide_columns = 1; + average.cores.c3 /= topo.num_cores; average.cores.c6 /= topo.num_cores; average.cores.c7 /= topo.num_cores; + average.cores.mc6_us /= topo.num_cores; if (do_skl_residency) { average.packages.pkg_wtd_core_c0 /= topo.num_packages; @@ -1155,11 +1365,11 @@ void compute_average(struct thread_data *t, struct core_data *c, } average.packages.pc2 /= topo.num_packages; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 /= topo.num_packages; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 /= topo.num_packages; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 /= topo.num_packages; average.packages.pc8 /= topo.num_packages; @@ -1169,16 +1379,29 @@ void compute_average(struct thread_data *t, struct core_data *c, for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.threads.counter[i] > 9999999) + sums_need_wide_columns = 1; + continue; + } average.threads.counter[i] /= topo.num_cpus; } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.cores.counter[i] > 9999999) + sums_need_wide_columns = 1; + } average.cores.counter[i] /= topo.num_cores; } for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.packages.counter[i] > 9999999) + sums_need_wide_columns = 1; + } average.packages.counter[i] /= topo.num_packages; } } @@ -1193,6 +1416,60 @@ static unsigned long long rdtsc(void) } /* + * Open a file, and exit on failure + */ +FILE *fopen_or_die(const char *path, const char *mode) +{ + FILE *filep = fopen(path, mode); + + if (!filep) + err(1, "%s: open failed", path); + return filep; +} +/* + * snapshot_sysfs_counter() + * + * return snapshot of given counter + */ +unsigned long long snapshot_sysfs_counter(char *path) +{ + FILE *fp; + int retval; + unsigned long long counter; + + fp = fopen_or_die(path, "r"); + + retval = fscanf(fp, "%lld", &counter); + if (retval != 1) + err(1, "snapshot_sysfs_counter(%s)", path); + + fclose(fp); + + return counter; +} + +int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) +{ + if (mp->msr_num != 0) { + if (get_msr(cpu, mp->msr_num, counterp)) + return -1; + } else { + char path[128]; + + if (mp->flags & SYSFS_PERCPU) { + sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", + cpu, mp->path); + + *counterp = snapshot_sysfs_counter(path); + } else { + *counterp = snapshot_sysfs_counter(mp->path); + } + } + + return 0; +} + +/* * get_counters(...) 
* migrate to cpu * acquire and record local counters for that cpu @@ -1213,7 +1490,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ - if (has_aperf) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; /* @@ -1269,35 +1546,33 @@ retry: t->mperf = t->mperf * aperf_mperf_multiplier; } - if (do_irq) + if (DO_BIC(BIC_IRQ)) t->irq_count = irqs_per_cpu[cpu]; - if (do_smi) { + if (DO_BIC(BIC_SMI)) { if (get_msr(cpu, MSR_SMI_COUNT, &msr)) return -5; t->smi_count = msr & 0xFFFFFFFF; } - - if (use_c1_residency_msr) { + if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) { if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) return -6; } for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &t->counter[i])) + if (get_mp(cpu, mp, &t->counter[i])) return -10; } - /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } - if (do_nhm_cstates && !do_knl_cstates) { + if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } else if (do_knl_cstates) { @@ -1305,18 +1580,22 @@ retry: return -7; } - if (do_snb_cstates) + if (DO_BIC(BIC_CPU_c7)) if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; - if (do_dts) { + if (DO_BIC(BIC_Mod_c6)) + if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) + return -8; + + if (DO_BIC(BIC_CoreTmp)) { if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return -9; c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &c->counter[i])) + if (get_mp(cpu, mp, &c->counter[i])) return -10; } @@ -1334,26 +1613,35 @@ retry: if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) return -13; } - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; - if (do_pc6) - if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) - return -10; - if (do_pc2) + if (DO_BIC(BIC_Pkgpc6)) { + if (do_slm_cstates) { + if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6)) + return -10; + } else { + if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) + return -10; + } + } + + if (DO_BIC(BIC_Pkgpc2)) if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) return -11; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; - if (do_c8_c9_c10) { + if (DO_BIC(BIC_Pkgpc8)) if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) return -13; + if (DO_BIC(BIC_Pkgpc9)) if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) return -13; + if (DO_BIC(BIC_Pkgpc10)) if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) return -13; - } + if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; @@ -1384,20 +1672,20 @@ retry: return -16; p->rapl_dram_perf_status = msr & 0xFFFFFFFF; } - if (do_ptm) { + if (DO_BIC(BIC_PkgTmp)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return -17; p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } - if (do_gfx_rc6_ms) + if (DO_BIC(BIC_GFX_rc6)) p->gfx_rc6_ms = gfx_cur_rc6_ms; - if (do_gfx_mhz) + if (DO_BIC(BIC_GFXMHz)) p->gfx_mhz = gfx_cur_mhz; for (i = 0, mp = 
sys.pp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &p->counter[i])) + if (get_mp(cpu, mp, &p->counter[i])) return -10; } @@ -1433,8 +1721,8 @@ char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; +int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7}; +int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; @@ -1457,11 +1745,11 @@ dump_nhm_platform_info(void) fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 40) & 0xFF; - fprintf(outf, "%d * %.0f = %.0f MHz max efficiency frequency\n", + fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; - fprintf(outf, "%d * %.0f = %.0f MHz base frequency\n", + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk); get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); @@ -1483,12 +1771,12 @@ dump_hsw_turbo_ratio_limits(void) ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 18 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 17 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", ratio, bclk, ratio * bclk); return; } @@ -1505,99 +1793,175 @@ dump_ivt_turbo_ratio_limits(void) ratio = (msr >> 56) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 16 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 48) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 15 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 40) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 14 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 32) & 
0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 13 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 24) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 12 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 11 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 10 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 9 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", ratio, bclk, ratio * bclk); return; } +int has_turbo_ratio_group_limits(int family, int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ATOM_DENVERTON: + return 1; + } + return 0; +} static void -dump_nhm_turbo_ratio_limits(void) +dump_turbo_ratio_limits(int family, int model) { - unsigned long long msr; - unsigned int ratio; + unsigned long long msr, core_counts; + unsigned int ratio, group_size; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); - fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); + if (has_turbo_ratio_group_limits(family, model)) { + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts); + fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts); + } else { + core_counts = 0x0807060504030201; + } + ratio = (msr >> 56) & 0xFF; + group_size = (core_counts >> 56) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 8 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 48) & 0xFF; + group_size = (core_counts >> 48) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 7 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 40) & 0xFF; + group_size = (core_counts >> 40) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 6 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 32) & 0xFF; + group_size = (core_counts >> 32) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 5 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 24) & 0xFF; + group_size = (core_counts >> 24) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 16) & 0xFF; + group_size = (core_counts >> 16) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * 
bclk, group_size); ratio = (msr >> 8) & 0xFF; + group_size = (core_counts >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 0) & 0xFF; + group_size = (core_counts >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); return; } static void +dump_atom_turbo_ratio_limits(void) +{ + unsigned long long msr; + unsigned int ratio; + + get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr); + fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); + + ratio = (msr >> 0) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 8) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 16) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", + ratio, bclk, ratio * bclk); + + get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr); + fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); + + ratio = (msr >> 24) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 16) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 8) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 0) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n", + ratio, bclk, ratio * bclk); +} + +static void dump_knl_turbo_ratio_limits(void) { const unsigned int buckets_no = 7; @@ -1652,7 +2016,7 @@ dump_knl_turbo_ratio_limits(void) for (i = buckets_no - 1; i >= 0; i--) if (i > 0 ? ratio[i] != ratio[i - 1] : 1) fprintf(outf, - "%d * %.0f = %.0f MHz max turbo %d active cores\n", + "%d * %.1f = %.1f MHz max turbo %d active cores\n", ratio[i], bclk, ratio[i] * bclk, cores[i]); } @@ -1661,12 +2025,12 @@ dump_nhm_cst_cfg(void) { unsigned long long msr; - get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) - fprintf(outf, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr); + fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr); fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", @@ -1810,16 +2174,6 @@ void free_all_buffers(void) free(irqs_per_cpu); } -/* - * Open a file, and exit on failure - */ -FILE *fopen_or_die(const char *path, const char *mode) -{ - FILE *filep = fopen(path, mode); - if (!filep) - err(1, "%s: open failed", path); - return filep; -} /* * Parse a file containing a single int. 
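The per-group decode performed by the new dump_turbo_ratio_limits() above amounts to walking the eight ratio bytes of MSR_TURBO_RATIO_LIMIT alongside a byte-per-group core count (MSR_TURBO_RATIO_LIMIT1 on parts with group limits, otherwise the implied 1..8 encoded as 0x0807060504030201). A compact standalone sketch, using a made-up ratio register and assuming a 100 MHz bclk:

#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0x2829292a2b2c2d2eULL;	/* example ratios */
	unsigned long long core_counts = 0x0807060504030201ULL;
	double bclk = 100.0;
	int i;

	/* highest byte first, mirroring the order used by turbostat */
	for (i = 7; i >= 0; i--) {
		unsigned int ratio = (msr >> (i * 8)) & 0xFF;
		unsigned int group_size = (core_counts >> (i * 8)) & 0xFF;

		if (ratio)
			printf("%d * %.1f = %.1f MHz max turbo %d active cores\n",
			       ratio, bclk, ratio * bclk, group_size);
	}
	return 0;
}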
@@ -2148,13 +2502,14 @@ int snapshot_gfx_mhz(void) */ int snapshot_proc_sysfs_files(void) { - if (snapshot_proc_interrupts()) - return 1; + if (DO_BIC(BIC_IRQ)) + if (snapshot_proc_interrupts()) + return 1; - if (do_gfx_rc6_ms) + if (DO_BIC(BIC_GFX_rc6)) snapshot_gfx_rc6_ms(); - if (do_gfx_mhz) + if (DO_BIC(BIC_GFXMHz)) snapshot_gfx_mhz(); return 0; @@ -2283,7 +2638,9 @@ void check_permissions() * MSR_SMI_COUNT 0x00000034 * * MSR_PLATFORM_INFO 0x000000ce - * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 + * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 + * + * MSR_MISC_PWR_MGMT 0x000001aa * * MSR_PKG_C3_RESIDENCY 0x000003f8 * MSR_PKG_C6_RESIDENCY 0x000003f9 @@ -2291,7 +2648,8 @@ void check_permissions() * MSR_CORE_C6_RESIDENCY 0x000003fd * * Side effect: - * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL + * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL + * sets has_misc_feature_control */ int probe_nhm_msrs(unsigned int family, unsigned int model) { @@ -2322,6 +2680,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ pkg_cstate_limits = snb_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_HASWELL_CORE: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -2336,29 +2695,34 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ pkg_cstate_limits = hsw_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_SKYLAKE_X: /* SKX */ pkg_cstate_limits = skx_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ + no_MSR_MISC_PWR_MGMT = 1; case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ pkg_cstate_limits = amt_pkg_cstate_limits; + no_MSR_MISC_PWR_MGMT = 1; break; case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ case INTEL_FAM6_XEON_PHI_KNM: pkg_cstate_limits = phi_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ pkg_cstate_limits = bxt_pkg_cstate_limits; break; default: return 0; } - get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); @@ -2368,8 +2732,69 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) has_base_hz = 1; return 1; } -int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) +/* + * SLV client has support for unique MSRs: + * + * MSR_CC6_DEMOTION_POLICY_CONFIG + * MSR_MC6_DEMOTION_POLICY_CONFIG + */ + +int has_slv_msrs(unsigned int family, unsigned int model) { + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_SILVERMONT1: + case INTEL_FAM6_ATOM_MERRIFIELD: + case INTEL_FAM6_ATOM_MOOREFIELD: + return 1; + } + return 0; +} +int is_dnv(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_DENVERTON: + return 1; + } + return 0; +} +int is_bdx(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_BROADWELL_X: + case INTEL_FAM6_BROADWELL_XEON_D: + return 1; + } + return 0; +} +int is_skx(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + 
case INTEL_FAM6_SKYLAKE_X: + return 1; + } + return 0; +} + +int has_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (has_slv_msrs(family, model)) + return 0; + switch (model) { /* Nehalem compatible, but do not include turbo-ratio limit support */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ @@ -2381,6 +2806,13 @@ int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) return 1; } } +int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (has_slv_msrs(family, model)) + return 1; + + return 0; +} int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -2429,6 +2861,22 @@ int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; } } +int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + if (family != 6) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_SKYLAKE_X: + return 1; + default: + return 0; + } +} int has_config_tdp(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -2475,8 +2923,11 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model) if (has_ivt_turbo_ratio_limit(family, model)) dump_ivt_turbo_ratio_limits(); - if (has_nhm_turbo_ratio_limit(family, model)) - dump_nhm_turbo_ratio_limits(); + if (has_turbo_ratio_limit(family, model)) + dump_turbo_ratio_limits(family, model); + + if (has_atom_turbo_ratio_limit(family, model)) + dump_atom_turbo_ratio_limits(); if (has_knl_turbo_ratio_limit(family, model)) dump_knl_turbo_ratio_limits(); @@ -2487,6 +2938,96 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model) dump_nhm_cst_cfg(); } +static void +dump_sysfs_cstate_config(void) +{ + char path[64]; + char name_buf[16]; + char desc[64]; + FILE *input; + int state; + char *sp; + + if (!DO_BIC(BIC_sysfs)) + return; + + for (state = 0; state < 10; ++state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '\0'; + + fclose(input); + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(desc, sizeof(desc), input); + + fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); + fclose(input); + } +} +static void +dump_sysfs_pstate_config(void) +{ + char path[64]; + char driver_buf[64]; + char governor_buf[64]; + FILE *input; + int turbo; + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", + base_cpu); + input = fopen(path, "r"); + if (input == NULL) { + fprintf(stderr, "NSFOD %s\n", path); + return; + } + fgets(driver_buf, sizeof(driver_buf), input); + fclose(input); + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", + base_cpu); + input = fopen(path, "r"); + if (input == NULL) { + fprintf(stderr, "NSFOD %s\n", path); + return; + } + fgets(governor_buf, sizeof(governor_buf), input); + fclose(input); + + fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); + fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf); + + sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); + input = fopen(path, "r"); + if (input != NULL) { + fscanf(input, "%d", 
&turbo); + fprintf(outf, "cpufreq boost: %d\n", turbo); + fclose(input); + } + + sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); + input = fopen(path, "r"); + if (input != NULL) { + fscanf(input, "%d", &turbo); + fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); + fclose(input); + } +} + /* * print_epb() @@ -2790,15 +3331,40 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_GFX_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_GFXWatt); + } break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; + if (rapl_joules) + BIC_PRESENT(BIC_Pkg_J); + else + BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ @@ -2807,17 +3373,55 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ case INTEL_FAM6_XEON_PHI_KNM: do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ do_rapl = RAPL_PKG | RAPL_CORES; + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + } break; case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; default: return; @@ -2844,7 +3448,7 @@ void rapl_probe(unsigned int family, unsigned int model) tdp = get_tdp(model); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; - if (debug) + if (!quiet) fprintf(outf, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); return; @@ -2969,11 +3573,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) return -1; - if (debug) { - fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " - "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, - rapl_power_units, rapl_energy_units, rapl_time_units); - } + fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, + rapl_power_units, rapl_energy_units, rapl_time_units); + if (do_rapl & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) @@ -2994,7 +3596,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -9; fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 63) & 1 ? "": "UN"); + cpu, msr, (msr >> 63) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "PKG Limit #1"); fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", @@ -3020,40 +3622,34 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "DRAM Limit"); } if (do_rapl & RAPL_CORE_POLICY) { - if (debug) { - if (get_msr(cpu, MSR_PP0_POLICY, &msr)) - return -7; + if (get_msr(cpu, MSR_PP0_POLICY, &msr)) + return -7; - fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); - } + fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } if (do_rapl & RAPL_CORES_POWER_LIMIT) { - if (debug) { - if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) - return -9; - fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); - print_power_limit_msr(cpu, msr, "Cores Limit"); - } + if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) + return -9; + fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); + print_power_limit_msr(cpu, msr, "Cores Limit"); } if (do_rapl & RAPL_GFX) { - if (debug) { - if (get_msr(cpu, MSR_PP1_POLICY, &msr)) - return -8; + if (get_msr(cpu, MSR_PP1_POLICY, &msr)) + return -8; - fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); + fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); - if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) - return -9; - fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); - print_power_limit_msr(cpu, msr, "GFX Limit"); - } + if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) + return -9; + fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? 
"" : "UN"); + print_power_limit_msr(cpu, msr, "GFX Limit"); } return 0; } @@ -3090,6 +3686,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ return 1; } @@ -3121,6 +3718,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: return 1; } return 0; @@ -3149,8 +3747,6 @@ int has_skl_msrs(unsigned int family, unsigned int model) return 0; } - - int is_slm(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3201,7 +3797,8 @@ double slm_bclk(void) } freq = slm_freq_table[i]; - fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); + if (!quiet) + fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); return freq; } @@ -3264,7 +3861,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk target_c_local = (msr >> 16) & 0xFF; - if (debug) + if (!quiet) fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, target_c_local); @@ -3299,13 +3896,30 @@ void decode_misc_enable_msr(void) unsigned long long msr; if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) - fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n", + fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n", base_cpu, msr, - msr & (1 << 3) ? "TCC" : "", - msr & (1 << 16) ? "EIST" : "", - msr & (1 << 18) ? "MONITOR" : ""); + msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-", + msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-", + msr & MSR_IA32_MISC_ENABLE_MWAIT ? "No-" : "", + msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", + msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : ""); } +void decode_misc_feature_control(void) +{ + unsigned long long msr; + + if (!has_misc_feature_control) + return; + + if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) + fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", + base_cpu, msr, + msr & (0 << 0) ? "No-" : "", + msr & (1 << 0) ? "No-" : "", + msr & (2 << 0) ? "No-" : "", + msr & (3 << 0) ? "No-" : ""); +} /* * Decode MSR_MISC_PWR_MGMT * @@ -3320,6 +3934,9 @@ void decode_misc_pwr_mgmt_msr(void) if (!do_nhm_platform_info) return; + if (no_MSR_MISC_PWR_MGMT) + return; + if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n", base_cpu, msr, @@ -3327,11 +3944,30 @@ void decode_misc_pwr_mgmt_msr(void) msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS"); } +/* + * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG + * + * This MSRs are present on Silvermont processors, + * Intel Atom processor E3000 series (Baytrail), and friends. + */ +void decode_c6_demotion_policy_msr(void) +{ + unsigned long long msr; + + if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr)) + fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n", + base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); + + if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr)) + fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n", + base_cpu, msr, msr & (1 << 0) ? 
"EN" : "DIS"); +} void process_cpuid() { unsigned int eax, ebx, ecx, edx, max_level, max_extended_level; unsigned int fms, family, model, stepping; + unsigned int has_turbo; eax = ebx = ecx = edx = 0; @@ -3340,7 +3976,7 @@ void process_cpuid() if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; - if (debug) + if (!quiet) fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); @@ -3351,7 +3987,7 @@ void process_cpuid() if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; - if (debug) { + if (!quiet) { fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", @@ -3394,8 +4030,18 @@ void process_cpuid() __cpuid(0x6, eax, ebx, ecx, edx); has_aperf = ecx & (1 << 0); + if (has_aperf) { + BIC_PRESENT(BIC_Avg_MHz); + BIC_PRESENT(BIC_Busy); + BIC_PRESENT(BIC_Bzy_MHz); + } do_dts = eax & (1 << 0); + if (do_dts) + BIC_PRESENT(BIC_CoreTmp); + has_turbo = eax & (1 << 1); do_ptm = eax & (1 << 6); + if (do_ptm) + BIC_PRESENT(BIC_PkgTmp); has_hwp = eax & (1 << 7); has_hwp_notify = eax & (1 << 8); has_hwp_activity_window = eax & (1 << 9); @@ -3403,10 +4049,11 @@ void process_cpuid() has_hwp_pkg = eax & (1 << 11); has_epb = ecx & (1 << 3); - if (debug) - fprintf(outf, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, " + if (!quiet) + fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, " "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", has_aperf ? "" : "No-", + has_turbo ? "" : "No-", do_dts ? "" : "No-", do_ptm ? "" : "No-", has_hwp ? "" : "No-", @@ -3416,10 +4063,11 @@ void process_cpuid() has_hwp_pkg ? "" : "No-", has_epb ? "" : "No-"); - if (debug) + if (!quiet) decode_misc_enable_msr(); - if (max_level >= 0x7 && debug) { + + if (max_level >= 0x7 && !quiet) { int has_sgx; ecx = 0; @@ -3445,7 +4093,7 @@ void process_cpuid() if (ebx_tsc != 0) { - if (debug && (ebx != 0)) + if (!quiet && (ebx != 0)) fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz); @@ -3462,6 +4110,7 @@ void process_cpuid() crystal_hz = 25000000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: crystal_hz = 19200000; /* 19.2 MHz */ break; default: @@ -3470,7 +4119,7 @@ void process_cpuid() if (crystal_hz) { tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; - if (debug) + if (!quiet) fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); } @@ -3485,7 +4134,7 @@ void process_cpuid() base_mhz = max_mhz = bus_mhz = edx = 0; __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); - if (debug) + if (!quiet) fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", base_mhz, max_mhz, bus_mhz); } @@ -3493,56 +4142,96 @@ void process_cpuid() if (has_aperf) aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); - do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); + BIC_PRESENT(BIC_IRQ); + BIC_PRESENT(BIC_TSC_MHz); + + if (probe_nhm_msrs(family, model)) { + do_nhm_platform_info = 1; + BIC_PRESENT(BIC_CPU_c1); + BIC_PRESENT(BIC_CPU_c3); + BIC_PRESENT(BIC_CPU_c6); + BIC_PRESENT(BIC_SMI); + } do_snb_cstates = has_snb_msrs(family, model); + + if (do_snb_cstates) + BIC_PRESENT(BIC_CPU_c7); + do_irtl_snb = has_snb_msrs(family, model); - do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); - do_pc3 = 
(pkg_cstate_limit >= PCL__3); - do_pc6 = (pkg_cstate_limit >= PCL__6); - do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); - do_c8_c9_c10 = has_hsw_msrs(family, model); + if (do_snb_cstates && (pkg_cstate_limit >= PCL__2)) + BIC_PRESENT(BIC_Pkgpc2); + if (pkg_cstate_limit >= PCL__3) + BIC_PRESENT(BIC_Pkgpc3); + if (pkg_cstate_limit >= PCL__6) + BIC_PRESENT(BIC_Pkgpc6); + if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) + BIC_PRESENT(BIC_Pkgpc7); + if (has_slv_msrs(family, model)) { + BIC_NOT_PRESENT(BIC_Pkgpc2); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_PRESENT(BIC_Pkgpc6); + BIC_NOT_PRESENT(BIC_Pkgpc7); + BIC_PRESENT(BIC_Mod_c6); + use_c1_residency_msr = 1; + } + if (is_dnv(family, model)) { + BIC_PRESENT(BIC_CPU_c1); + BIC_NOT_PRESENT(BIC_CPU_c3); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + use_c1_residency_msr = 1; + } + if (is_skx(family, model)) { + BIC_NOT_PRESENT(BIC_CPU_c3); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + } + if (is_bdx(family, model)) { + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + } + if (has_hsw_msrs(family, model)) { + BIC_PRESENT(BIC_Pkgpc8); + BIC_PRESENT(BIC_Pkgpc9); + BIC_PRESENT(BIC_Pkgpc10); + } do_irtl_hsw = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); - if (debug) + if (!quiet) decode_misc_pwr_mgmt_msr(); + if (!quiet && has_slv_msrs(family, model)) + decode_c6_demotion_policy_msr(); + rapl_probe(family, model); perf_limit_reasons_probe(family, model); - if (debug) + if (!quiet) dump_cstate_pstate_config_info(family, model); + if (!quiet) + dump_sysfs_cstate_config(); + if (!quiet) + dump_sysfs_pstate_config(); + if (has_skl_msrs(family, model)) calculate_tsc_tweak(); - do_gfx_rc6_ms = !access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK); + if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) + BIC_PRESENT(BIC_GFX_rc6); - do_gfx_mhz = !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK); + if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) + BIC_PRESENT(BIC_GFXMHz); - return; -} + if (!quiet) + decode_misc_feature_control(); -void help() -{ - fprintf(outf, - "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" - "\n" - "Turbostat forks the specified COMMAND and prints statistics\n" - "when COMMAND completes.\n" - "If no COMMAND is specified, turbostat wakes every 5-seconds\n" - "to print statistics, until interrupted.\n" - "--add add a counter\n" - " eg. 
--add msr0x10,u64,cpu,delta,MY_TSC\n" - "--debug run in \"debug\" mode\n" - "--interval sec Override default 5-second measurement interval\n" - "--help print this help message\n" - "--out file create or truncate \"file\" for all output\n" - "--version print version information\n" - "\n" - "For more help, run \"man turbostat\"\n"); + return; } @@ -3579,7 +4268,7 @@ void topology_probe() topo.max_cpu_num = 0; for_all_proc_cpus(count_cpus); if (!summary_only && topo.num_cpus > 1) - show_cpu = 1; + BIC_PRESENT(BIC_CPU); if (debug > 1) fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); @@ -3599,6 +4288,15 @@ void topology_probe() for_all_proc_cpus(mark_cpu_present); /* + * Validate that all cpus in cpu_subset are also in cpu_present_set + */ + for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) { + if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) + if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) + err(1, "cpu%d not present", i); + } + + /* * Allocate and initialize cpu_affinity_set */ cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); @@ -3639,15 +4337,15 @@ void topology_probe() if (debug > 1) fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.num_cores_per_pkg); - if (debug && !summary_only && topo.num_cores_per_pkg > 1) - show_core = 1; + if (!summary_only && topo.num_cores_per_pkg > 1) + BIC_PRESENT(BIC_Core); topo.num_packages = max_package_id + 1; if (debug > 1) fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); - if (debug && !summary_only && topo.num_packages > 1) - show_pkg = 1; + if (!summary_only && topo.num_packages > 1) + BIC_PRESENT(BIC_Package); topo.num_threads_per_core = max_siblings; if (debug > 1) @@ -3662,7 +4360,7 @@ allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data int i; *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * - topo.num_packages, sizeof(struct thread_data) + sys.thread_counter_bytes); + topo.num_packages, sizeof(struct thread_data)); if (*t == NULL) goto error; @@ -3671,14 +4369,14 @@ allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data (*t)[i].cpu_id = -1; *c = calloc(topo.num_cores_per_pkg * topo.num_packages, - sizeof(struct core_data) + sys.core_counter_bytes); + sizeof(struct core_data)); if (*c == NULL) goto error; for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) (*c)[i].core_id = -1; - *p = calloc(topo.num_packages, sizeof(struct pkg_data) + sys.package_counter_bytes); + *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; @@ -3789,24 +4487,24 @@ void turbostat_init() process_cpuid(); - if (debug) + if (!quiet) for_all_cpus(print_hwp, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_epb, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_perf_limit, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_rapl, ODD_COUNTERS); for_all_cpus(set_temperature_target, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_thermal, ODD_COUNTERS); - if (debug && do_irtl_snb) + if (!quiet && do_irtl_snb) print_irtl(); } @@ -3815,6 +4513,7 @@ int fork_it(char **argv) pid_t child_pid; int status; + snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, EVEN_COUNTERS); if (status) exit(status); @@ -3826,6 +4525,7 @@ int fork_it(char **argv) if (!child_pid) { /* child */ execvp(argv[0], argv); + err(errno, "exec %s", argv[0]); } else { /* parent */ @@ -3841,6 +4541,7 @@ int fork_it(char **argv) * 
n.b. fork_it() does not check for errors from for_all_cpus() * because re-starting is problematic when forking */ + snapshot_proc_sysfs_files(); for_all_cpus(get_counters, ODD_COUNTERS); gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); @@ -3862,6 +4563,7 @@ int get_and_dump_counters(void) { int status; + snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, ODD_COUNTERS); if (status) return status; @@ -3876,13 +4578,13 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 4.16 24 Dec 2016" + fprintf(outf, "turbostat version 17.02.24" " - Len Brown <lenb@kernel.org>\n"); } -int add_counter(unsigned int msr_num, char *name, unsigned int width, - enum counter_scope scope, enum counter_type type, - enum counter_format format) +int add_counter(unsigned int msr_num, char *path, char *name, + unsigned int width, enum counter_scope scope, + enum counter_type type, enum counter_format format, int flags) { struct msr_counter *msrp; @@ -3894,31 +4596,46 @@ int add_counter(unsigned int msr_num, char *name, unsigned int width, msrp->msr_num = msr_num; strncpy(msrp->name, name, NAME_BYTES); + if (path) + strncpy(msrp->path, path, PATH_BYTES); msrp->width = width; msrp->type = type; msrp->format = format; + msrp->flags = flags; switch (scope) { case SCOPE_CPU: - sys.thread_counter_bytes += 64; msrp->next = sys.tp; sys.tp = msrp; - sys.thread_counter_bytes += sizeof(unsigned long long); + sys.added_thread_counters++; + if (sys.added_thread_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added thread counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; case SCOPE_CORE: - sys.core_counter_bytes += 64; msrp->next = sys.cp; sys.cp = msrp; - sys.core_counter_bytes += sizeof(unsigned long long); + sys.added_core_counters++; + if (sys.added_core_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added core counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; case SCOPE_PACKAGE: - sys.package_counter_bytes += 64; msrp->next = sys.pp; sys.pp = msrp; - sys.package_counter_bytes += sizeof(unsigned long long); + sys.added_package_counters++; + if (sys.added_package_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added package counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; } @@ -3928,7 +4645,8 @@ int add_counter(unsigned int msr_num, char *name, unsigned int width, void parse_add_command(char *add_command) { int msr_num = 0; - char name_buffer[NAME_BYTES]; + char *path = NULL; + char name_buffer[NAME_BYTES] = ""; int width = 64; int fail = 0; enum counter_scope scope = SCOPE_CPU; @@ -3943,6 +4661,11 @@ void parse_add_command(char *add_command) if (sscanf(add_command, "msr%d", &msr_num) == 1) goto next; + if (*add_command == '/') { + path = add_command; + goto next; + } + if (sscanf(add_command, "u%d", &width) == 1) { if ((width == 32) || (width == 64)) goto next; @@ -3968,6 +4691,10 @@ void parse_add_command(char *add_command) type = COUNTER_SECONDS; goto next; } + if (!strncmp(add_command, "usec", strlen("usec"))) { + type = COUNTER_USEC; + goto next; + } if (!strncmp(add_command, "raw", strlen("raw"))) { format = FORMAT_RAW; goto next; @@ -3992,36 +4719,26 @@ void parse_add_command(char *add_command) next: add_command = strchr(add_command, ','); - if (add_command) + if (add_command) { + *add_command = '\0'; add_command++; + } } - if (msr_num == 0) { - fprintf(stderr, "--add: (msrDDD | msr0xXXX) required\n"); + if ((msr_num == 0) && (path == NULL)) 
{ + fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n"); fail++; } /* generate default column header */ if (*name_buffer == '\0') { - if (format == FORMAT_RAW) { - if (width == 32) - sprintf(name_buffer, "msr%d", msr_num); - else - sprintf(name_buffer, "MSR%d", msr_num); - } else if (format == FORMAT_DELTA) { - if (width == 32) - sprintf(name_buffer, "cnt%d", msr_num); - else - sprintf(name_buffer, "CNT%d", msr_num); - } else if (format == FORMAT_PERCENT) { - if (width == 32) - sprintf(name_buffer, "msr%d%%", msr_num); - else - sprintf(name_buffer, "MSR%d%%", msr_num); - } + if (width == 32) + sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); + else + sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); } - if (add_counter(msr_num, name_buffer, width, scope, type, format)) + if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0)) fail++; if (fail) { @@ -4029,20 +4746,214 @@ next: exit(1); } } + +int is_deferred_skip(char *name) +{ + int i; + + for (i = 0; i < deferred_skip_index; ++i) + if (!strcmp(name, deferred_skip_names[i])) + return 1; + return 0; +} + +void probe_sysfs(void) +{ + char path[64]; + char name_buf[16]; + FILE *input; + int state; + char *sp; + + if (!DO_BIC(BIC_sysfs)) + return; + + for (state = 10; state > 0; --state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '%'; + *(sp + 1) = '\0'; + + fclose(input); + + sprintf(path, "cpuidle/state%d/time", state); + + if (is_deferred_skip(name_buf)) + continue; + + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, + FORMAT_PERCENT, SYSFS_PERCPU); + } + + for (state = 10; state > 0; --state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '\0'; + fclose(input); + + sprintf(path, "cpuidle/state%d/usage", state); + + if (is_deferred_skip(name_buf)) + continue; + + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, + FORMAT_DELTA, SYSFS_PERCPU); + } + +} + + +/* + * parse cpuset with following syntax + * 1,2,4..6,8-10 and set bits in cpu_subset + */ +void parse_cpu_command(char *optarg) +{ + unsigned int start, end; + char *next; + + if (!strcmp(optarg, "core")) { + if (cpu_subset) + goto error; + show_core_only++; + return; + } + if (!strcmp(optarg, "package")) { + if (cpu_subset) + goto error; + show_pkg_only++; + return; + } + if (show_core_only || show_pkg_only) + goto error; + + cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS); + if (cpu_subset == NULL) + err(3, "CPU_ALLOC"); + cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS); + + CPU_ZERO_S(cpu_subset_size, cpu_subset); + + next = optarg; + + while (next && *next) { + + if (*next == '-') /* no negative cpu numbers */ + goto error; + + start = strtoul(next, &next, 10); + + if (start >= CPU_SUBSET_MAXCPUS) + goto error; + CPU_SET_S(start, cpu_subset_size, cpu_subset); + + if (*next == '\0') + break; + + if (*next == ',') { + next += 1; + continue; + } + + if (*next == '-') { 
+ next += 1; /* start range */ + } else if (*next == '.') { + next += 1; + if (*next == '.') + next += 1; /* start range */ + else + goto error; + } + + end = strtoul(next, &next, 10); + if (end <= start) + goto error; + + while (++start <= end) { + if (start >= CPU_SUBSET_MAXCPUS) + goto error; + CPU_SET_S(start, cpu_subset_size, cpu_subset); + } + + if (*next == ',') + next += 1; + else if (*next != '\0') + goto error; + } + + return; + +error: + fprintf(stderr, "\"--cpu %s\" malformed\n", optarg); + help(); + exit(-1); +} + +int shown; +/* + * parse_show_hide() - process cmdline to set default counter action + */ +void parse_show_hide(char *optarg, enum show_hide_mode new_mode) +{ + /* + * --show: show only those specified + * The 1st invocation will clear and replace the enabled mask + * subsequent invocations can add to it. + */ + if (new_mode == SHOW_LIST) { + if (shown == 0) + bic_enabled = bic_lookup(optarg, new_mode); + else + bic_enabled |= bic_lookup(optarg, new_mode); + shown = 1; + + return; + } + + /* + * --hide: do not show those specified + * multiple invocations simply clear more bits in enabled mask + */ + bic_enabled &= ~bic_lookup(optarg, new_mode); + +} + void cmdline(int argc, char **argv) { int opt; int option_index = 0; static struct option long_options[] = { {"add", required_argument, 0, 'a'}, + {"cpu", required_argument, 0, 'c'}, {"Dump", no_argument, 0, 'D'}, - {"debug", no_argument, 0, 'd'}, + {"debug", no_argument, 0, 'd'}, /* internal, not documented */ {"interval", required_argument, 0, 'i'}, {"help", no_argument, 0, 'h'}, + {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help {"Joules", no_argument, 0, 'J'}, + {"list", no_argument, 0, 'l'}, {"out", required_argument, 0, 'o'}, - {"Package", no_argument, 0, 'p'}, - {"processor", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"show", required_argument, 0, 's'}, {"Summary", no_argument, 0, 'S'}, {"TCC", required_argument, 0, 'T'}, {"version", no_argument, 0, 'v' }, @@ -4051,18 +4962,24 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpST:v", + while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v", long_options, &option_index)) != -1) { switch (opt) { case 'a': parse_add_command(optarg); break; + case 'c': + parse_cpu_command(optarg); + break; case 'D': dump_only++; break; case 'd': debug++; break; + case 'H': + parse_show_hide(optarg, HIDE_LIST); + break; case 'h': default: help(); @@ -4084,14 +5001,18 @@ void cmdline(int argc, char **argv) case 'J': rapl_joules++; break; + case 'l': + list_header_only++; + quiet++; + break; case 'o': outf = fopen_or_die(optarg, "w"); break; - case 'P': - show_pkg_only++; + case 'q': + quiet = 1; break; - case 'p': - show_core_only++; + case 's': + parse_show_hide(optarg, SHOW_LIST); break; case 'S': summary_only++; @@ -4113,15 +5034,24 @@ int main(int argc, char **argv) cmdline(argc, argv); - if (debug) + if (!quiet) print_version(); + probe_sysfs(); + turbostat_init(); /* dump counters and exit */ if (dump_only) return get_and_dump_counters(); + /* list header and exit */ + if (list_header_only) { + print_header(","); + flush_output_stdout(); + return 0; + } + /* * if any params left, it must be a command to fork */ diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index be93ab02b490..6e4eb2fc2d1e 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -179,6 +179,7 @@ my $localversion; my $iteration = 0; my $successes = 
0; my $stty_orig; +my $run_command_status = 0; my $bisect_good; my $bisect_bad; @@ -1325,26 +1326,44 @@ sub wait_for_monitor; sub reboot { my ($time) = @_; + my $powercycle = 0; - # Make sure everything has been written to disk - run_ssh("sync"); + # test if the machine can be connected to within 5 seconds + my $stat = run_ssh("echo check machine status", 5); + if (!$stat) { + doprint("power cycle\n"); + $powercycle = 1; + } + + if ($powercycle) { + run_command "$power_cycle"; - if (defined($time)) { start_monitor; # flush out current monitor # May contain the reboot success line wait_for_monitor 1; - } - # try to reboot normally - if (run_command $reboot) { - if (defined($powercycle_after_reboot)) { - sleep $powercycle_after_reboot; + } else { + # Make sure everything has been written to disk + run_ssh("sync"); + + if (defined($time)) { + start_monitor; + # flush out current monitor + # May contain the reboot success line + wait_for_monitor 1; + } + + # try to reboot normally + if (run_command $reboot) { + if (defined($powercycle_after_reboot)) { + sleep $powercycle_after_reboot; + run_command "$power_cycle"; + } + } else { + # nope? power cycle it. run_command "$power_cycle"; } - } else { - # nope? power cycle it. - run_command "$power_cycle"; } if (defined($time)) { @@ -1412,6 +1431,10 @@ sub dodie { system("stty $stty_orig"); } + if (defined($post_test)) { + run_command $post_test; + } + die @_, "\n"; } @@ -1624,10 +1647,6 @@ sub save_logs { sub fail { - if (defined($post_test)) { - run_command $post_test; - } - if ($die_on_failure) { dodie @_; } @@ -1660,23 +1679,26 @@ sub fail { save_logs "fail", $store_failures; } + if (defined($post_test)) { + run_command $post_test; + } + return 1; } sub run_command { - my ($command, $redirect) = @_; + my ($command, $redirect, $timeout) = @_; my $start_time; my $end_time; my $dolog = 0; my $dord = 0; my $pid; - $start_time = time; - $command =~ s/\$SSH_USER/$ssh_user/g; $command =~ s/\$MACHINE/$machine/g; doprint("$command ... "); + $start_time = time; $pid = open(CMD, "$command 2>&1 |") or (fail "unable to exec $command" and return 0); @@ -1693,13 +1715,30 @@ sub run_command { $dord = 1; } - while (<CMD>) { - print LOG if ($dolog); - print RD if ($dord); + my $hit_timeout = 0; + + while (1) { + my $fp = \*CMD; + if (defined($timeout)) { + doprint "timeout = $timeout\n"; + } + my $line = wait_for_input($fp, $timeout); + if (!defined($line)) { + my $now = time; + if (defined($timeout) && (($now - $start_time) >= $timeout)) { + doprint "Hit timeout of $timeout, killing process\n"; + $hit_timeout = 1; + kill 9, $pid; + } + last; + } + print LOG $line if ($dolog); + print RD $line if ($dord); } waitpid($pid, 0); - my $failed = $?; + # shift 8 for real exit status + $run_command_status = $? 
>> 8; close(CMD); close(LOG) if ($dolog); @@ -1714,21 +1753,25 @@ sub run_command { doprint "[$delta seconds] "; } - if ($failed) { + if ($hit_timeout) { + $run_command_status = 1; + } + + if ($run_command_status) { doprint "FAILED!\n"; } else { doprint "SUCCESS\n"; } - return !$failed; + return !$run_command_status; } sub run_ssh { - my ($cmd) = @_; + my ($cmd, $timeout) = @_; my $cp_exec = $ssh_exec; $cp_exec =~ s/\$SSH_COMMAND/$cmd/g; - return run_command "$cp_exec"; + return run_command "$cp_exec", undef , $timeout; } sub run_scp { @@ -2489,10 +2532,6 @@ sub halt { sub success { my ($i) = @_; - if (defined($post_test)) { - run_command $post_test; - } - $successes++; my $name = ""; @@ -2517,6 +2556,10 @@ sub success { doprint "Reboot and wait $sleep_time seconds\n"; reboot_to_good $sleep_time; } + + if (defined($post_test)) { + run_command $post_test; + } } sub answer_bisect { @@ -2537,16 +2580,15 @@ sub answer_bisect { } sub child_run_test { - my $failed = 0; # child should have no power $reboot_on_error = 0; $poweroff_on_error = 0; $die_on_failure = 1; - run_command $run_test, $testlog or $failed = 1; + run_command $run_test, $testlog; - exit $failed; + exit $run_command_status; } my $child_done; @@ -2629,7 +2671,7 @@ sub do_run_test { } waitpid $child_pid, 0; - $child_exit = $?; + $child_exit = $? >> 8; my $end_time = time; $test_time = $end_time - $start_time; @@ -3330,7 +3372,6 @@ sub config_bisect { save_config \%good_configs, $good_config; save_config \%bad_configs, $bad_config; - if (defined($config_bisect_check) && $config_bisect_check ne "0") { if ($config_bisect_check ne "good") { doprint "Testing bad config\n"; diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore index 11d888ca6a92..d4706c0ffceb 100644 --- a/tools/testing/radix-tree/.gitignore +++ b/tools/testing/radix-tree/.gitignore @@ -1,2 +1,6 @@ +generated/map-shift.h +idr.c +idr-test main +multiorder radix-tree.c diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 3635e4d3eca7..f11315bedefc 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -1,29 +1,47 @@ -CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE +CFLAGS += -I. 
-I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address LDFLAGS += -lpthread -lurcu -TARGETS = main -OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ - regression1.o regression2.o regression3.o multiorder.o \ - iteration_check.o benchmark.o +TARGETS = main idr-test multiorder +CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o +OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ + tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o -ifdef BENCHMARK - CFLAGS += -DBENCHMARK=1 +ifndef SHIFT + SHIFT=3 endif -targets: $(TARGETS) +targets: mapshift $(TARGETS) main: $(OFILES) - $(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main + +idr-test: idr-test.o $(CORE_OFILES) + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test + +multiorder: multiorder.o $(CORE_OFILES) + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder clean: - $(RM) -f $(TARGETS) *.o radix-tree.c + $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h -find_next_bit.o: ../../lib/find_bit.c - $(CC) $(CFLAGS) -c -o $@ $< +vpath %.c ../../lib -$(OFILES): *.h */*.h \ +$(OFILES): *.h */*.h generated/map-shift.h \ ../../include/linux/*.h \ - ../../../include/linux/radix-tree.h + ../../include/asm/*.h \ + ../../../include/linux/radix-tree.h \ + ../../../include/linux/idr.h radix-tree.c: ../../../lib/radix-tree.c sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ + +idr.c: ../../../lib/idr.c + sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ + +.PHONY: mapshift + +mapshift: + @if ! grep -qw $(SHIFT) generated/map-shift.h; then \ + echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ + generated/map-shift.h; \ + fi diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c index 215ca86c7605..9b09ddfe462f 100644 --- a/tools/testing/radix-tree/benchmark.c +++ b/tools/testing/radix-tree/benchmark.c @@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order) tagged = benchmark_iter(&tree, true); normal = benchmark_iter(&tree, false); - printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", + printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", size, step, order, tagged, normal); item_kill_tree(&tree); @@ -85,8 +85,8 @@ void benchmark(void) 128, 256, 512, 12345, 0}; int c, s; - printf("starting benchmarks\n"); - printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); + printv(1, "starting benchmarks\n"); + printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); for (c = 0; size[c]; c++) for (s = 0; step[s]; s++) diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h index ad18cf5a2a3a..cf88dc5b8832 100644 --- a/tools/testing/radix-tree/generated/autoconf.h +++ b/tools/testing/radix-tree/generated/autoconf.h @@ -1,3 +1 @@ #define CONFIG_RADIX_TREE_MULTIORDER 1 -#define CONFIG_SHMEM 1 -#define CONFIG_SWAP 1 diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c new file mode 100644 index 000000000000..a26098c6123d --- /dev/null +++ b/tools/testing/radix-tree/idr-test.c @@ -0,0 +1,444 @@ +/* + * idr-test.c: Test the IDR API + * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include <linux/bitmap.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/errno.h> + +#include "test.h" + +#define DUMMY_PTR ((void *)0x12) + +int item_idr_free(int id, void *p, void *data) +{ + struct item *item = p; + assert(item->index == id); + free(p); + + return 0; +} + +void item_idr_remove(struct idr *idr, int id) +{ + struct item *item = idr_find(idr, id); + assert(item->index == id); + idr_remove(idr, id); + free(item); +} + +void idr_alloc_test(void) +{ + unsigned long i; + DEFINE_IDR(idr); + + assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0); + assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd); + idr_remove(&idr, 0x3ffd); + idr_remove(&idr, 0); + + for (i = 0x3ffe; i < 0x4003; i++) { + int id; + struct item *item; + + if (i < 0x4000) + item = item_create(i, 0); + else + item = item_create(i - 0x3fff, 0); + + id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL); + assert(id == item->index); + } + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); +} + +void idr_replace_test(void) +{ + DEFINE_IDR(idr); + + idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL); + idr_replace(&idr, &idr, 10); + + idr_destroy(&idr); +} + +/* + * Unlike the radix tree, you can put a NULL pointer -- with care -- into + * the IDR. Some interfaces, like idr_find() do not distinguish between + * "present, value is NULL" and "not present", but that's exactly what some + * users want. + */ +void idr_null_test(void) +{ + int i; + DEFINE_IDR(idr); + + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(!idr_is_empty(&idr)); + idr_remove(&idr, 0); + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(!idr_is_empty(&idr)); + idr_destroy(&idr); + assert(idr_is_empty(&idr)); + + for (i = 0; i < 10; i++) { + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i); + } + + assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL); + assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL); + assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR); + assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT)); + idr_remove(&idr, 5); + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5); + idr_remove(&idr, 5); + + for (i = 0; i < 9; i++) { + idr_remove(&idr, i); + assert(!idr_is_empty(&idr)); + } + idr_remove(&idr, 8); + assert(!idr_is_empty(&idr)); + idr_remove(&idr, 9); + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT)); + assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL); + assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR); + + idr_destroy(&idr); + assert(idr_is_empty(&idr)); + + for (i = 1; i < 10; i++) { + assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i); + } + + idr_destroy(&idr); + assert(idr_is_empty(&idr)); +} + +void idr_nowait_test(void) +{ + unsigned int i; + DEFINE_IDR(idr); + + idr_preload(GFP_KERNEL); + + for (i = 0; i < 3; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i); + } + + idr_preload_end(); + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); +} + +void idr_checks(void) +{ + unsigned long i; + DEFINE_IDR(idr); + + 
for (i = 0; i < 10000; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i); + } + + assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0); + + for (i = 0; i < 5000; i++) + item_idr_remove(&idr, i); + + idr_remove(&idr, 3); + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + + assert(idr_is_empty(&idr)); + + idr_remove(&idr, 3); + idr_remove(&idr, 0); + + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); + } + assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC); + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + idr_destroy(&idr); + + assert(idr_is_empty(&idr)); + + for (i = 1; i < 10000; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i); + } + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + + idr_replace_test(); + idr_alloc_test(); + idr_null_test(); + idr_nowait_test(); +} + +/* + * Check that we get the correct error when we run out of memory doing + * allocations. To ensure we run out of memory, just "forget" to preload. + * The first test is for not having a bitmap available, and the second test + * is for not being able to allocate a level of the radix tree. + */ +void ida_check_nomem(void) +{ + DEFINE_IDA(ida); + int id, err; + + err = ida_get_new_above(&ida, 256, &id); + assert(err == -EAGAIN); + err = ida_get_new_above(&ida, 1UL << 30, &id); + assert(err == -EAGAIN); +} + +/* + * Check what happens when we fill a leaf and then delete it. This may + * discover mishandling of IDR_FREE. + */ +void ida_check_leaf(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == 0); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); +} + +/* + * Check handling of conversions between exceptional entries and full bitmaps. 
+ */ +void ida_check_conv(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, i + 1, &id)); + assert(id == i + 1); + assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id)); + assert(id == i + BITS_PER_LONG); + ida_remove(&ida, i + 1); + ida_remove(&ida, i + BITS_PER_LONG); + assert(ida_is_empty(&ida)); + } + + assert(ida_pre_get(&ida, GFP_KERNEL)); + + for (i = 0; i < IDA_BITMAP_BITS * 2; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + for (i = IDA_BITMAP_BITS * 2; i > 0; i--) { + ida_remove(&ida, i - 1); + } + assert(ida_is_empty(&ida)); + + for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) { + ida_remove(&ida, i - 1); + } + assert(ida_is_empty(&ida)); + + radix_tree_cpu_dead(1); + for (i = 0; i < 1000000; i++) { + int err = ida_get_new(&ida, &id); + if (err == -EAGAIN) { + assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2)); + assert(ida_pre_get(&ida, GFP_KERNEL)); + err = ida_get_new(&ida, &id); + } else { + assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2)); + } + assert(!err); + assert(id == i); + } + ida_destroy(&ida); +} + +/* + * Check allocations up to and slightly above the maximum allowed (2^31-1) ID. + * Allocating up to 2^31-1 should succeed, and then allocating the next one + * should fail. + */ +void ida_check_max(void) +{ + DEFINE_IDA(ida); + int id, err; + unsigned long i, j; + + for (j = 1; j < 65537; j *= 2) { + unsigned long base = (1UL << 31) - j; + for (i = 0; i < j; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, base, &id)); + assert(id == base + i); + } + assert(ida_pre_get(&ida, GFP_KERNEL)); + err = ida_get_new_above(&ida, base, &id); + assert(err == -ENOSPC); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + rcu_barrier(); + } +} + +void ida_check_random(void) +{ + DEFINE_IDA(ida); + DECLARE_BITMAP(bitmap, 2048); + int id; + unsigned int i; + time_t s = time(NULL); + + repeat: + memset(bitmap, 0, sizeof(bitmap)); + for (i = 0; i < 100000; i++) { + int i = rand(); + int bit = i & 2047; + if (test_bit(bit, bitmap)) { + __clear_bit(bit, bitmap); + ida_remove(&ida, bit); + } else { + __set_bit(bit, bitmap); + ida_pre_get(&ida, GFP_KERNEL); + assert(!ida_get_new_above(&ida, bit, &id)); + assert(id == bit); + } + } + ida_destroy(&ida); + if (time(NULL) < s + 10) + goto repeat; +} + +void ida_checks(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + radix_tree_cpu_dead(1); + ida_check_nomem(); + + for (i = 0; i < 10000; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + ida_remove(&ida, 20); + ida_remove(&ida, 21); + for (i = 0; i < 3; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + if (i == 2) + assert(id == 10000); + } + + for (i = 0; i < 5000; i++) + ida_remove(&ida, i); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 5000, &id)); + assert(id == 10001); + + ida_destroy(&ida); + + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + assert(id == 1); + + ida_remove(&ida, id); + assert(ida_is_empty(&ida)); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + 
assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + assert(id == 1); + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1025, &id)); + assert(id == 1025); + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 10000, &id)); + assert(id == 10000); + ida_remove(&ida, 1025); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + ida_check_leaf(); + ida_check_max(); + ida_check_conv(); + ida_check_random(); + + radix_tree_cpu_dead(1); +} + +int __weak main(void) +{ + radix_tree_init(); + idr_checks(); + ida_checks(); + rcu_barrier(); + if (nr_allocated) + printf("nr_allocated = %d\n", nr_allocated); + return 0; +} diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index 7572b7ed930e..a92bab513701 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration) { int i; - printf("Running %siteration tests for %d seconds\n", + printv(1, "Running %siteration tests for %d seconds\n", order > 0 ? "multiorder " : "", test_duration); max_order = order; diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index d31ea7c9abec..cf48c8473f48 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -5,7 +5,7 @@ #include <unistd.h> #include <assert.h> -#include <linux/mempool.h> +#include <linux/gfp.h> #include <linux/poison.h> #include <linux/slab.h> #include <linux/radix-tree.h> @@ -13,6 +13,8 @@ int nr_allocated; int preempt_count; +int kmalloc_verbose; +int test_verbose; struct kmem_cache { pthread_mutex_t lock; @@ -22,27 +24,6 @@ struct kmem_cache { void (*ctor)(void *); }; -void *mempool_alloc(mempool_t *pool, int gfp_mask) -{ - return pool->alloc(gfp_mask, pool->data); -} - -void mempool_free(void *element, mempool_t *pool) -{ - pool->free(element, pool->data); -} - -mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data) -{ - mempool_t *ret = malloc(sizeof(*ret)); - - ret->alloc = alloc_fn; - ret->free = free_fn; - ret->data = pool_data; - return ret; -} - void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) { struct radix_tree_node *node; @@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) if (cachep->nr_objs) { cachep->nr_objs--; node = cachep->objs; - cachep->objs = node->private_data; + cachep->objs = node->parent; pthread_mutex_unlock(&cachep->lock); - node->private_data = NULL; + node->parent = NULL; } else { pthread_mutex_unlock(&cachep->lock); node = malloc(cachep->size); @@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) } uatomic_inc(&nr_allocated); + if (kmalloc_verbose) + printf("Allocating %p from slab\n", node); return node; } @@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) { assert(objp); uatomic_dec(&nr_allocated); + if (kmalloc_verbose) + printf("Freeing %p to slab\n", objp); pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs > 10) { memset(objp, POISON_FREE, cachep->size); @@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) } else { struct radix_tree_node *node = objp; cachep->nr_objs++; - node->private_data = cachep->objs; + node->parent = cachep->objs; cachep->objs = 
node; } pthread_mutex_unlock(&cachep->lock); @@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp) { void *ret = malloc(size); uatomic_inc(&nr_allocated); + if (kmalloc_verbose) + printf("Allocating %p from malloc\n", ret); return ret; } @@ -97,6 +84,8 @@ void kfree(void *p) if (!p) return; uatomic_dec(&nr_allocated); + if (kmalloc_verbose) + printf("Freeing %p to malloc\n", p); free(p); } diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h deleted file mode 100644 index a13e9bc76eec..000000000000 --- a/tools/testing/radix-tree/linux/bitops.h +++ /dev/null @@ -1,160 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ - -#include <linux/types.h> -#include <linux/bitops/find.h> -#include <linux/bitops/hweight.h> -#include <linux/kernel.h> - -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_PER_BYTE 8 -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p |= mask; -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p &= ~mask; -} - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p ^= mask; -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -/* WARNING: non atomic and it can be reordered! 
*/ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int num = 0; - - if ((word & 0xffffffff) == 0) { - num += 32; - word >>= 32; - } - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, - unsigned long offset); - -static inline unsigned long hweight_long(unsigned long w) -{ - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); -} - -#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/__ffs.h b/tools/testing/radix-tree/linux/bitops/__ffs.h deleted file mode 100644 index 9a3274aecf83..000000000000 --- a/tools/testing/radix-tree/linux/bitops/__ffs.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS___FFS_H_ -#define _ASM_GENERIC_BITOPS___FFS_H_ - -#include <asm/types.h> - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int num = 0; - -#if BITS_PER_LONG == 64 - if ((word & 0xffffffff) == 0) { - num += 32; - word >>= 32; - } -#endif - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/ffs.h b/tools/testing/radix-tree/linux/bitops/ffs.h deleted file mode 100644 index fbbb43af7dc0..000000000000 --- a/tools/testing/radix-tree/linux/bitops/ffs.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FFS_H_ -#define _ASM_GENERIC_BITOPS_FFS_H_ - -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). 
- */ -static inline int ffs(int x) -{ - int r = 1; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/ffz.h b/tools/testing/radix-tree/linux/bitops/ffz.h deleted file mode 100644 index 6744bd4cdf46..000000000000 --- a/tools/testing/radix-tree/linux/bitops/ffz.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ -#define _ASM_GENERIC_BITOPS_FFZ_H_ - -/* - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -#define ffz(x) __ffs(~(x)) - -#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/find.h b/tools/testing/radix-tree/linux/bitops/find.h deleted file mode 100644 index 72a51e5a12ef..000000000000 --- a/tools/testing/radix-tree/linux/bitops/find.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FIND_H_ -#define _ASM_GENERIC_BITOPS_FIND_H_ - -extern unsigned long find_next_bit(const unsigned long *addr, unsigned long - size, unsigned long offset); - -extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned - long size, unsigned long offset); - -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) -#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) - -#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/fls.h b/tools/testing/radix-tree/linux/bitops/fls.h deleted file mode 100644 index 850859bc5069..000000000000 --- a/tools/testing/radix-tree/linux/bitops/fls.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FLS_H_ -#define _ASM_GENERIC_BITOPS_FLS_H_ - -/** - * fls - find last (most-significant) bit set - * @x: the word to search - * - * This is defined the same way as ffs. - * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
- */ - -static inline int fls(int x) -{ - int r = 32; - - if (!x) - return 0; - if (!(x & 0xffff0000u)) { - x <<= 16; - r -= 16; - } - if (!(x & 0xff000000u)) { - x <<= 8; - r -= 8; - } - if (!(x & 0xf0000000u)) { - x <<= 4; - r -= 4; - } - if (!(x & 0xc0000000u)) { - x <<= 2; - r -= 2; - } - if (!(x & 0x80000000u)) { - x <<= 1; - r -= 1; - } - return r; -} - -#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/fls64.h b/tools/testing/radix-tree/linux/bitops/fls64.h deleted file mode 100644 index 1b6b17ce2428..000000000000 --- a/tools/testing/radix-tree/linux/bitops/fls64.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ -#define _ASM_GENERIC_BITOPS_FLS64_H_ - -#include <asm/types.h> - -static inline int fls64(__u64 x) -{ - __u32 h = x >> 32; - if (h) - return fls(h) + 32; - return fls(x); -} - -#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/hweight.h b/tools/testing/radix-tree/linux/bitops/hweight.h deleted file mode 100644 index fbbc383771da..000000000000 --- a/tools/testing/radix-tree/linux/bitops/hweight.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ -#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ - -#include <asm/types.h> - -extern unsigned int hweight32(unsigned int w); -extern unsigned int hweight16(unsigned int w); -extern unsigned int hweight8(unsigned int w); -extern unsigned long hweight64(__u64 w); - -#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/le.h b/tools/testing/radix-tree/linux/bitops/le.h deleted file mode 100644 index b9c7e5d2d2ad..000000000000 --- a/tools/testing/radix-tree/linux/bitops/le.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_LE_H_ -#define _ASM_GENERIC_BITOPS_LE_H_ - -#include <asm/types.h> -#include <asm/byteorder.h> - -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) - -#if defined(__LITTLE_ENDIAN) - -#define generic_test_le_bit(nr, addr) test_bit(nr, addr) -#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) -#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) - -#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) -#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) - -#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) -#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) - -#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) - -#elif defined(__BIG_ENDIAN) - -#define generic_test_le_bit(nr, addr) \ - test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___set_le_bit(nr, addr) \ - __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___clear_le_bit(nr, addr) \ - __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -#define generic_test_and_set_le_bit(nr, addr) \ - test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic_test_and_clear_le_bit(nr, addr) \ - test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -#define generic___test_and_set_le_bit(nr, addr) \ - __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___test_and_clear_le_bit(nr, addr) \ - __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); - -#else -#error "Please fix <asm/byteorder.h>" -#endif - -#define generic_find_first_zero_le_bit(addr, 
size) \ - generic_find_next_zero_le_bit((addr), (size), 0) - -#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h deleted file mode 100644 index 6a1bcb9d2c4a..000000000000 --- a/tools/testing/radix-tree/linux/bitops/non-atomic.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ - -#include <asm/types.h> - -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p |= mask; -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p &= ~mask; -} - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p ^= mask; -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -/* WARNING: non atomic and it can be reordered! 
*/ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/tools/testing/radix-tree/linux/export.h b/tools/testing/radix-tree/linux/export.h deleted file mode 100644 index b6afd131998d..000000000000 --- a/tools/testing/radix-tree/linux/export.h +++ /dev/null @@ -1,2 +0,0 @@ - -#define EXPORT_SYMBOL(sym) diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index 5b09b2ce6c33..39a0dcb9475a 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h @@ -1,6 +1,8 @@ #ifndef _GFP_H #define _GFP_H +#include <linux/types.h> + #define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) @@ -13,10 +15,12 @@ #define __GFP_DIRECT_RECLAIM 0x400000u #define __GFP_KSWAPD_RECLAIM 0x2000000u -#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) +#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) + +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) -#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) -#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { diff --git a/tools/testing/radix-tree/linux/idr.h b/tools/testing/radix-tree/linux/idr.h new file mode 100644 index 000000000000..4e342f2e37cf --- /dev/null +++ b/tools/testing/radix-tree/linux/idr.h @@ -0,0 +1 @@ +#include "../../../../include/linux/idr.h" diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h index 360cabb3c4e7..1bb0afc21309 100644 --- a/tools/testing/radix-tree/linux/init.h +++ b/tools/testing/radix-tree/linux/init.h @@ -1 +1 @@ -/* An empty file stub that allows radix-tree.c to compile. 
*/ +#define __init diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index 9b43b4975d83..b21a77fddcf7 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h @@ -1,64 +1,21 @@ #ifndef _KERNEL_H #define _KERNEL_H -#include <assert.h> +#include "../../include/linux/kernel.h" #include <string.h> #include <stdio.h> -#include <stddef.h> #include <limits.h> -#include "../../include/linux/compiler.h" -#include "../../include/linux/err.h" +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/bitops.h> +#include <linux/log2.h> #include "../../../include/linux/kconfig.h" -#ifdef BENCHMARK -#define RADIX_TREE_MAP_SHIFT 6 -#else -#define RADIX_TREE_MAP_SHIFT 3 -#endif - -#ifndef NULL -#define NULL 0 -#endif - -#define BUG_ON(expr) assert(!(expr)) -#define WARN_ON(expr) assert(!(expr)) -#define __init -#define __must_check -#define panic(expr) #define printk printf -#define __force -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define pr_debug printk - -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define cpu_relax() barrier() +#define pr_cont printk #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type, member) );}) -#define min(a, b) ((a) < (b) ? (a) : (b)) - -#define cond_resched() sched_yield() - -static inline int in_interrupt(void) -{ - return 0; -} - -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - -#define xchg(ptr, x) uatomic_xchg(ptr, x) - #endif /* _KERNEL_H */ diff --git a/tools/testing/radix-tree/linux/mempool.h b/tools/testing/radix-tree/linux/mempool.h deleted file mode 100644 index 6a2dc55b41d6..000000000000 --- a/tools/testing/radix-tree/linux/mempool.h +++ /dev/null @@ -1,16 +0,0 @@ - -#include <linux/slab.h> - -typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data); -typedef void (mempool_free_t)(void *element, void *pool_data); - -typedef struct { - mempool_alloc_t *alloc; - mempool_free_t *free; - void *data; -} mempool_t; - -void *mempool_alloc(mempool_t *pool, int gfp_mask); -void mempool_free(void *element, mempool_t *pool); -mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); diff --git a/tools/testing/radix-tree/linux/percpu.h b/tools/testing/radix-tree/linux/percpu.h index 5837f1d56f17..3ea01a1a88c2 100644 --- a/tools/testing/radix-tree/linux/percpu.h +++ b/tools/testing/radix-tree/linux/percpu.h @@ -1,7 +1,10 @@ - +#define DECLARE_PER_CPU(type, val) extern type val #define DEFINE_PER_CPU(type, val) type val #define __get_cpu_var(var) var #define this_cpu_ptr(var) var +#define this_cpu_read(var) var +#define this_cpu_xchg(var, val) uatomic_xchg(&var, val) +#define this_cpu_cmpxchg(var, old, new) uatomic_cmpxchg(&var, old, new) #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h index 65c04c226965..35c5ac81529f 100644 --- 
a/tools/testing/radix-tree/linux/preempt.h +++ b/tools/testing/radix-tree/linux/preempt.h @@ -1,4 +1,14 @@ +#ifndef __LINUX_PREEMPT_H +#define __LINUX_PREEMPT_H + extern int preempt_count; #define preempt_disable() uatomic_inc(&preempt_count) #define preempt_enable() uatomic_dec(&preempt_count) + +static inline int in_interrupt(void) +{ + return 0; +} + +#endif /* __LINUX_PREEMPT_H */ diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h index ce694ddd4aea..bf1bb231f9b5 100644 --- a/tools/testing/radix-tree/linux/radix-tree.h +++ b/tools/testing/radix-tree/linux/radix-tree.h @@ -1 +1,26 @@ +#ifndef _TEST_RADIX_TREE_H +#define _TEST_RADIX_TREE_H + +#include "generated/map-shift.h" #include "../../../../include/linux/radix-tree.h" + +extern int kmalloc_verbose; +extern int test_verbose; + +static inline void trace_call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head)) +{ + if (kmalloc_verbose) + printf("Delaying free of %p to slab\n", (char *)head - + offsetof(struct radix_tree_node, rcu_head)); + call_rcu(head, func); +} + +#define printv(verbosity_level, fmt, ...) \ + if(test_verbose >= verbosity_level) \ + printf(fmt, ##__VA_ARGS__) + +#undef call_rcu +#define call_rcu(x, y) trace_call_rcu(x, y) + +#endif /* _TEST_RADIX_TREE_H */ diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h deleted file mode 100644 index 8491d89873bb..000000000000 --- a/tools/testing/radix-tree/linux/types.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _TYPES_H -#define _TYPES_H - -#include "../../include/linux/types.h" - -#define __rcu -#define __read_mostly - -static inline void INIT_LIST_HEAD(struct list_head *list) -{ - list->next = list; - list->prev = list; -} - -typedef struct { - unsigned int x; -} spinlock_t; - -#define uninitialized_var(x) x = x - -#include <linux/gfp.h> - -#endif diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index f7e9801a6754..b829127d5670 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -3,6 +3,7 @@ #include <unistd.h> #include <time.h> #include <assert.h> +#include <limits.h> #include <linux/slab.h> #include <linux/radix-tree.h> @@ -67,7 +68,7 @@ void big_gang_check(bool long_run) for (i = 0; i < (long_run ? 
1000 : 3); i++) { __big_gang_check(); - printf("%d ", i); + printv(2, "%d ", i); fflush(stdout); } } @@ -128,14 +129,19 @@ void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsign putchar('.'); */ if (idx[i] < start || idx[i] > end) { if (item_tag_get(tree, idx[i], totag)) { - printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); + printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, + end, idx[i], item_tag_get(tree, idx[i], + fromtag), + item_tag_get(tree, idx[i], totag)); } assert(!item_tag_get(tree, idx[i], totag)); continue; } if (item_tag_get(tree, idx[i], fromtag) ^ item_tag_get(tree, idx[i], totag)) { - printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); + printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end, + idx[i], item_tag_get(tree, idx[i], fromtag), + item_tag_get(tree, idx[i], totag)); } assert(!(item_tag_get(tree, idx[i], fromtag) ^ item_tag_get(tree, idx[i], totag))); @@ -237,7 +243,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index, item = item_lookup(tree, index); index2 = find_item(tree, item); if (index != index2) { - printf("index %ld order %d inserted; found %ld\n", + printv(2, "index %ld order %d inserted; found %ld\n", index, order, index2); abort(); } @@ -288,43 +294,48 @@ static void single_thread_tests(bool long_run) { int i; - printf("starting single_thread_tests: %d allocated, preempt %d\n", + printv(1, "starting single_thread_tests: %d allocated, preempt %d\n", nr_allocated, preempt_count); multiorder_checks(); rcu_barrier(); - printf("after multiorder_check: %d allocated, preempt %d\n", + printv(2, "after multiorder_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); locate_check(); rcu_barrier(); - printf("after locate_check: %d allocated, preempt %d\n", + printv(2, "after locate_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); tag_check(); rcu_barrier(); - printf("after tag_check: %d allocated, preempt %d\n", + printv(2, "after tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); gang_check(); rcu_barrier(); - printf("after gang_check: %d allocated, preempt %d\n", + printv(2, "after gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); add_and_check(); rcu_barrier(); - printf("after add_and_check: %d allocated, preempt %d\n", + printv(2, "after add_and_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); dynamic_height_check(); rcu_barrier(); - printf("after dynamic_height_check: %d allocated, preempt %d\n", + printv(2, "after dynamic_height_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); + idr_checks(); + ida_checks(); + rcu_barrier(); + printv(2, "after idr_checks: %d allocated, preempt %d\n", nr_allocated, preempt_count); big_gang_check(long_run); rcu_barrier(); - printf("after big_gang_check: %d allocated, preempt %d\n", + printv(2, "after big_gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); for (i = 0; i < (long_run ? 
2000 : 3); i++) { copy_tag_check(); - printf("%d ", i); + printv(2, "%d ", i); fflush(stdout); } rcu_barrier(); - printf("after copy_tag_check: %d allocated, preempt %d\n", + printv(2, "after copy_tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); } @@ -334,24 +345,28 @@ int main(int argc, char **argv) int opt; unsigned int seed = time(NULL); - while ((opt = getopt(argc, argv, "ls:")) != -1) { + while ((opt = getopt(argc, argv, "ls:v")) != -1) { if (opt == 'l') long_run = true; else if (opt == 's') seed = strtoul(optarg, NULL, 0); + else if (opt == 'v') + test_verbose++; } printf("random seed %u\n", seed); srand(seed); + printf("running tests\n"); + rcu_register_thread(); radix_tree_init(); regression1_test(); regression2_test(); regression3_test(); - iteration_test(0, 10); - iteration_test(7, 20); + iteration_test(0, 10 + 90 * long_run); + iteration_test(7, 10 + 90 * long_run); single_thread_tests(long_run); /* Free any remaining preallocated nodes */ @@ -360,9 +375,11 @@ int main(int argc, char **argv) benchmark(); rcu_barrier(); - printf("after rcu_barrier: %d allocated, preempt %d\n", + printv(2, "after rcu_barrier: %d allocated, preempt %d\n", nr_allocated, preempt_count); rcu_unregister_thread(); + printf("tests completed\n"); + exit(0); } diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index f79812a5e070..06c71178d07d 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -30,7 +30,7 @@ static void __multiorder_tag_test(int index, int order) /* our canonical entry */ base = index & ~((1 << order) - 1); - printf("Multiorder tag test with index %d, canonical entry %d\n", + printv(2, "Multiorder tag test with index %d, canonical entry %d\n", index, base); err = item_insert_order(&tree, index, order); @@ -150,7 +150,7 @@ static void multiorder_check(unsigned long index, int order) struct item *item2 = item_create(min, order); RADIX_TREE(tree, GFP_KERNEL); - printf("Multiorder index %ld, order %d\n", index, order); + printv(2, "Multiorder index %ld, order %d\n", index, order); assert(item_insert_order(&tree, index, order) == 0); @@ -188,7 +188,7 @@ static void multiorder_shrink(unsigned long index, int order) RADIX_TREE(tree, GFP_KERNEL); struct radix_tree_node *node; - printf("Multiorder shrink index %ld, order %d\n", index, order); + printv(2, "Multiorder shrink index %ld, order %d\n", index, order); assert(item_insert_order(&tree, 0, order) == 0); @@ -209,7 +209,8 @@ static void multiorder_shrink(unsigned long index, int order) item_check_absent(&tree, i); if (!item_delete(&tree, 0)) { - printf("failed to delete index %ld (order %d)\n", index, order); abort(); + printv(2, "failed to delete index %ld (order %d)\n", index, order); + abort(); } for (i = 0; i < 2*max; i++) @@ -234,7 +235,7 @@ void multiorder_iteration(void) void **slot; int i, j, err; - printf("Multiorder iteration test\n"); + printv(1, "Multiorder iteration test\n"); #define NUM_ENTRIES 11 int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128}; @@ -275,7 +276,7 @@ void multiorder_tagged_iteration(void) void **slot; int i, j; - printf("Multiorder tagged iteration test\n"); + printv(1, "Multiorder tagged iteration test\n"); #define MT_NUM_ENTRIES 9 int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128}; @@ -355,6 +356,10 @@ void multiorder_tagged_iteration(void) item_kill_tree(&tree); } +/* + * Basic join checks: make sure we can't find an entry in the tree after + * a larger entry has replaced it + 
*/ static void multiorder_join1(unsigned long index, unsigned order1, unsigned order2) { @@ -373,6 +378,10 @@ static void multiorder_join1(unsigned long index, item_kill_tree(&tree); } +/* + * Check that the accounting of exceptional entries is handled correctly + * by joining an exceptional entry to a normal pointer. + */ static void multiorder_join2(unsigned order1, unsigned order2) { RADIX_TREE(tree, GFP_KERNEL); @@ -386,6 +395,9 @@ static void multiorder_join2(unsigned order1, unsigned order2) assert(item2 == (void *)0x12UL); assert(node->exceptional == 1); + item2 = radix_tree_lookup(&tree, 0); + free(item2); + radix_tree_join(&tree, 0, order1, item1); item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL); assert(item2 == item1); @@ -453,7 +465,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) { struct radix_tree_preload *rtp = &radix_tree_preloads; if (rtp->nr != 0) - printf("split(%u %u) remaining %u\n", old_order, new_order, + printv(2, "split(%u %u) remaining %u\n", old_order, new_order, rtp->nr); /* * Can't check for equality here as some nodes may have been @@ -461,7 +473,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) * nodes allocated since they should have all been preloaded. */ if (nr_allocated > alloc) - printf("split(%u %u) allocated %u %u\n", old_order, new_order, + printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order, alloc, nr_allocated); } @@ -471,6 +483,7 @@ static void __multiorder_split(int old_order, int new_order) void **slot; struct radix_tree_iter iter; unsigned alloc; + struct item *item; radix_tree_preload(GFP_KERNEL); assert(item_insert_order(&tree, 0, old_order) == 0); @@ -479,7 +492,7 @@ static void __multiorder_split(int old_order, int new_order) /* Wipe out the preloaded cache or it'll confuse check_mem() */ radix_tree_cpu_dead(0); - radix_tree_tag_set(&tree, 0, 2); + item = radix_tree_tag_set(&tree, 0, 2); radix_tree_split_preload(old_order, new_order, GFP_KERNEL); alloc = nr_allocated; @@ -492,6 +505,7 @@ static void __multiorder_split(int old_order, int new_order) radix_tree_preload_end(); item_kill_tree(&tree); + free(item); } static void __multiorder_split2(int old_order, int new_order) @@ -633,3 +647,10 @@ void multiorder_checks(void) radix_tree_cpu_dead(0); } + +int __weak main(void) +{ + radix_tree_init(); + multiorder_checks(); + return 0; +} diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index 0d6813a61b37..bf97742fc18c 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -193,7 +193,7 @@ void regression1_test(void) long arg; /* Regression #1 */ - printf("running regression test 1, should finish in under a minute\n"); + printv(1, "running regression test 1, should finish in under a minute\n"); nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); @@ -216,5 +216,5 @@ void regression1_test(void) free(threads); - printf("regression test 1, done\n"); + printv(1, "regression test 1, done\n"); } diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c index a41325d7a170..42dd2a33ed24 100644 --- a/tools/testing/radix-tree/regression2.c +++ b/tools/testing/radix-tree/regression2.c @@ -80,7 +80,7 @@ void regression2_test(void) unsigned long int start, end; struct page *pages[1]; - printf("running regression test 2 (should take milliseconds)\n"); + printv(1, "running regression test 2 (should take milliseconds)\n"); /* 0. 
*/ for (i = 0; i <= max_slots - 1; i++) { p = page_alloc(); @@ -103,7 +103,7 @@ void regression2_test(void) /* 4. */ for (i = max_slots - 1; i >= 0; i--) - radix_tree_delete(&mt_tree, i); + free(radix_tree_delete(&mt_tree, i)); /* 5. */ // NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot @@ -114,7 +114,9 @@ void regression2_test(void) PAGECACHE_TAG_TOWRITE); /* We remove all the remained nodes */ - radix_tree_delete(&mt_tree, max_slots); + free(radix_tree_delete(&mt_tree, max_slots)); - printf("regression test 2, done\n"); + BUG_ON(!radix_tree_empty(&mt_tree)); + + printv(1, "regression test 2, done\n"); } diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c index b594841fae85..670c3d2ae7b1 100644 --- a/tools/testing/radix-tree/regression3.c +++ b/tools/testing/radix-tree/regression3.c @@ -34,21 +34,21 @@ void regression3_test(void) void **slot; bool first; - printf("running regression test 3 (should take milliseconds)\n"); + printv(1, "running regression test 3 (should take milliseconds)\n"); radix_tree_insert(&root, 0, ptr0); radix_tree_tag_set(&root, 0, 0); first = true; radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { - printf("tagged %ld %p\n", iter.index, *slot); + printv(2, "tagged %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); radix_tree_tag_set(&root, 1, 0); first = false; } if (radix_tree_deref_retry(*slot)) { - printf("retry at %ld\n", iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } @@ -57,13 +57,13 @@ void regression3_test(void) first = true; radix_tree_for_each_slot(slot, &root, &iter, 0) { - printf("slot %ld %p\n", iter.index, *slot); + printv(2, "slot %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); first = false; } if (radix_tree_deref_retry(*slot)) { - printk("retry at %ld\n", iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } @@ -72,30 +72,30 @@ void regression3_test(void) first = true; radix_tree_for_each_contig(slot, &root, &iter, 0) { - printk("contig %ld %p\n", iter.index, *slot); + printv(2, "contig %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); first = false; } if (radix_tree_deref_retry(*slot)) { - printk("retry at %ld\n", iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } } radix_tree_for_each_slot(slot, &root, &iter, 0) { - printf("slot %ld %p\n", iter.index, *slot); + printv(2, "slot %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } radix_tree_for_each_contig(slot, &root, &iter, 0) { - printf("contig %ld %p\n", iter.index, *slot); + printv(2, "contig %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } @@ -103,9 +103,9 @@ void regression3_test(void) radix_tree_tag_set(&root, 0, 0); radix_tree_tag_set(&root, 1, 0); radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { - printf("tagged %ld %p\n", iter.index, *slot); + printv(2, "tagged %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } @@ -113,5 +113,5 @@ void regression3_test(void) radix_tree_delete(&root, 0); 
radix_tree_delete(&root, 1); - printf("regression test 3 passed\n"); + printv(1, "regression test 3 passed\n"); } diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index fd98c132207a..d4ff00989245 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c @@ -49,10 +49,10 @@ void simple_checks(void) } verify_tag_consistency(&tree, 0); verify_tag_consistency(&tree, 1); - printf("before item_kill_tree: %d allocated\n", nr_allocated); + printv(2, "before item_kill_tree: %d allocated\n", nr_allocated); item_kill_tree(&tree); rcu_barrier(); - printf("after item_kill_tree: %d allocated\n", nr_allocated); + printv(2, "after item_kill_tree: %d allocated\n", nr_allocated); } /* @@ -257,7 +257,7 @@ static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag) gang_check(tree, thrash_state, tag); - printf("%d(%d) %d(%d) %d(%d) %d(%d) / " + printv(2, "%d(%d) %d(%d) %d(%d) %d(%d) / " "%d(%d) present, %d(%d) tagged\n", insert_chunk, nr_inserted, delete_chunk, nr_deleted, @@ -296,13 +296,13 @@ static void __leak_check(void) { RADIX_TREE(tree, GFP_KERNEL); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_insert(&tree, 1000000); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_delete(&tree, 1000000); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_kill_tree(&tree); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); } static void single_check(void) @@ -336,15 +336,15 @@ void tag_check(void) extend_checks(); contract_checks(); rcu_barrier(); - printf("after extend_checks: %d allocated\n", nr_allocated); + printv(2, "after extend_checks: %d allocated\n", nr_allocated); __leak_check(); leak_check(); rcu_barrier(); - printf("after leak_check: %d allocated\n", nr_allocated); + printv(2, "after leak_check: %d allocated\n", nr_allocated); simple_checks(); rcu_barrier(); - printf("after simple_checks: %d allocated\n", nr_allocated); + printv(2, "after simple_checks: %d allocated\n", nr_allocated); thrash_tags(); rcu_barrier(); - printf("after thrash_tags: %d allocated\n", nr_allocated); + printv(2, "after thrash_tags: %d allocated\n", nr_allocated); } diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index e5726e373646..1a257d738a1e 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -29,15 +29,28 @@ int __item_insert(struct radix_tree_root *root, struct item *item) return __radix_tree_insert(root, item->index, item->order, item); } -int item_insert(struct radix_tree_root *root, unsigned long index) +struct item *item_create(unsigned long index, unsigned int order) { - return __item_insert(root, item_create(index, 0)); + struct item *ret = malloc(sizeof(*ret)); + + ret->index = index; + ret->order = order; + return ret; } int item_insert_order(struct radix_tree_root *root, unsigned long index, unsigned order) { - return __item_insert(root, item_create(index, order)); + struct item *item = item_create(index, order); + int err = __item_insert(root, item); + if (err) + free(item); + return err; +} + +int item_insert(struct radix_tree_root *root, unsigned long index) +{ + return item_insert_order(root, index, 0); } void item_sanity(struct item *item, unsigned long index) 
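The test.c hunk above moves item_create() ahead of the insert helpers so that item_insert_order() can free the freshly allocated item when __item_insert() fails, instead of leaking it the way the old one-line wrapper did. Below is a minimal stand-alone sketch of that allocate/insert/free-on-failure pattern; fake_insert() and the simplified struct are hypothetical stand-ins for the harness code, not part of the patch itself.

#include <stdlib.h>

struct item {
	unsigned long index;
	unsigned int order;
};

/* Hypothetical insert hook standing in for __item_insert()/__radix_tree_insert(). */
static int fake_insert(struct item *item)
{
	/* Pretend the tree rejects odd indices, e.g. as if it returned -EEXIST. */
	return (item->index & 1) ? -1 : 0;
}

/* Allocate an item and try to insert it; free it again if insertion fails. */
static int item_insert_order(unsigned long index, unsigned int order)
{
	struct item *item = malloc(sizeof(*item));
	int err;

	if (!item)
		return -1;
	item->index = index;
	item->order = order;

	err = fake_insert(item);
	if (err)
		free(item);	/* the caller never sees a leaked item */
	return err;
}

int main(void)
{
	/* Even index inserts cleanly; odd index exercises the error path. */
	return item_insert_order(2, 0) || !item_insert_order(3, 0);
}
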
@@ -61,15 +74,6 @@ int item_delete(struct radix_tree_root *root, unsigned long index) return 0; } -struct item *item_create(unsigned long index, unsigned int order) -{ - struct item *ret = malloc(sizeof(*ret)); - - ret->index = index; - ret->order = order; - return ret; -} - void item_check_present(struct radix_tree_root *root, unsigned long index) { struct item *item; diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 056a23b56467..b30e11d9d271 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -34,6 +34,8 @@ void tag_check(void); void multiorder_checks(void); void iteration_test(unsigned order, unsigned duration); void benchmark(void); +void idr_checks(void); +void ida_checks(void); struct item * item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 831022b12848..e8b79a7b50bd 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -1,6 +1,7 @@ TARGETS = bpf TARGETS += breakpoints TARGETS += capabilities +TARGETS += cpufreq TARGETS += cpu-hotplug TARGETS += efivarfs TARGETS += exec @@ -8,6 +9,7 @@ TARGETS += firmware TARGETS += ftrace TARGETS += futex TARGETS += gpio +TARGETS += intel_pstate TARGETS += ipc TARGETS += kcmp TARGETS += lib @@ -49,29 +51,44 @@ override LDFLAGS = override MAKEFLAGS = endif +BUILD := $(O) +ifndef BUILD + BUILD := $(KBUILD_OUTPUT) +endif +ifndef BUILD + BUILD := $(shell pwd) +endif + +export BUILD all: - for TARGET in $(TARGETS); do \ - make -C $$TARGET; \ + for TARGET in $(TARGETS); do \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + mkdir $$BUILD_TARGET -p; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ done; run_tests: all for TARGET in $(TARGETS); do \ - make -C $$TARGET run_tests; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ done; hotplug: for TARGET in $(TARGETS_HOTPLUG); do \ - make -C $$TARGET; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ done; run_hotplug: hotplug for TARGET in $(TARGETS_HOTPLUG); do \ - make -C $$TARGET run_full_test; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\ done; clean_hotplug: for TARGET in $(TARGETS_HOTPLUG); do \ - make -C $$TARGET clean; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ done; run_pstore_crash: @@ -86,7 +103,8 @@ ifdef INSTALL_PATH @# Ask all targets to install their files mkdir -p $(INSTALL_PATH) for TARGET in $(TARGETS); do \ - make -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \ done; @# Ask all targets to emit their test scripts @@ -95,10 +113,11 @@ ifdef INSTALL_PATH echo "ROOT=\$$PWD" >> $(ALL_SCRIPT) for TARGET in $(TARGETS); do \ + BUILD_TARGET=$$BUILD/$$TARGET; \ echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \ echo "echo ========================================" >> $(ALL_SCRIPT); \ echo "cd $$TARGET" >> $(ALL_SCRIPT); \ - make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \ + make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \ echo "cd \$$ROOT" >> $(ALL_SCRIPT); \ done; @@ -109,7 +128,8 @@ endif clean: for TARGET in $(TARGETS); do \ - make -C $$TARGET clean; \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + make OUTPUT=$$BUILD_TARGET -C $$TARGET 
clean;\ done; .PHONY: install diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c7816fe60feb..4b498265dae6 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -3,15 +3,12 @@ BPFOBJ := $(LIBDIR)/bpf/bpf.o CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) -test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map -TEST_PROGS := $(test_objs) test_kmod.sh -TEST_FILES := $(test_objs) +TEST_PROGS := test_kmod.sh .PHONY: all clean force -all: $(test_objs) - # force a rebuild of BPFOBJ when its dependencies are updated force: @@ -21,6 +18,3 @@ $(BPFOBJ): force $(test_objs): $(BPFOBJ) include ../lib.mk - -clean: - $(RM) $(test_objs) diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile index 61b79e8df1f4..72aa103e4141 100644 --- a/tools/testing/selftests/breakpoints/Makefile +++ b/tools/testing/selftests/breakpoints/Makefile @@ -3,17 +3,13 @@ uname_M := $(shell uname -m 2>/dev/null || echo not) ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) ifeq ($(ARCH),x86) -TEST_PROGS := breakpoint_test +TEST_GEN_PROGS := breakpoint_test endif ifeq ($(ARCH),aarch64) -TEST_PROGS := breakpoint_test_arm64 +TEST_GEN_PROGS := breakpoint_test_arm64 endif -TEST_PROGS += step_after_suspend_test - -all: $(TEST_PROGS) +TEST_GEN_PROGS += step_after_suspend_test include ../lib.mk -clean: - rm -fr breakpoint_test breakpoint_test_arm64 step_after_suspend_test diff --git a/tools/testing/selftests/capabilities/Makefile b/tools/testing/selftests/capabilities/Makefile index 008602aed920..29b8adfdac71 100644 --- a/tools/testing/selftests/capabilities/Makefile +++ b/tools/testing/selftests/capabilities/Makefile @@ -1,15 +1,8 @@ -TEST_FILES := validate_cap -TEST_PROGS := test_execve - -BINARIES := $(TEST_FILES) $(TEST_PROGS) +TEST_GEN_FILES := validate_cap +TEST_GEN_PROGS := test_execve CFLAGS += -O2 -g -std=gnu99 -Wall LDLIBS += -lcap-ng -lrt -ldl -all: $(BINARIES) - -clean: - $(RM) $(BINARIES) - include ../lib.mk diff --git a/tools/testing/selftests/cpufreq/Makefile b/tools/testing/selftests/cpufreq/Makefile new file mode 100644 index 000000000000..3955cd96f3a2 --- /dev/null +++ b/tools/testing/selftests/cpufreq/Makefile @@ -0,0 +1,8 @@ +all: + +TEST_PROGS := main.sh +TEST_FILES := cpu.sh cpufreq.sh governor.sh module.sh special-tests.sh + +include ../lib.mk + +clean: diff --git a/tools/testing/selftests/cpufreq/cpu.sh b/tools/testing/selftests/cpufreq/cpu.sh new file mode 100755 index 000000000000..8e08a83d65f2 --- /dev/null +++ b/tools/testing/selftests/cpufreq/cpu.sh @@ -0,0 +1,84 @@ +#!/bin/bash +# +# CPU helpers + +# protect against multiple inclusion +if [ $FILE_CPU ]; then + return 0 +else + FILE_CPU=DONE +fi + +source cpufreq.sh + +for_each_cpu() +{ + cpus=$(ls $CPUROOT | grep "cpu[0-9].*") + for cpu in $cpus; do + $@ $cpu + done +} + +for_each_non_boot_cpu() +{ + cpus=$(ls $CPUROOT | grep "cpu[1-9].*") + for cpu in $cpus; do + $@ $cpu + done +} + +#$1: cpu +offline_cpu() +{ + printf "Offline $1\n" + echo 0 > $CPUROOT/$1/online +} + +#$1: cpu +online_cpu() +{ + printf "Online $1\n" + echo 1 > $CPUROOT/$1/online +} + +#$1: cpu +reboot_cpu() +{ + offline_cpu $1 + online_cpu $1 +} + +# Reboot CPUs +# param: number of times we want to run the loop +reboot_cpus() +{ + printf "** Test: Running ${FUNCNAME[0]} for $1 loops **\n\n" + + for i in `seq 1 $1`; do + 
for_each_non_boot_cpu offline_cpu + for_each_non_boot_cpu online_cpu + printf "\n" + done + + printf "\n%s\n\n" "------------------------------------------------" +} + +# Prints warning for all CPUs with missing cpufreq directory +print_unmanaged_cpus() +{ + for_each_cpu cpu_should_have_cpufreq_directory +} + +# Counts CPUs with cpufreq directories +count_cpufreq_managed_cpus() +{ + count=0; + + for cpu in `ls $CPUROOT | grep "cpu[0-9].*"`; do + if [ -d $CPUROOT/$cpu/cpufreq ]; then + let count=count+1; + fi + done + + echo $count; +} diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh new file mode 100755 index 000000000000..1ed3832030b4 --- /dev/null +++ b/tools/testing/selftests/cpufreq/cpufreq.sh @@ -0,0 +1,241 @@ +#!/bin/bash + +# protect against multiple inclusion +if [ $FILE_CPUFREQ ]; then + return 0 +else + FILE_CPUFREQ=DONE +fi + +source cpu.sh + + +# $1: cpu +cpu_should_have_cpufreq_directory() +{ + if [ ! -d $CPUROOT/$1/cpufreq ]; then + printf "Warning: No cpufreq directory present for $1\n" + fi +} + +cpu_should_not_have_cpufreq_directory() +{ + if [ -d $CPUROOT/$1/cpufreq ]; then + printf "Warning: cpufreq directory present for $1\n" + fi +} + +for_each_policy() +{ + policies=$(ls $CPUFREQROOT| grep "policy[0-9].*") + for policy in $policies; do + $@ $policy + done +} + +for_each_policy_concurrent() +{ + policies=$(ls $CPUFREQROOT| grep "policy[0-9].*") + for policy in $policies; do + $@ $policy & + done +} + +# $1: Path +read_cpufreq_files_in_dir() +{ + local files=`ls $1` + + printf "Printing directory: $1\n\n" + + for file in $files; do + if [ -f $1/$file ]; then + printf "$file:" + cat $1/$file + else + printf "\n" + read_cpufreq_files_in_dir "$1/$file" + fi + done + printf "\n" +} + + +read_all_cpufreq_files() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + read_cpufreq_files_in_dir $CPUFREQROOT + + printf "%s\n\n" "------------------------------------------------" +} + + +# UPDATE CPUFREQ FILES + +# $1: directory path +update_cpufreq_files_in_dir() +{ + local files=`ls $1` + + printf "Updating directory: $1\n\n" + + for file in $files; do + if [ -f $1/$file ]; then + # file is writable ? + local wfile=$(ls -l $1/$file | awk '$1 ~ /^.*w.*/ { print $NF; }') + + if [ ! 
-z $wfile ]; then + # scaling_setspeed is a special file and we + # should skip updating it + if [ $file != "scaling_setspeed" ]; then + local val=$(cat $1/$file) + printf "Writing $val to: $file\n" + echo $val > $1/$file + fi + fi + else + printf "\n" + update_cpufreq_files_in_dir "$1/$file" + fi + done + + printf "\n" +} + +# Update all writable files with their existing values +update_all_cpufreq_files() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + update_cpufreq_files_in_dir $CPUFREQROOT + + printf "%s\n\n" "------------------------------------------------" +} + + +# CHANGE CPU FREQUENCIES + +# $1: policy +find_current_freq() +{ + cat $CPUFREQROOT/$1/scaling_cur_freq +} + +# $1: policy +# $2: frequency +set_cpu_frequency() +{ + printf "Change frequency for $1 to $2\n" + echo $2 > $CPUFREQROOT/$1/scaling_setspeed +} + +# $1: policy +test_all_frequencies() +{ + local filepath="$CPUFREQROOT/$1" + + backup_governor $1 + + local found=$(switch_governor $1 "userspace") + if [ $found = 1 ]; then + printf "${FUNCNAME[0]}: userspace governor not available for: $1\n" + return; + fi + + printf "Switched governor for $1 to userspace\n\n" + + local freqs=$(cat $filepath/scaling_available_frequencies) + printf "Available frequencies for $1: $freqs\n\n" + + # Set all frequencies one-by-one + for freq in $freqs; do + set_cpu_frequency $1 $freq + done + + printf "\n" + + restore_governor $1 +} + +# $1: loop count +shuffle_frequency_for_all_cpus() +{ + printf "** Test: Running ${FUNCNAME[0]} for $1 loops **\n\n" + + for i in `seq 1 $1`; do + for_each_policy test_all_frequencies + done + printf "\n%s\n\n" "------------------------------------------------" +} + +# Basic cpufreq tests +cpufreq_basic_tests() +{ + printf "*** RUNNING CPUFREQ SANITY TESTS ***\n" + printf "====================================\n\n" + + count=$(count_cpufreq_managed_cpus) + if [ $count = 0 ]; then + printf "No cpu is managed by cpufreq core, exiting\n" + exit; + else + printf "CPUFreq manages: $count CPUs\n\n" + fi + + # Detect & print which CPUs are not managed by cpufreq + print_unmanaged_cpus + + # read/update all cpufreq files + read_all_cpufreq_files + update_all_cpufreq_files + + # hotplug cpus + reboot_cpus 5 + + # Test all frequencies + shuffle_frequency_for_all_cpus 2 + + # Test all governors + shuffle_governors_for_all_cpus 1 +} + +# Suspend/resume +# $1: "suspend" or "hibernate", $2: loop count +do_suspend() +{ + printf "** Test: Running ${FUNCNAME[0]}: Trying $1 for $2 loops **\n\n" + + # Is the directory available + if [ ! -d $SYSFS/power/ -o ! 
-f $SYSFS/power/state ]; then + printf "$SYSFS/power/state not available\n" + return 1 + fi + + if [ $1 = "suspend" ]; then + filename="mem" + elif [ $1 = "hibernate" ]; then + filename="disk" + else + printf "$1 is not a valid option\n" + return 1 + fi + + if [ -n $filename ]; then + present=$(cat $SYSFS/power/state | grep $filename) + + if [ -z "$present" ]; then + printf "Tried to $1 but $filename isn't present in $SYSFS/power/state\n" + return 1; + fi + + for i in `seq 1 $2`; do + printf "Starting $1\n" + echo $filename > $SYSFS/power/state + printf "Came out of $1\n" + + printf "Do basic tests after finishing $1 to verify cpufreq state\n\n" + cpufreq_basic_tests + done + fi +} diff --git a/tools/testing/selftests/cpufreq/governor.sh b/tools/testing/selftests/cpufreq/governor.sh new file mode 100755 index 000000000000..def645103555 --- /dev/null +++ b/tools/testing/selftests/cpufreq/governor.sh @@ -0,0 +1,153 @@ +#!/bin/bash +# +# Test governors + +# protect against multiple inclusion +if [ $FILE_GOVERNOR ]; then + return 0 +else + FILE_GOVERNOR=DONE +fi + +source cpu.sh +source cpufreq.sh + +CUR_GOV= +CUR_FREQ= + +# Find governor's directory path +# $1: policy, $2: governor +find_gov_directory() +{ + if [ -d $CPUFREQROOT/$2 ]; then + printf "$CPUFREQROOT/$2\n" + elif [ -d $CPUFREQROOT/$1/$2 ]; then + printf "$CPUFREQROOT/$1/$2\n" + else + printf "INVALID\n" + fi +} + +# $1: policy +find_current_governor() +{ + cat $CPUFREQROOT/$1/scaling_governor +} + +# $1: policy +backup_governor() +{ + CUR_GOV=$(find_current_governor $1) + + printf "Governor backup done for $1: $CUR_GOV\n" + + if [ $CUR_GOV == "userspace" ]; then + CUR_FREQ=$(find_current_freq $1) + printf "Governor frequency backup done for $1: $CUR_FREQ\n" + fi + + printf "\n" +} + +# $1: policy +restore_governor() +{ + __switch_governor $1 $CUR_GOV + + printf "Governor restored for $1 to $CUR_GOV\n" + + if [ $CUR_GOV == "userspace" ]; then + set_cpu_frequency $1 $CUR_FREQ + printf "Governor frequency restored for $1: $CUR_FREQ\n" + fi + + printf "\n" +} + +# param: +# $1: policy, $2: governor +__switch_governor() +{ + echo $2 > $CPUFREQROOT/$1/scaling_governor +} + +# param: +# $1: cpu, $2: governor +__switch_governor_for_cpu() +{ + echo $2 > $CPUROOT/$1/cpufreq/scaling_governor +} + +# SWITCH GOVERNORS + +# $1: cpu, $2: governor +switch_governor() +{ + local filepath=$CPUFREQROOT/$1/scaling_available_governors + + # check if governor is available + local found=$(cat $filepath | grep $2 | wc -l) + if [ $found = 0 ]; then + echo 1; + return + fi + + __switch_governor $1 $2 + echo 0; +} + +# $1: policy, $2: governor +switch_show_governor() +{ + cur_gov=find_current_governor + if [ $cur_gov == "userspace" ]; then + cur_freq=find_current_freq + fi + + # switch governor + __switch_governor $1 $2 + + printf "\nSwitched governor for $1 to $2\n\n" + + if [ $2 == "userspace" -o $2 == "powersave" -o $2 == "performance" ]; then + printf "No files to read for $2 governor\n\n" + return + fi + + # show governor files + local govpath=$(find_gov_directory $1 $2) + read_cpufreq_files_in_dir $govpath +} + +# $1: function to be called, $2: policy +call_for_each_governor() +{ + local filepath=$CPUFREQROOT/$2/scaling_available_governors + + # Exit if cpu isn't managed by cpufreq core + if [ ! 
-f $filepath ]; then + return; + fi + + backup_governor $2 + + local governors=$(cat $filepath) + printf "Available governors for $2: $governors\n" + + for governor in $governors; do + $1 $2 $governor + done + + restore_governor $2 +} + +# $1: loop count +shuffle_governors_for_all_cpus() +{ + printf "** Test: Running ${FUNCNAME[0]} for $1 loops **\n\n" + + for i in `seq 1 $1`; do + for_each_policy call_for_each_governor switch_show_governor + done + printf "%s\n\n" "------------------------------------------------" +} diff --git a/tools/testing/selftests/cpufreq/main.sh b/tools/testing/selftests/cpufreq/main.sh new file mode 100755 index 000000000000..01bac76ac0ec --- /dev/null +++ b/tools/testing/selftests/cpufreq/main.sh @@ -0,0 +1,194 @@ +#!/bin/bash + +source cpu.sh +source cpufreq.sh +source governor.sh +source module.sh +source special-tests.sh + +FUNC=basic # do basic tests by default +OUTFILE=cpufreq_selftest +SYSFS= +CPUROOT= +CPUFREQROOT= + +helpme() +{ + printf "Usage: $0 [-h] [-todg args] + [-h <help>] + [-o <output-file-for-dump>] + [-t <basic: Basic cpufreq testing + suspend: suspend/resume, + hibernate: hibernate/resume, + modtest: test driver or governor modules. Only to be used with -d or -g options, + sptest1: Simple governor switch to produce lockdep. + sptest2: Concurrent governor switch to produce lockdep. + sptest3: Governor races, shuffle between governors quickly. + sptest4: CPU hotplugs with updates to cpufreq files.>] + [-d <driver's module name: only with \"-t modtest>\"] + [-g <governor's module name: only with \"-t modtest>\"] + \n" + exit 2 +} + +prerequisite() +{ + msg="skip all tests:" + + if [ $UID != 0 ]; then + echo $msg must be run as root >&2 + exit 2 + fi + + taskset -p 01 $$ + + SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'` + + if [ ! -d "$SYSFS" ]; then + echo $msg sysfs is not mounted >&2 + exit 2 + fi + + CPUROOT=$SYSFS/devices/system/cpu + CPUFREQROOT="$CPUROOT/cpufreq" + + if ! ls $CPUROOT/cpu* > /dev/null 2>&1; then + echo $msg cpus not available in sysfs >&2 + exit 2 + fi + + if ! ls $CPUROOT/cpufreq > /dev/null 2>&1; then + echo $msg cpufreq directory not available in sysfs >&2 + exit 2 + fi +} + +parse_arguments() +{ + while getopts ht:o:d:g: arg + do + case $arg in + h) # --help + helpme + ;; + + t) # --func_type (Function to perform: basic, suspend, hibernate, modtest, sptest1/2/3/4 (default: basic)) + FUNC=$OPTARG + ;; + + o) # --output-file (Output file to store dumps) + OUTFILE=$OPTARG + ;; + + d) # --driver-mod-name (Name of the driver module) + DRIVER_MOD=$OPTARG + ;; + + g) # --governor-mod-name (Name of the governor module) + GOVERNOR_MOD=$OPTARG + ;; + + \?) + helpme + ;; + esac + done +} + +do_test() +{ + # Check if CPUs are managed by cpufreq or not + count=$(count_cpufreq_managed_cpus) + + if [ $count = 0 -a $FUNC != "modtest" ]; then + echo "No cpu is managed by cpufreq core, exiting" + exit 2; + fi + + case "$FUNC" in + "basic") + cpufreq_basic_tests + ;; + + "suspend") + do_suspend "suspend" 1 + ;; + + "hibernate") + do_suspend "hibernate" 1 + ;; + + "modtest") + # Do we have modules in place? 
+ if [ -z $DRIVER_MOD ] && [ -z $GOVERNOR_MOD ]; then + echo "No driver or governor module passed with -d or -g" + exit 2; + fi + + if [ $DRIVER_MOD ]; then + if [ $GOVERNOR_MOD ]; then + module_test $DRIVER_MOD $GOVERNOR_MOD + else + module_driver_test $DRIVER_MOD + fi + else + if [ $count = 0 ]; then + echo "No cpu is managed by cpufreq core, exiting" + exit 2; + fi + + module_governor_test $GOVERNOR_MOD + fi + ;; + + "sptest1") + simple_lockdep + ;; + + "sptest2") + concurrent_lockdep + ;; + + "sptest3") + governor_race + ;; + + "sptest4") + hotplug_with_updates + ;; + + *) + echo "Invalid [-f] function type" + helpme + ;; + esac +} + +# clear dumps +# $1: file name +clear_dumps() +{ + echo "" > $1.txt + echo "" > $1.dmesg_cpufreq.txt + echo "" > $1.dmesg_full.txt +} + +# $1: output file name +dmesg_dumps() +{ + dmesg | grep cpufreq >> $1.dmesg_cpufreq.txt + + # We may need the full logs as well + dmesg >> $1.dmesg_full.txt +} + +# Parse arguments +parse_arguments $@ + +# Make sure all requirements are met +prerequisite + +# Run requested functions +clear_dumps $OUTFILE +do_test >> $OUTFILE.txt +dmesg_dumps $OUTFILE diff --git a/tools/testing/selftests/cpufreq/module.sh b/tools/testing/selftests/cpufreq/module.sh new file mode 100755 index 000000000000..8ff2244a33a1 --- /dev/null +++ b/tools/testing/selftests/cpufreq/module.sh @@ -0,0 +1,243 @@ +#!/bin/bash +# +# Modules specific tests cases + +# protect against multiple inclusion +if [ $FILE_MODULE ]; then + return 0 +else + FILE_MODULE=DONE +fi + +source cpu.sh +source cpufreq.sh +source governor.sh + +# Check basic insmod/rmmod +# $1: module +test_basic_insmod_rmmod() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + printf "Inserting $1 module\n" + # insert module + insmod $1 + if [ $? != 0 ]; then + printf "Insmod $1 failed\n" + exit; + fi + + printf "Removing $1 module\n" + # remove module + rmmod $1 + if [ $? != 0 ]; then + printf "rmmod $1 failed\n" + exit; + fi + + printf "\n" +} + +# Insert cpufreq driver module and perform basic tests +# $1: cpufreq-driver module to insert +# $2: If we want to play with CPUs (1) or not (0) +module_driver_test_single() +{ + printf "** Test: Running ${FUNCNAME[0]} for driver $1 and cpus_hotplug=$2 **\n\n" + + if [ $2 -eq 1 ]; then + # offline all non-boot CPUs + for_each_non_boot_cpu offline_cpu + printf "\n" + fi + + # insert module + printf "Inserting $1 module\n\n" + insmod $1 + if [ $? != 0 ]; then + printf "Insmod $1 failed\n" + return; + fi + + if [ $2 -eq 1 ]; then + # online all non-boot CPUs + for_each_non_boot_cpu online_cpu + printf "\n" + fi + + # run basic tests + cpufreq_basic_tests + + # remove module + printf "Removing $1 module\n\n" + rmmod $1 + if [ $? != 0 ]; then + printf "rmmod $1 failed\n" + return; + fi + + # There shouldn't be any cpufreq directories now. + for_each_cpu cpu_should_not_have_cpufreq_directory + printf "\n" +} + +# $1: cpufreq-driver module to insert +module_driver_test() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + # check if module is present or not + ls $1 > /dev/null + if [ $? 
!= 0 ]; then + printf "$1: not present in `pwd` folder\n" + return; + fi + + # test basic module tests + test_basic_insmod_rmmod $1 + + # Do simple module test + module_driver_test_single $1 0 + + # Remove CPUs before inserting module and then bring them back + module_driver_test_single $1 1 + printf "\n" +} + +# find governor name based on governor module name +# $1: governor module name +find_gov_name() +{ + if [ $1 = "cpufreq_ondemand.ko" ]; then + printf "ondemand" + elif [ $1 = "cpufreq_conservative.ko" ]; then + printf "conservative" + elif [ $1 = "cpufreq_userspace.ko" ]; then + printf "userspace" + elif [ $1 = "cpufreq_performance.ko" ]; then + printf "performance" + elif [ $1 = "cpufreq_powersave.ko" ]; then + printf "powersave" + elif [ $1 = "cpufreq_schedutil.ko" ]; then + printf "schedutil" + fi +} + +# $1: governor string, $2: governor module, $3: policy +# example: module_governor_test_single "ondemand" "cpufreq_ondemand.ko" 2 +module_governor_test_single() +{ + printf "** Test: Running ${FUNCNAME[0]} for $3 **\n\n" + + backup_governor $3 + + # switch to new governor + printf "Switch from $CUR_GOV to $1\n" + switch_show_governor $3 $1 + + # try removing module, it should fail as governor is used + printf "Removing $2 module\n\n" + rmmod $2 + if [ $? = 0 ]; then + printf "WARN: rmmod $2 succeeded even if governor is used\n" + insmod $2 + else + printf "Pass: unable to remove $2 while it is being used\n\n" + fi + + # switch back to old governor + printf "Switchback to $CUR_GOV from $1\n" + restore_governor $3 + printf "\n" +} + +# Insert cpufreq governor module and perform basic tests +# $1: cpufreq-governor module to insert +module_governor_test() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + # check if module is present or not + ls $1 > /dev/null + if [ $? != 0 ]; then + printf "$1: not present in `pwd` folder\n" + return; + fi + + # test basic module tests + test_basic_insmod_rmmod $1 + + # insert module + printf "Inserting $1 module\n\n" + insmod $1 + if [ $? != 0 ]; then + printf "Insmod $1 failed\n" + return; + fi + + # switch to new governor for each cpu + for_each_policy module_governor_test_single $(find_gov_name $1) $1 + + # remove module + printf "Removing $1 module\n\n" + rmmod $1 + if [ $? != 0 ]; then + printf "rmmod $1 failed\n" + return; + fi + printf "\n" +} + +# test modules: driver and governor +# $1: driver module, $2: governor module +module_test() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n\n" + + # check if modules are present or not + ls $1 $2 > /dev/null + if [ $? != 0 ]; then + printf "$1 or $2: is not present in `pwd` folder\n" + return; + fi + + # TEST1: Insert gov after driver + # insert driver module + printf "Inserting $1 module\n\n" + insmod $1 + if [ $? != 0 ]; then + printf "Insmod $1 failed\n" + return; + fi + + # run governor tests + module_governor_test $2 + + # remove driver module + printf "Removing $1 module\n\n" + rmmod $1 + if [ $? != 0 ]; then + printf "rmmod $1 failed\n" + return; + fi + + # TEST2: Insert driver after governor + # insert governor module + printf "Inserting $2 module\n\n" + insmod $2 + if [ $? != 0 ]; then + printf "Insmod $2 failed\n" + return; + fi + + # run governor tests + module_driver_test $1 + + # remove driver module + printf "Removing $2 module\n\n" + rmmod $2 + if [ $? 
!= 0 ]; then + printf "rmmod $2 failed\n" + return; + fi +} diff --git a/tools/testing/selftests/cpufreq/special-tests.sh b/tools/testing/selftests/cpufreq/special-tests.sh new file mode 100755 index 000000000000..58b730f23ef7 --- /dev/null +++ b/tools/testing/selftests/cpufreq/special-tests.sh @@ -0,0 +1,115 @@ +#!/bin/bash +# +# Special test cases reported by people + +# Testcase 1: Reported here: http://marc.info/?l=linux-pm&m=140618592709858&w=2 + +# protect against multiple inclusion +if [ $FILE_SPECIAL ]; then + return 0 +else + FILE_SPECIAL=DONE +fi + +source cpu.sh +source cpufreq.sh +source governor.sh + +# Test 1 +# $1: policy +__simple_lockdep() +{ + # switch to ondemand + __switch_governor $1 "ondemand" + + # cat ondemand files + local ondir=$(find_gov_directory $1 "ondemand") + if [ -z $ondir ]; then + printf "${FUNCNAME[0]}Ondemand directory not created, quit" + return + fi + + cat $ondir/* + + # switch to conservative + __switch_governor $1 "conservative" +} + +simple_lockdep() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n" + + for_each_policy __simple_lockdep +} + +# Test 2 +# $1: policy +__concurrent_lockdep() +{ + for i in `seq 0 100`; do + __simple_lockdep $1 + done +} + +concurrent_lockdep() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n" + + for_each_policy_concurrent __concurrent_lockdep +} + +# Test 3 +quick_shuffle() +{ + # this is called concurrently from governor_race + for I in `seq 1000` + do + echo ondemand | sudo tee $CPUFREQROOT/policy*/scaling_governor & + echo userspace | sudo tee $CPUFREQROOT/policy*/scaling_governor & + done +} + +governor_race() +{ + printf "** Test: Running ${FUNCNAME[0]} **\n" + + # run 8 concurrent instances + for I in `seq 8` + do + quick_shuffle & + done +} + +# Test 4 +# $1: cpu +hotplug_with_updates_cpu() +{ + local filepath="$CPUROOT/$1/cpufreq" + + # switch to ondemand + __switch_governor_for_cpu $1 "ondemand" + + for i in `seq 1 5000` + do + reboot_cpu $1 + done & + + local freqs=$(cat $filepath/scaling_available_frequencies) + local oldfreq=$(cat $filepath/scaling_min_freq) + + for j in `seq 1 5000` + do + # Set all frequencies one-by-one + for freq in $freqs; do + echo $freq > $filepath/scaling_min_freq + done + done + + # restore old freq + echo $oldfreq > $filepath/scaling_min_freq +} + +hotplug_with_updates() +{ + for_each_non_boot_cpu hotplug_with_updates_cpu +} diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile index 736c3ddfc787..c49dcea69319 100644 --- a/tools/testing/selftests/efivarfs/Makefile +++ b/tools/testing/selftests/efivarfs/Makefile @@ -1,13 +1,7 @@ CFLAGS = -Wall -test_objs = open-unlink create-read - -all: $(test_objs) - +TEST_GEN_FILES := open-unlink create-read TEST_PROGS := efivarfs.sh -TEST_FILES := $(test_objs) include ../lib.mk -clean: - rm -f $(test_objs) diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index d4300602bf37..2e13035dff7f 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -1,27 +1,23 @@ CFLAGS = -Wall -BINARIES = execveat -DEPS = execveat.symlink execveat.denatured script subdir -all: $(BINARIES) $(DEPS) -subdir: +TEST_GEN_PROGS := execveat +TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir +# Makefile is a run-time dependency, since it's accessed by the execveat test +TEST_FILES := Makefile + +EXTRA_CLEAN := $(OUTPUT)/subdir.moved $(OUTPUT)/execveat.moved $(OUTPUT)/xxxxx* + +include ../lib.mk + +$(OUTPUT)/subdir: mkdir -p $@ 
-script: +$(OUTPUT)/script: echo '#!/bin/sh' > $@ echo 'exit $$*' >> $@ chmod +x $@ -execveat.symlink: execveat - ln -s -f $< $@ -execveat.denatured: execveat +$(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat + cd $(OUTPUT) && ln -s -f $(shell basename $<) $(shell basename $@) +$(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat cp $< $@ chmod -x $@ -%: %.c - $(CC) $(CFLAGS) -o $@ $^ - -TEST_PROGS := execveat -# Makefile is a run-time dependency, since it's accessed by the execveat test -TEST_FILES := $(DEPS) Makefile - -include ../lib.mk -clean: - rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx* diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile index 4e6ed13e7f66..a8a5e21850e7 100644 --- a/tools/testing/selftests/ftrace/Makefile +++ b/tools/testing/selftests/ftrace/Makefile @@ -1,9 +1,7 @@ all: TEST_PROGS := ftracetest -TEST_DIRS := test.d +TEST_FILES := test.d +EXTRA_CLEAN := $(OUTPUT)/logs/* include ../lib.mk - -clean: - rm -rf logs/* diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 6a1752956283..653c5cd9e44d 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -3,13 +3,18 @@ SUBDIRS := functional TEST_PROGS := run.sh .PHONY: all clean -all: - for DIR in $(SUBDIRS); do $(MAKE) -C $$DIR $@ ; done include ../lib.mk +all: + for DIR in $(SUBDIRS); do \ + BUILD_TARGET=$$OUTPUT/$$DIR; \ + mkdir $$BUILD_TARGET -p; \ + make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + done + override define RUN_TESTS - ./run.sh + @if [ `dirname $(OUTPUT)` = $(PWD) ]; then ./run.sh; fi endef override define INSTALL_RULE @@ -17,7 +22,9 @@ override define INSTALL_RULE install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) @for SUBDIR in $(SUBDIRS); do \ - $(MAKE) -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \ + BUILD_TARGET=$$OUTPUT/$$SUBDIR; \ + mkdir $$BUILD_TARGET -p; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \ done; endef @@ -26,4 +33,8 @@ override define EMIT_TESTS endef clean: - for DIR in $(SUBDIRS); do $(MAKE) -C $$DIR $@ ; done + for DIR in $(SUBDIRS); do \ + BUILD_TARGET=$$OUTPUT/$$DIR; \ + mkdir $$BUILD_TARGET -p; \ + make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + done diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index 9d6b75ef7b5d..a648e7a6cbc3 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -2,8 +2,11 @@ INCLUDES := -I../include -I../../ CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) LDFLAGS := $(LDFLAGS) -pthread -lrt -HEADERS := ../include/futextest.h -TARGETS := \ +HEADERS := \ + ../include/futextest.h \ + ../include/atomic.h \ + ../include/logging.h +TEST_GEN_FILES := \ futex_wait_timeout \ futex_wait_wouldblock \ futex_requeue_pi \ @@ -12,14 +15,8 @@ TARGETS := \ futex_wait_uninitialized_heap \ futex_wait_private_mapped_file -TEST_PROGS := $(TARGETS) run.sh - -.PHONY: all clean -all: $(TARGETS) - -$(TARGETS): $(HEADERS) +TEST_PROGS := run.sh include ../../lib.mk -clean: - rm -f $(TARGETS) +$(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/futex/include/logging.h b/tools/testing/selftests/futex/include/logging.h index 014aa01197af..e14469103f07 100644 --- a/tools/testing/selftests/futex/include/logging.h +++ b/tools/testing/selftests/futex/include/logging.h @@ -21,6 +21,7 @@ #ifndef _LOGGING_H 
#define _LOGGING_H +#include <stdio.h> #include <string.h> #include <unistd.h> #include <linux/futex.h> diff --git a/tools/testing/selftests/gpio/.gitignore b/tools/testing/selftests/gpio/.gitignore new file mode 100644 index 000000000000..7d14f743d1a4 --- /dev/null +++ b/tools/testing/selftests/gpio/.gitignore @@ -0,0 +1 @@ +gpio-mockup-chardev diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile index f5f1a28715ff..19678e90efb2 100644 --- a/tools/testing/selftests/intel_pstate/Makefile +++ b/tools/testing/selftests/intel_pstate/Makefile @@ -1,15 +1,10 @@ -CC := $(CROSS_COMPILE)gcc CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE LDFLAGS := $(LDFLAGS) -lm -TARGETS := msr aperf +TEST_GEN_FILES := msr aperf -TEST_PROGS := $(TARGETS) run.sh +TEST_PROGS := run.sh -.PHONY: all clean -all: $(TARGETS) +include ../lib.mk -$(TARGETS): $(HEADERS) - -clean: - rm -f $(TARGETS) +$(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/intel_pstate/aperf.c b/tools/testing/selftests/intel_pstate/aperf.c index 6046e183f4ad..cd72f3dc83e9 100644 --- a/tools/testing/selftests/intel_pstate/aperf.c +++ b/tools/testing/selftests/intel_pstate/aperf.c @@ -14,7 +14,7 @@ void usage(char *name) { } int main(int argc, char **argv) { - int i, cpu, fd; + unsigned int i, cpu, fd; char msr_file_name[64]; long long tsc, old_tsc, new_tsc; long long aperf, old_aperf, new_aperf; diff --git a/tools/testing/selftests/ipc/.gitignore b/tools/testing/selftests/ipc/.gitignore index 84b66a3c1f74..9af04c9353c0 100644 --- a/tools/testing/selftests/ipc/.gitignore +++ b/tools/testing/selftests/ipc/.gitignore @@ -1 +1,2 @@ msgque_test +msgque diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile index 25d2e702c68a..30ef4c7f53ea 100644 --- a/tools/testing/selftests/ipc/Makefile +++ b/tools/testing/selftests/ipc/Makefile @@ -11,12 +11,7 @@ endif CFLAGS += -I../../../../usr/include/ -all: - $(CC) $(CFLAGS) msgque.c -o msgque_test - -TEST_PROGS := msgque_test +TEST_GEN_PROGS := msgque include ../lib.mk -clean: - rm -fr ./msgque_test diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile index 2ae7450a9a89..47aa9887f9d4 100644 --- a/tools/testing/selftests/kcmp/Makefile +++ b/tools/testing/selftests/kcmp/Makefile @@ -1,10 +1,8 @@ CFLAGS += -I../../../../usr/include/ -all: kcmp_test +TEST_GEN_PROGS := kcmp_test -TEST_PROGS := kcmp_test +EXTRA_CLEAN := $(OUTPUT)/kcmp-test-file include ../lib.mk -clean: - $(RM) kcmp_test kcmp-test-file diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 50a93f5f13d6..775c589ac3c0 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -2,9 +2,19 @@ # Makefile can operate with or without the kbuild infrastructure. 
CC := $(CROSS_COMPILE)gcc +ifeq (0,$(MAKELEVEL)) +OUTPUT := $(shell pwd) +endif + +TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) +TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) + +all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) + define RUN_TESTS - @for TEST in $(TEST_PROGS); do \ - (./$$TEST && echo "selftests: $$TEST [PASS]") || echo "selftests: $$TEST [FAIL]"; \ + @for TEST in $(TEST_GEN_PROGS) $(TEST_PROGS); do \ + BASENAME_TEST=`basename $$TEST`; \ + cd `dirname $$TEST`; (./$$BASENAME_TEST && echo "selftests: $$BASENAME_TEST [PASS]") || echo "selftests: $$BASENAME_TEST [FAIL]"; cd -;\ done; endef @@ -14,8 +24,13 @@ run_tests: all define INSTALL_RULE @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ mkdir -p ${INSTALL_PATH}; \ - echo "rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ - rsync -a $(TEST_DIRS) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ + echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ + rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ + fi + @if [ "X$(TEST_GEN_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ + mkdir -p ${INSTALL_PATH}; \ + echo "rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ + rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ fi endef @@ -27,12 +42,25 @@ else endif define EMIT_TESTS - @for TEST in $(TEST_PROGS); do \ - echo "(./$$TEST && echo \"selftests: $$TEST [PASS]\") || echo \"selftests: $$TEST [FAIL]\""; \ + @for TEST in $(TEST_GEN_PROGS) $(TEST_PROGS); do \ + BASENAME_TEST=`basename $$TEST`; \ + echo "(./$$BASENAME_TEST && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \ done; endef emit_tests: $(EMIT_TESTS) +clean: + $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) + +$(OUTPUT)/%:%.c + $(LINK.c) $^ $(LDLIBS) -o $@ + +$(OUTPUT)/%.o:%.S + $(COMPILE.S) $^ -o $@ + +$(OUTPUT)/%:%.S + $(LINK.S) $^ $(LDLIBS) -o $@ + .PHONY: run_tests all clean install emit_tests diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile index a1a97085847d..02845532b059 100644 --- a/tools/testing/selftests/membarrier/Makefile +++ b/tools/testing/selftests/membarrier/Makefile @@ -1,10 +1,6 @@ CFLAGS += -g -I../../../../usr/include/ -TEST_PROGS := membarrier_test - -all: $(TEST_PROGS) +TEST_GEN_PROGS := membarrier_test include ../lib.mk -clean: - $(RM) $(TEST_PROGS) diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile index fd396ac811b6..79891d033de1 100644 --- a/tools/testing/selftests/memfd/Makefile +++ b/tools/testing/selftests/memfd/Makefile @@ -1,22 +1,13 @@ -CC = $(CROSS_COMPILE)gcc CFLAGS += -D_FILE_OFFSET_BITS=64 CFLAGS += -I../../../../include/uapi/ CFLAGS += -I../../../../include/ CFLAGS += -I../../../../usr/include/ -TEST_PROGS := memfd_test - -all: $(TEST_PROGS) - -include ../lib.mk - -build_fuse: fuse_mnt fuse_test +TEST_PROGS := run_fuse_test.sh +TEST_GEN_FILES := memfd_test fuse_mnt fuse_test fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) fuse_mnt: LDFLAGS += $(shell pkg-config fuse --libs) -run_fuse: build_fuse - @./run_fuse_test.sh || echo "fuse_test: [FAIL]" +include ../lib.mk -clean: - $(RM) memfd_test fuse_test diff --git a/tools/testing/selftests/mount/Makefile 
b/tools/testing/selftests/mount/Makefile index 5e35c9c50b72..9093d7ffe87f 100644 --- a/tools/testing/selftests/mount/Makefile +++ b/tools/testing/selftests/mount/Makefile @@ -1,14 +1,11 @@ # Makefile for mount selftests. CFLAGS = -Wall \ -O2 -all: unprivileged-remount-test -unprivileged-remount-test: unprivileged-remount-test.c - $(CC) $(CFLAGS) unprivileged-remount-test.c -o unprivileged-remount-test +TEST_GEN_PROGS := unprivileged-remount-test include ../lib.mk -TEST_PROGS := unprivileged-remount-test override RUN_TESTS := if [ -f /proc/self/uid_map ] ; \ then \ ./unprivileged-remount-test ; \ @@ -17,5 +14,3 @@ override RUN_TESTS := if [ -f /proc/self/uid_map ] ; \ fi override EMIT_TESTS := echo "$(RUN_TESTS)" -clean: - rm -f unprivileged-remount-test diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index eebac29acbd9..79a664aeb8d7 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile @@ -1,8 +1,6 @@ CFLAGS += -O2 LDLIBS = -lrt -lpthread -lpopt -TEST_PROGS := mq_open_tests mq_perf_tests - -all: $(TEST_PROGS) +TEST_GEN_PROGS := mq_open_tests mq_perf_tests include ../lib.mk @@ -16,5 +14,3 @@ override define EMIT_TESTS echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\"" endef -clean: - rm -f mq_open_tests mq_perf_tests diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index e24e4c82542e..fbfe5d0d5c2e 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -3,20 +3,13 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g CFLAGS += -I../../../../usr/include/ -NET_PROGS = socket -NET_PROGS += psock_fanout psock_tpacket -NET_PROGS += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa -NET_PROGS += reuseport_dualstack - -all: $(NET_PROGS) reuseport_bpf_numa: LDFLAGS += -lnuma -%: %.c - $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh -TEST_FILES := $(NET_PROGS) +TEST_GEN_FILES = socket +TEST_GEN_FILES += psock_fanout psock_tpacket +TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa +TEST_GEN_FILES += reuseport_dualstack include ../lib.mk -clean: - $(RM) $(NET_PROGS) diff --git a/tools/testing/selftests/nsfs/Makefile b/tools/testing/selftests/nsfs/Makefile index 2306054a901a..9ff7c7f80625 100644 --- a/tools/testing/selftests/nsfs/Makefile +++ b/tools/testing/selftests/nsfs/Makefile @@ -1,12 +1,5 @@ -TEST_PROGS := owner pidns +TEST_GEN_PROGS := owner pidns CFLAGS := -Wall -Werror -all: owner pidns -owner: owner.c -pidns: pidns.c - -clean: - $(RM) owner pidns - include ../lib.mk diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index c2c4211ba58b..1c5d0575802e 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -34,31 +34,35 @@ endif all: $(SUB_DIRS) $(SUB_DIRS): - $(MAKE) -k -C $@ all + BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all include ../lib.mk override define RUN_TESTS @for TARGET in $(SUB_DIRS); do \ - $(MAKE) -C $$TARGET run_tests; \ + BUILD_TARGET=$$OUTPUT/$$TARGET; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ done; endef override define INSTALL_RULE @for TARGET in $(SUB_DIRS); do \ - $(MAKE) -C $$TARGET install; \ + BUILD_TARGET=$$OUTPUT/$$TARGET; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ done; endef override define EMIT_TESTS @for TARGET in $(SUB_DIRS); do \ - $(MAKE) -s -C 
$$TARGET emit_tests; \ + BUILD_TARGET=$$OUTPUT/$$TARGET; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ done; endef clean: @for TARGET in $(SUB_DIRS); do \ - $(MAKE) -C $$TARGET clean; \ + BUILD_TARGET=$$OUTPUT/$$TARGET; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ done; rm -f tags diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile index ad6a4e49da91..16b22004e75f 100644 --- a/tools/testing/selftests/powerpc/alignment/Makefile +++ b/tools/testing/selftests/powerpc/alignment/Makefile @@ -1,10 +1,5 @@ -TEST_PROGS := copy_unaligned copy_first_unaligned paste_unaligned paste_last_unaligned - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c ../utils.c copy_paste_unaligned_common.c +TEST_GEN_PROGS := copy_unaligned copy_first_unaligned paste_unaligned paste_last_unaligned include ../../lib.mk -clean: - rm -f $(TEST_PROGS) +$(TEST_GEN_PROGS): ../harness.c ../utils.c copy_paste_unaligned_common.c diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index 545077f98f72..fb96a89bd953 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile @@ -1,16 +1,11 @@ -TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall +TEST_GEN_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall CFLAGS += -O2 -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c - -context_switch: ../utils.c -context_switch: CFLAGS += -maltivec -mvsx -mabi=altivec -context_switch: LDLIBS += -lpthread - include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c + +$(OUTPUT)/context_switch: ../utils.c +$(OUTPUT)/context_switch: CFLAGS += -maltivec -mvsx -mabi=altivec +$(OUTPUT)/context_switch: LDLIBS += -lpthread diff --git a/tools/testing/selftests/powerpc/context_switch/Makefile b/tools/testing/selftests/powerpc/context_switch/Makefile index e164d1466466..e9351bb4285d 100644 --- a/tools/testing/selftests/powerpc/context_switch/Makefile +++ b/tools/testing/selftests/powerpc/context_switch/Makefile @@ -1,10 +1,5 @@ -TEST_PROGS := cp_abort - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c ../utils.c +TEST_GEN_PROGS := cp_abort include ../../lib.mk -clean: - rm -f $(TEST_PROGS) +$(TEST_GEN_PROGS): ../harness.c ../utils.c diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index 384843ea0d40..681ab19d0a84 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile @@ -7,19 +7,14 @@ CFLAGS += -maltivec # Use our CFLAGS for the implicit .S rule ASFLAGS = $(CFLAGS) -TEST_PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7 +TEST_GEN_PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7 EXTRA_SOURCES := validate.c ../harness.c -all: $(TEST_PROGS) - -copyuser_64: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_base -copyuser_power7: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_power7 -memcpy_64: CPPFLAGS += -D COPY_LOOP=test_memcpy -memcpy_power7: CPPFLAGS += -D COPY_LOOP=test_memcpy_power7 - -$(TEST_PROGS): $(EXTRA_SOURCES) - include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(OUTPUT)/copyuser_64: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_base +$(OUTPUT)/copyuser_power7: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_power7 +$(OUTPUT)/memcpy_64: CPPFLAGS += -D COPY_LOOP=test_memcpy 
+$(OUTPUT)/memcpy_power7: CPPFLAGS += -D COPY_LOOP=test_memcpy_power7 + +$(TEST_GEN_PROGS): $(EXTRA_SOURCES) diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile index 49327ee84e3a..c5639deb8887 100644 --- a/tools/testing/selftests/powerpc/dscr/Makefile +++ b/tools/testing/selftests/powerpc/dscr/Makefile @@ -1,14 +1,9 @@ -TEST_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \ +TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \ dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ dscr_sysfs_thread_test -dscr_default_test: LDLIBS += -lpthread - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c - include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread + +$(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index a505b66d408a..fa8bae920c91 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile @@ -1,22 +1,17 @@ -TEST_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt +TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c -$(TEST_PROGS): CFLAGS += -O2 -g -pthread -m64 -maltivec - -fpu_syscall: fpu_asm.S -fpu_preempt: fpu_asm.S -fpu_signal: fpu_asm.S +include ../../lib.mk -vmx_syscall: vmx_asm.S -vmx_preempt: vmx_asm.S -vmx_signal: vmx_asm.S +$(TEST_GEN_PROGS): ../harness.c +$(TEST_GEN_PROGS): CFLAGS += -O2 -g -pthread -m64 -maltivec -vsx_preempt: CFLAGS += -mvsx -vsx_preempt: vsx_asm.S +$(OUTPUT)/fpu_syscall: fpu_asm.S +$(OUTPUT)/fpu_preempt: fpu_asm.S +$(OUTPUT)/fpu_signal: fpu_asm.S -include ../../lib.mk +$(OUTPUT)/vmx_syscall: vmx_asm.S +$(OUTPUT)/vmx_preempt: vmx_asm.S +$(OUTPUT)/vmx_signal: vmx_asm.S -clean: - rm -f $(TEST_PROGS) *.o +$(OUTPUT)/vsx_preempt: CFLAGS += -mvsx +$(OUTPUT)/vsx_preempt: vsx_asm.S diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 3bdb96eae558..1cffe54dccfb 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -1,19 +1,15 @@ noarg: $(MAKE) -C ../ -TEST_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao -TEST_FILES := tempfile +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao +TEST_GEN_FILES := tempfile -all: $(TEST_PROGS) $(TEST_FILES) - -$(TEST_PROGS): ../harness.c +include ../../lib.mk -prot_sao: ../utils.c +$(TEST_GEN_PROGS): ../harness.c -include ../../lib.mk +$(OUTPUT)/prot_sao: ../utils.c -tempfile: - dd if=/dev/zero of=tempfile bs=64k count=1 +$(OUTPUT)/tempfile: + dd if=/dev/zero of=$@ bs=64k count=1 -clean: - rm -f $(TEST_PROGS) tempfile diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index ac41a7177f2e..e4e55d1d3e0f 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -1,44 +1,44 @@ noarg: $(MAKE) -C ../ -TEST_PROGS := count_instructions l3_bank_test per_event_excludes +TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c -all: $(TEST_PROGS) ebb +include ../../lib.mk + +all: $(TEST_GEN_PROGS) ebb -$(TEST_PROGS): $(EXTRA_SOURCES) +$(TEST_GEN_PROGS): $(EXTRA_SOURCES) # loop.S can only be built 64-bit -count_instructions: loop.S 
count_instructions.c $(EXTRA_SOURCES) +$(OUTPUT)/count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) $(CC) $(CFLAGS) -m64 -o $@ $^ -per_event_excludes: ../utils.c - -include ../../lib.mk +$(OUTPUT)/per_event_excludes: ../utils.c DEFAULT_RUN_TESTS := $(RUN_TESTS) override define RUN_TESTS $(DEFAULT_RUN_TESTS) - $(MAKE) -C ebb run_tests + TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests endef DEFAULT_EMIT_TESTS := $(EMIT_TESTS) override define EMIT_TESTS $(DEFAULT_EMIT_TESTS) - $(MAKE) -s -C ebb emit_tests + TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests endef DEFAULT_INSTALL_RULE := $(INSTALL_RULE) override define INSTALL_RULE $(DEFAULT_INSTALL_RULE) - $(MAKE) -C ebb install + TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install endef clean: - rm -f $(TEST_PROGS) loop.o - $(MAKE) -C ebb clean + $(RM) $(TEST_GEN_PROGS) $(OUTPUT)/loop.o + TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean ebb: - $(MAKE) -k -C $@ all + TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all .PHONY: all run_tests clean ebb diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index 8d2279c4bb4b..6001fb0a377a 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -4,7 +4,7 @@ noarg: # The EBB handler is 64-bit code and everything links against it CFLAGS += -m64 -TEST_PROGS := reg_access_test event_attributes_test cycles_test \ +TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \ cycles_with_freeze_test pmc56_overflow_test \ ebb_vs_cpu_event_test cpu_event_vs_ebb_test \ cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \ @@ -16,16 +16,11 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \ lost_exception_test no_handler_test \ cycles_with_mmcr2_test -all: $(TEST_PROGS) +include ../../../lib.mk -$(TEST_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ +$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ ebb.c ebb_handler.S trace.c busy_loop.S -instruction_count_test: ../loop.S - -lost_exception_test: ../lib.c - -include ../../../lib.mk +$(OUTPUT)/instruction_count_test: ../loop.S -clean: - rm -f $(TEST_PROGS) +$(OUTPUT)/lost_exception_test: ../lib.c diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile index b68c6221d3d1..175366db7be8 100644 --- a/tools/testing/selftests/powerpc/primitives/Makefile +++ b/tools/testing/selftests/powerpc/primitives/Makefile @@ -1,12 +1,7 @@ CFLAGS += -I$(CURDIR) -TEST_PROGS := load_unaligned_zeropad - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c +TEST_GEN_PROGS := load_unaligned_zeropad include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile index 2a728f4d2873..557b9379f3bb 100644 --- a/tools/testing/selftests/powerpc/stringloops/Makefile +++ b/tools/testing/selftests/powerpc/stringloops/Makefile @@ -2,14 +2,9 @@ CFLAGS += -m64 CFLAGS += -I$(CURDIR) -TEST_PROGS := memcmp +TEST_GEN_PROGS := memcmp EXTRA_SOURCES := memcmp_64.S ../harness.c -all: $(TEST_PROGS) - -$(TEST_PROGS): $(EXTRA_SOURCES) - include ../../lib.mk -clean: - rm 
-f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): $(EXTRA_SOURCES) diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile index e21d10674e54..b92c2a132c4f 100644 --- a/tools/testing/selftests/powerpc/switch_endian/Makefile +++ b/tools/testing/selftests/powerpc/switch_endian/Makefile @@ -1,18 +1,15 @@ -TEST_PROGS := switch_endian_test +TEST_GEN_PROGS := switch_endian_test ASFLAGS += -O2 -Wall -g -nostdlib -m64 -all: $(TEST_PROGS) +EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S -switch_endian_test: check-reversed.S +include ../../lib.mk + +$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S -check-reversed.o: check.o +$(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o $(CROSS_COMPILE)objcopy -j .text --reverse-bytes=4 -O binary $< $@ -check-reversed.S: check-reversed.o +$(OUTPUT)/check-reversed.S: $(OUTPUT)/check-reversed.o hexdump -v -e '/1 ".byte 0x%02X\n"' $< > $@ - -include ../../lib.mk - -clean: - rm -f $(TEST_PROGS) *.o check-reversed.S diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile index b35c7945bec5..da22ca7c38c1 100644 --- a/tools/testing/selftests/powerpc/syscalls/Makefile +++ b/tools/testing/selftests/powerpc/syscalls/Makefile @@ -1,12 +1,7 @@ -TEST_PROGS := ipc_unmuxed +TEST_GEN_PROGS := ipc_unmuxed CFLAGS += -I../../../../../usr/include -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c - include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index c6c53c82fdd6..5576ee6a51f2 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -1,23 +1,19 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu \ tm-signal-context-chk-vmx tm-signal-context-chk-vsx -TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ +TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS) -all: $(TEST_PROGS) +include ../../lib.mk -$(TEST_PROGS): ../harness.c ../utils.c +$(TEST_GEN_PROGS): ../harness.c ../utils.c CFLAGS += -mhtm -tm-syscall: tm-syscall-asm.S -tm-syscall: CFLAGS += -I../../../../../usr/include -tm-tmspr: CFLAGS += -pthread +$(OUTPUT)/tm-syscall: tm-syscall-asm.S +$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include +$(OUTPUT)/tm-tmspr: CFLAGS += -pthread +SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S $(SIGNAL_CONTEXT_CHK_TESTS): CFLAGS += -mhtm -m64 -mvsx - -include ../../lib.mk - -clean: - rm -f $(TEST_PROGS) *.o diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile index a485f2e286ae..f8ced26748f8 100644 --- a/tools/testing/selftests/powerpc/vphn/Makefile +++ b/tools/testing/selftests/powerpc/vphn/Makefile @@ -1,12 +1,8 @@ -TEST_PROGS := test-vphn +TEST_GEN_PROGS := test-vphn CFLAGS += -m64 -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c - include ../../lib.mk -clean: - rm -f $(TEST_PROGS) +$(TEST_GEN_PROGS): ../harness.c + diff --git a/tools/testing/selftests/pstore/Makefile b/tools/testing/selftests/pstore/Makefile index bd7abe24ea08..c5f2440ba1f7 100644 --- a/tools/testing/selftests/pstore/Makefile +++ b/tools/testing/selftests/pstore/Makefile @@ -5,11 +5,9 @@ all: TEST_PROGS := pstore_tests 
pstore_post_reboot_tests TEST_FILES := common_tests pstore_crash_test +EXTRA_CLEAN := logs/* *uuid include ../lib.mk run_crash: @sh pstore_crash_test || { echo "pstore_crash_test: [FAIL]"; exit 1; } - -clean: - rm -rf logs/* *uuid diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile index 453927fea90c..8a2bc5562179 100644 --- a/tools/testing/selftests/ptrace/Makefile +++ b/tools/testing/selftests/ptrace/Makefile @@ -1,11 +1,5 @@ CFLAGS += -iquote../../../../include/uapi -Wall -peeksiginfo: peeksiginfo.c -all: peeksiginfo - -clean: - rm -f peeksiginfo - -TEST_PROGS := peeksiginfo +TEST_GEN_PROGS := peeksiginfo include ../lib.mk diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index 8401e87e34e1..5fa6fd2246b1 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -1,10 +1,6 @@ -TEST_PROGS := seccomp_bpf +TEST_GEN_PROGS := seccomp_bpf CFLAGS += -Wl,-no-as-needed -Wall LDFLAGS += -lpthread -all: $(TEST_PROGS) - include ../lib.mk -clean: - $(RM) $(TEST_PROGS) diff --git a/tools/testing/selftests/sigaltstack/Makefile b/tools/testing/selftests/sigaltstack/Makefile index 56af56eda6fa..f68fbf80d8be 100644 --- a/tools/testing/selftests/sigaltstack/Makefile +++ b/tools/testing/selftests/sigaltstack/Makefile @@ -1,8 +1,5 @@ CFLAGS = -Wall -BINARIES = sas -all: $(BINARIES) +TEST_GEN_PROGS = sas include ../lib.mk -clean: - rm -rf $(BINARIES) diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c index 1bb01258e559..ccd07343d418 100644 --- a/tools/testing/selftests/sigaltstack/sas.c +++ b/tools/testing/selftests/sigaltstack/sas.c @@ -57,7 +57,7 @@ void my_usr1(int sig, siginfo_t *si, void *u) exit(EXIT_FAILURE); } if (stk.ss_flags != SS_DISABLE) - printf("[FAIL]\tss_flags=%i, should be SS_DISABLE\n", + printf("[FAIL]\tss_flags=%x, should be SS_DISABLE\n", stk.ss_flags); else printf("[OK]\tsigaltstack is disabled in sighandler\n"); @@ -122,7 +122,8 @@ int main(void) if (stk.ss_flags == SS_DISABLE) { printf("[OK]\tInitial sigaltstack state was SS_DISABLE\n"); } else { - printf("[FAIL]\tInitial sigaltstack state was %i; should have been SS_DISABLE\n", stk.ss_flags); + printf("[FAIL]\tInitial sigaltstack state was %x; " + "should have been SS_DISABLE\n", stk.ss_flags); return EXIT_FAILURE; } @@ -165,7 +166,7 @@ int main(void) exit(EXIT_FAILURE); } if (stk.ss_flags != SS_AUTODISARM) { - printf("[FAIL]\tss_flags=%i, should be SS_AUTODISARM\n", + printf("[FAIL]\tss_flags=%x, should be SS_AUTODISARM\n", stk.ss_flags); exit(EXIT_FAILURE); } diff --git a/tools/testing/selftests/size/Makefile b/tools/testing/selftests/size/Makefile index bbd0b5398b61..4685b3e421fc 100644 --- a/tools/testing/selftests/size/Makefile +++ b/tools/testing/selftests/size/Makefile @@ -1,11 +1,5 @@ -all: get_size +CFLAGS := -static -ffreestanding -nostartfiles -s -get_size: get_size.c - $(CC) -static -ffreestanding -nostartfiles -s $< -o $@ - -TEST_PROGS := get_size +TEST_GEN_PROGS := get_size include ../lib.mk - -clean: - $(RM) get_size diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 1d5556869137..b90e50c36f9f 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -1,20 +1,16 @@ -CC = $(CROSS_COMPILE)gcc BUILD_FLAGS = -DKTEST CFLAGS += -O3 -Wl,-no-as-needed -Wall $(BUILD_FLAGS) LDFLAGS += -lrt -lpthread # these are all "safe" tests that don't modify # system 
time or require escalated privledges -TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ +TEST_GEN_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ inconsistency-check raw_skew threadtest rtctest -TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \ +TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \ skew_consistency clocksource-switch leap-a-day \ leapcrash set-tai set-2038 set-tz -bins = $(TEST_PROGS) $(TEST_PROGS_EXTENDED) - -all: ${bins} include ../lib.mk @@ -34,5 +30,3 @@ run_destructive_tests: run_tests ./set-tai ./set-2038 -clean: - rm -f ${bins} diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 900dfaf81051..4cff7e7ddcc4 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -1,41 +1,33 @@ # Makefile for vm selftests CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) -BINARIES = compaction_test -BINARIES += hugepage-mmap -BINARIES += hugepage-shm -BINARIES += map_hugetlb -BINARIES += mlock2-tests -BINARIES += on-fault-limit -BINARIES += thuge-gen -BINARIES += transhuge-stress -BINARIES += userfaultfd -BINARIES += userfaultfd_hugetlb -BINARIES += userfaultfd_shmem -BINARIES += mlock-random-test - -all: $(BINARIES) -%: %.c - $(CC) $(CFLAGS) -o $@ $^ -lrt -userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h - $(CC) $(CFLAGS) -O2 -o $@ $< -lpthread - -userfaultfd_hugetlb: userfaultfd.c ../../../../usr/include/linux/kernel.h +LDLIBS = -lrt +TEST_GEN_FILES = compaction_test +TEST_GEN_FILES += hugepage-mmap +TEST_GEN_FILES += hugepage-shm +TEST_GEN_FILES += map_hugetlb +TEST_GEN_FILES += mlock2-tests +TEST_GEN_FILES += on-fault-limit +TEST_GEN_FILES += thuge-gen +TEST_GEN_FILES += transhuge-stress +TEST_GEN_FILES += userfaultfd +TEST_GEN_FILES += userfaultfd_hugetlb +TEST_GEN_FILES += userfaultfd_shmem +TEST_GEN_FILES += mlock-random-test + +TEST_PROGS := run_vmtests + +include ../lib.mk + +$(OUTPUT)/userfaultfd: LDLIBS += -lpthread ../../../../usr/include/linux/kernel.h + +$(OUTPUT)/userfaultfd_hugetlb: userfaultfd.c ../../../../usr/include/linux/kernel.h $(CC) $(CFLAGS) -DHUGETLB_TEST -O2 -o $@ $< -lpthread -userfaultfd_shmem: userfaultfd.c ../../../../usr/include/linux/kernel.h +$(OUTPUT)/userfaultfd_shmem: userfaultfd.c ../../../../usr/include/linux/kernel.h $(CC) $(CFLAGS) -DSHMEM_TEST -O2 -o $@ $< -lpthread -mlock-random-test: mlock-random-test.c - $(CC) $(CFLAGS) -o $@ $< -lcap +$(OUTPUT)/mlock-random-test: LDLIBS += -lcap ../../../../usr/include/linux/kernel.h: make -C ../../../.. 
headers_install - -TEST_PROGS := run_vmtests -TEST_FILES := $(BINARIES) - -include ../lib.mk - -clean: - $(RM) $(BINARIES) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 83d8b1c6cb0e..3a5ebae5303e 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -17,6 +17,9 @@ TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY) BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) +BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) +BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) + CFLAGS := -O2 -g -std=gnu99 -pthread -Wall UNAME_M := $(shell uname -m) @@ -40,10 +43,10 @@ all_64: $(BINARIES_64) clean: $(RM) $(BINARIES_32) $(BINARIES_64) -$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c +$(BINARIES_32): $(OUTPUT)/%_32: %.c $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm -$(TARGETS_C_64BIT_ALL:%=%_64): %_64: %.c +$(BINARIES_64): $(OUTPUT)/%_64: %.c $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl # x86_64 users should be encouraged to install 32-bit libraries @@ -65,12 +68,12 @@ warn_32bit_failure: endif # Some tests have additional dependencies. -sysret_ss_attrs_64: thunks.S -ptrace_syscall_32: raw_syscall_helper_32.S -test_syscall_vdso_32: thunks_32.S +$(OUTPUT)/sysret_ss_attrs_64: thunks.S +$(OUTPUT)/ptrace_syscall_32: raw_syscall_helper_32.S +$(OUTPUT)/test_syscall_vdso_32: thunks_32.S # check_initial_reg_state is special: it needs a custom entry, and it # needs to be static so that its interpreter doesn't destroy its initial # state. -check_initial_reg_state_32: CFLAGS += -Wl,-ereal_start -static -check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static +$(OUTPUT)/check_initial_reg_state_32: CFLAGS += -Wl,-ereal_start -static +$(OUTPUT)/check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index df9e0a0cdf29..3237bc010e1c 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -192,7 +192,7 @@ void lots_o_noops_around_write(int *write_to_me) #define SYS_pkey_alloc 381 #define SYS_pkey_free 382 #define REG_IP_IDX REG_EIP -#define si_pkey_offset 0x18 +#define si_pkey_offset 0x14 #else #define SYS_mprotect_key 329 #define SYS_pkey_alloc 330 @@ -462,7 +462,7 @@ void pkey_disable_set(int pkey, int flags) unsigned long syscall_flags = 0; int ret; int pkey_rights; - u32 orig_pkru; + u32 orig_pkru = rdpkru(); dprintf1("START->%s(%d, 0x%x)\n", __func__, pkey, flags); @@ -812,8 +812,6 @@ void setup_hugetlbfs(void) { int err; int fd; - int validated_nr_pages; - int i; char buf[] = "123"; if (geteuid() != 0) { @@ -1116,11 +1114,6 @@ void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey) err = sys_pkey_free(i); pkey_assert(err); - /* not enforced when pkey_get() is not a syscall - err = pkey_get(i, 0); - pkey_assert(err < 0); - */ - err = sys_pkey_free(i); pkey_assert(err); @@ -1133,14 +1126,8 @@ void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey) void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) { int err; - int bad_flag = (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) + 1; int bad_pkey = NR_PKEYS+99; - /* not enforced when pkey_get() is not a syscall - err = pkey_get(bad_pkey, bad_flag); - pkey_assert(err < 0); - */ - /* pass a known-invalid pkey in: */ err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, bad_pkey); pkey_assert(err); @@ -1149,8 
+1136,6 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) /* Assumes that all pkeys other than 'pkey' are unallocated */ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) { - unsigned long flags; - unsigned long init_val; int err; int allocated_pkeys[NR_PKEYS] = {0}; int nr_allocated_pkeys = 0; diff --git a/tools/testing/selftests/zram/Makefile b/tools/testing/selftests/zram/Makefile index 29d80346e3eb..c3a87e5f9d36 100644 --- a/tools/testing/selftests/zram/Makefile +++ b/tools/testing/selftests/zram/Makefile @@ -2,8 +2,7 @@ all: TEST_PROGS := zram.sh TEST_FILES := zram01.sh zram02.sh zram_lib.sh +EXTRA_CLEAN := err.log include ../lib.mk -clean: - $(RM) err.log diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 3815e940fbea..bb298a200cd3 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/mmu_context.h> +#include <linux/sched/mm.h> #include "async_pf.h" #include <trace/events/kvm.h> @@ -204,7 +205,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, work->addr = hva; work->arch = *arch; work->mm = current->mm; - atomic_inc(&work->mm->mm_users); + mmget(work->mm); kvm_get_kvm(work->vcpu->kvm); /* this can't really happen otherwise gfn_to_pfn_async diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5b0dd4a9b2cb..799499417f5b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -32,7 +32,9 @@ #include <linux/file.h> #include <linux/syscore_ops.h> #include <linux/cpu.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sched/stat.h> #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/anon_inodes.h> @@ -611,7 +613,7 @@ static struct kvm *kvm_create_vm(unsigned long type) return ERR_PTR(-ENOMEM); spin_lock_init(&kvm->mmu_lock); - atomic_inc(&current->mm->mm_count); + mmgrab(current->mm); kvm->mm = current->mm; kvm_eventfd_init(kvm); mutex_init(&kvm->lock);
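
The selftests portion of this diff converges on one convention: individual test Makefiles stop open-coding their build and clean rules and instead declare TEST_GEN_PROGS / TEST_GEN_FILES (binaries the shared lib.mk should build and clean) alongside the existing TEST_PROGS (scripts shipped as-is), with lib.mk supplying the $(OUTPUT)-prefixed pattern rules plus the run_tests, install and clean targets. As a minimal sketch of a converted Makefile under this scheme (the "foo" test name, run_foo.sh script, foo.log file and -lpthread flag are invented for illustration and are not part of this series):

# Hypothetical tools/testing/selftests/foo/Makefile, for illustration only.
CFLAGS += -Wall

# Binary built from foo-test.c by lib.mk's $(OUTPUT)/%: %.c pattern rule and
# removed again by the common clean target.
TEST_GEN_PROGS := foo-test

# Script shipped as-is; still installed and run the same way as before.
TEST_PROGS := run_foo.sh

# Extra build products for lib.mk's clean rule, as in the zram and pstore conversions.
EXTRA_CLEAN := foo.log

include ../lib.mk

# Per-binary options now key off the $(OUTPUT)/ path of the generated binary.
$(OUTPUT)/foo-test: LDLIBS += -lpthread

This matches the reworked lib.mk above: RUN_TESTS and EMIT_TESTS iterate over both TEST_GEN_PROGS and TEST_PROGS, and out-of-tree builds work because every generated path is prefixed with $(OUTPUT).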